diff --git a/.gitattributes b/.gitattributes
index c3ef1539cd6e5610ecb173fd0448181b2512c39a..f8444a7fcb131964e7290d6eb9f797d6c59e47a7 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1695,3 +1695,4 @@ vllm/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophant
parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 filter=lfs diff=lfs merge=lfs -text
diff --git a/parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101 b/parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101
new file mode 100644
index 0000000000000000000000000000000000000000..b5c8746104e2617fa4ba62bc59cc1df4f1a45edc
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/decord.libs/libavfilter-1e2243e2.so.7.40.101
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79b37a526b50d6ebcd2255983198276718c29c0942d1fde96306e413041e01cb
+size 3075448
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..69dd431440c7266e26056b61d7bae98be2550957
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so differ
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/_measurements.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/_measurements.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcd83df42be3708231870cf5eff977a6388cc3a1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/_measurements.py
@@ -0,0 +1,1680 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import numpy as np
+from . import _ni_support
+from . import _ni_label
+from . import _nd_image
+from . import _morphology
+
+__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
+ 'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
+ 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
+ 'histogram', 'watershed_ift', 'sum_labels', 'value_indices']
+
+
+def label(input, structure=None, output=None):
+ """
+ Label features in an array.
+
+ Parameters
+ ----------
+ input : array_like
+ An array-like object to be labeled. Any non-zero values in `input` are
+ counted as features and zero values are considered the background.
+ structure : array_like, optional
+ A structuring element that defines feature connections.
+ `structure` must be centrosymmetric
+ (see Notes).
+ If no structuring element is provided,
+ one is automatically generated with a squared connectivity equal to
+ one. That is, for a 2-D `input` array, the default structuring element
+ is::
+
+ [[0,1,0],
+ [1,1,1],
+ [0,1,0]]
+
+ output : (None, data-type, array_like), optional
+ If `output` is a data type, it specifies the type of the resulting
+ labeled feature array.
+ If `output` is an array-like object, then `output` will be updated
+ with the labeled features from this function. This function can
+ operate in-place, by passing output=input.
+ Note that the output must be able to store the largest label, or this
+ function will raise an Exception.
+
+ Returns
+ -------
+ label : ndarray or int
+ An integer ndarray where each unique feature in `input` has a unique
+ label in the returned array.
+ num_features : int
+ How many objects were found.
+
+ If `output` is None, this function returns a tuple of
+ (`labeled_array`, `num_features`).
+
+ If `output` is a ndarray, then it will be updated with values in
+ `labeled_array` and only `num_features` will be returned by this
+ function.
+
+ See Also
+ --------
+ find_objects : generate a list of slices for the labeled features (or
+ objects); useful for finding features' position or
+ dimensions
+
+ Notes
+ -----
+ A centrosymmetric matrix is a matrix that is symmetric about the center.
+ See [1]_ for more information.
+
+ The `structure` matrix must be centrosymmetric to ensure
+ two-way connections.
+ For instance, if the `structure` matrix is not centrosymmetric
+ and is defined as::
+
+ [[0,1,0],
+ [1,1,0],
+ [0,0,0]]
+
+ and the `input` is::
+
+ [[1,2],
+ [0,3]]
+
+ then the structure matrix would indicate the
+ entry 2 in the input is connected to 1,
+ but 1 is not connected to 2.
+
+ References
+ ----------
+ .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric)
+ matrices, their basic properties, eigenvalues, and
+ eigenvectors." The American Mathematical Monthly 92.10
+ (1985): 711-717.
+
+ Examples
+ --------
+ Create an image with some features, then label it using the default
+ (cross-shaped) structuring element:
+
+ >>> from scipy.ndimage import label, generate_binary_structure
+ >>> import numpy as np
+ >>> a = np.array([[0,0,1,1,0,0],
+ ... [0,0,0,1,0,0],
+ ... [1,1,0,0,1,0],
+ ... [0,0,0,1,0,0]])
+ >>> labeled_array, num_features = label(a)
+
+ Each of the 4 features are labeled with a different integer:
+
+ >>> num_features
+ 4
+ >>> labeled_array
+ array([[0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0],
+ [2, 2, 0, 0, 3, 0],
+ [0, 0, 0, 4, 0, 0]])
+
+ Generate a structuring element that will consider features connected even
+ if they touch diagonally:
+
+ >>> s = generate_binary_structure(2,2)
+
+ or,
+
+ >>> s = [[1,1,1],
+ ... [1,1,1],
+ ... [1,1,1]]
+
+ Label the image using the new structuring element:
+
+ >>> labeled_array, num_features = label(a, structure=s)
+
+ Show the 2 labeled features (note that features 1, 3, and 4 from above are
+ now considered a single feature):
+
+ >>> num_features
+ 2
+ >>> labeled_array
+ array([[0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0],
+ [2, 2, 0, 0, 1, 0],
+ [0, 0, 0, 1, 0, 0]])
+
+ """
+ input = np.asarray(input)
+ if np.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ if structure is None:
+ structure = _morphology.generate_binary_structure(input.ndim, 1)
+ structure = np.asarray(structure, dtype=bool)
+ if structure.ndim != input.ndim:
+ raise RuntimeError('structure and input must have equal rank')
+ for ii in structure.shape:
+ if ii != 3:
+ raise ValueError('structure dimensions must be equal to 3')
+
+ # Use 32 bits if it's large enough for this image.
+ # _ni_label.label() needs two entries for background and
+ # foreground tracking
+ need_64bits = input.size >= (2**31 - 2)
+
+ if isinstance(output, np.ndarray):
+ if output.shape != input.shape:
+ raise ValueError("output shape not correct")
+ caller_provided_output = True
+ else:
+ caller_provided_output = False
+ if output is None:
+ output = np.empty(input.shape, np.intp if need_64bits else np.int32)
+ else:
+ output = np.empty(input.shape, output)
+
+ # handle scalars, 0-D arrays
+ if input.ndim == 0 or input.size == 0:
+ if input.ndim == 0:
+ # scalar
+ maxlabel = 1 if (input != 0) else 0
+ output[...] = maxlabel
+ else:
+ # 0-D
+ maxlabel = 0
+ if caller_provided_output:
+ return maxlabel
+ else:
+ return output, maxlabel
+
+ try:
+ max_label = _ni_label._label(input, structure, output)
+ except _ni_label.NeedMoreBits as e:
+ # Make another attempt with enough bits, then try to cast to the
+ # new type.
+ tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
+ max_label = _ni_label._label(input, structure, tmp_output)
+ output[...] = tmp_output[...]
+ if not np.all(output == tmp_output):
+ # refuse to return bad results
+ raise RuntimeError(
+ "insufficient bit-depth in requested output type"
+ ) from e
+
+ if caller_provided_output:
+ # result was written in-place
+ return max_label
+ else:
+ return output, max_label
+
+
+def find_objects(input, max_label=0):
+ """
+ Find objects in a labeled array.
+
+ Parameters
+ ----------
+ input : ndarray of ints
+ Array containing objects defined by different labels. Labels with
+ value 0 are ignored.
+ max_label : int, optional
+ Maximum label to be searched for in `input`. If max_label is not
+ given, the positions of all objects are returned.
+
+ Returns
+ -------
+ object_slices : list of tuples
+ A list of tuples, with each tuple containing N slices (with N the
+ dimension of the input array). Slices correspond to the minimal
+ parallelepiped that contains the object. If a number is missing,
+ None is returned instead of a slice. The label ``l`` corresponds to
+ the index ``l-1`` in the returned list.
+
+ See Also
+ --------
+ label, center_of_mass
+
+ Notes
+ -----
+ This function is very useful for isolating a volume of interest inside
+ a 3-D array, that cannot be "seen through".
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((6,6), dtype=int)
+ >>> a[2:4, 2:4] = 1
+ >>> a[4, 4] = 1
+ >>> a[:2, :3] = 2
+ >>> a[0, 5] = 3
+ >>> a
+ array([[2, 2, 2, 0, 0, 3],
+ [2, 2, 2, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 0]])
+ >>> ndimage.find_objects(a)
+ [(slice(2, 5, None), slice(2, 5, None)),
+ (slice(0, 2, None), slice(0, 3, None)),
+ (slice(0, 1, None), slice(5, 6, None))]
+ >>> ndimage.find_objects(a, max_label=2)
+ [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]
+ >>> ndimage.find_objects(a == 1, max_label=2)
+ [(slice(2, 5, None), slice(2, 5, None)), None]
+
+ >>> loc = ndimage.find_objects(a)[0]
+ >>> a[loc]
+ array([[1, 1, 0],
+ [1, 1, 0],
+ [0, 0, 1]])
+
+ """
+ input = np.asarray(input)
+ if np.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+
+ if max_label < 1:
+ max_label = input.max()
+
+ return _nd_image.find_objects(input, max_label)
+
+
+def value_indices(arr, *, ignore_value=None):
+ """
+ Find indices of each distinct value in given array.
+
+ Parameters
+ ----------
+ arr : ndarray of ints
+ Array containing integer values.
+ ignore_value : int, optional
+ This value will be ignored in searching the `arr` array. If not
+ given, all values found will be included in output. Default
+ is None.
+
+ Returns
+ -------
+ indices : dictionary
+ A Python dictionary of array indices for each distinct value. The
+ dictionary is keyed by the distinct values, the entries are array
+ index tuples covering all occurrences of the value within the
+ array.
+
+ This dictionary can occupy significant memory, usually several times
+ the size of the input array.
+
+ See Also
+ --------
+ label, maximum, median, minimum_position, extrema, sum, mean, variance,
+ standard_deviation, numpy.where, numpy.unique
+
+ Notes
+ -----
+ For a small array with few distinct values, one might use
+ `numpy.unique()` to find all possible values, and ``(arr == val)`` to
+ locate each value within that array. However, for large arrays,
+ with many distinct values, this can become extremely inefficient,
+ as locating each value would require a new search through the entire
+ array. Using this function, there is essentially one search, with
+ the indices saved for all distinct values.
+
+ This is useful when matching a categorical image (e.g. a segmentation
+ or classification) to an associated image of other data, allowing
+ any per-class statistic(s) to then be calculated. Provides a
+ more flexible alternative to functions like ``scipy.ndimage.mean()``
+ and ``scipy.ndimage.variance()``.
+
+ Some other closely related functionality, with different strengths and
+ weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and
+ the `scikit-image `_ function
+ ``skimage.measure.regionprops()``.
+
+ Note for IDL users: this provides functionality equivalent to IDL's
+ REVERSE_INDICES option (as per the IDL documentation for the
+ `HISTOGRAM `_
+ function).
+
+ .. versionadded:: 1.10.0
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy import ndimage
+ >>> a = np.zeros((6, 6), dtype=int)
+ >>> a[2:4, 2:4] = 1
+ >>> a[4, 4] = 1
+ >>> a[:2, :3] = 2
+ >>> a[0, 5] = 3
+ >>> a
+ array([[2, 2, 2, 0, 0, 3],
+ [2, 2, 2, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 0]])
+ >>> val_indices = ndimage.value_indices(a)
+
+ The dictionary `val_indices` will have an entry for each distinct
+ value in the input array.
+
+ >>> val_indices.keys()
+ dict_keys([np.int64(0), np.int64(1), np.int64(2), np.int64(3)])
+
+ The entry for each value is an index tuple, locating the elements
+ with that value.
+
+ >>> ndx1 = val_indices[1]
+ >>> ndx1
+ (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4]))
+
+ This can be used to index into the original array, or any other
+ array with the same shape.
+
+ >>> a[ndx1]
+ array([1, 1, 1, 1, 1])
+
+ If the zeros were to be ignored, then the resulting dictionary
+ would no longer have an entry for zero.
+
+ >>> val_indices = ndimage.value_indices(a, ignore_value=0)
+ >>> val_indices.keys()
+ dict_keys([np.int64(1), np.int64(2), np.int64(3)])
+
+ """
+ # Cope with ignore_value being None, without too much extra complexity
+ # in the C code. If not None, the value is passed in as a numpy array
+ # with the same dtype as arr.
+ ignore_value_arr = np.zeros((1,), dtype=arr.dtype)
+ ignoreIsNone = (ignore_value is None)
+ if not ignoreIsNone:
+ ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value)
+
+ val_indices = _nd_image.value_indices(arr, ignoreIsNone, ignore_value_arr)
+ return val_indices
+
+
+def labeled_comprehension(input, labels, index, func, out_dtype, default,
+ pass_positions=False):
+ """
+ Roughly equivalent to [func(input[labels == i]) for i in index].
+
+ Sequentially applies an arbitrary function (that works on array_like input)
+ to subsets of an N-D image array specified by `labels` and `index`.
+ The option exists to provide the function with positional parameters as the
+ second argument.
+
+ Parameters
+ ----------
+ input : array_like
+ Data from which to select `labels` to process.
+ labels : array_like or None
+ Labels to objects in `input`.
+ If not None, array must be same shape as `input`.
+ If None, `func` is applied to raveled `input`.
+ index : int, sequence of ints or None
+ Subset of `labels` to which to apply `func`.
+ If a scalar, a single value is returned.
+ If None, `func` is applied to all non-zero values of `labels`.
+ func : callable
+ Python function to apply to `labels` from `input`.
+ out_dtype : dtype
+ Dtype to use for `result`.
+ default : int, float or None
+ Default return value when a element of `index` does not exist
+ in `labels`.
+ pass_positions : bool, optional
+ If True, pass linear indices to `func` as a second argument.
+ Default is False.
+
+ Returns
+ -------
+ result : ndarray
+ Result of applying `func` to each of `labels` to `input` in `index`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> from scipy import ndimage
+ >>> lbl, nlbl = ndimage.label(a)
+ >>> lbls = np.arange(1, nlbl+1)
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)
+ array([ 2.75, 5.5 , 6. ])
+
+ Falling back to `default`:
+
+ >>> lbls = np.arange(1, nlbl+2)
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)
+ array([ 2.75, 5.5 , 6. , -1. ])
+
+ Passing positions:
+
+ >>> def fn(val, pos):
+ ... print("fn says: %s : %s" % (val, pos))
+ ... return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
+ ...
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
+ fn says: [1 2 5 3] : [0 1 4 5]
+ fn says: [4 7] : [ 7 11]
+ fn says: [9 3] : [12 13]
+ array([ 11., 11., -12., 0.])
+
+ """
+
+ as_scalar = np.isscalar(index)
+ input = np.asarray(input)
+
+ if pass_positions:
+ positions = np.arange(input.size).reshape(input.shape)
+
+ if labels is None:
+ if index is not None:
+ raise ValueError("index without defined labels")
+ if not pass_positions:
+ return func(input.ravel())
+ else:
+ return func(input.ravel(), positions.ravel())
+
+ try:
+ input, labels = np.broadcast_arrays(input, labels)
+ except ValueError as e:
+ raise ValueError("input and labels must have the same shape "
+ "(excepting dimensions with width 1)") from e
+
+ if index is None:
+ if not pass_positions:
+ return func(input[labels > 0])
+ else:
+ return func(input[labels > 0], positions[labels > 0])
+
+ index = np.atleast_1d(index)
+ if np.any(index.astype(labels.dtype).astype(index.dtype) != index):
+ raise ValueError(f"Cannot convert index values from <{index.dtype}> to "
+ f"<{labels.dtype}> (labels' type) without loss of precision")
+
+ index = index.astype(labels.dtype)
+
+ # optimization: find min/max in index,
+ # and select those parts of labels, input, and positions
+ lo = index.min()
+ hi = index.max()
+ mask = (labels >= lo) & (labels <= hi)
+
+ # this also ravels the arrays
+ labels = labels[mask]
+ input = input[mask]
+ if pass_positions:
+ positions = positions[mask]
+
+ # sort everything by labels
+ label_order = labels.argsort()
+ labels = labels[label_order]
+ input = input[label_order]
+ if pass_positions:
+ positions = positions[label_order]
+
+ index_order = index.argsort()
+ sorted_index = index[index_order]
+
+ def do_map(inputs, output):
+ """labels must be sorted"""
+ nidx = sorted_index.size
+
+ # Find boundaries for each stretch of constant labels
+ # This could be faster, but we already paid N log N to sort labels.
+ lo = np.searchsorted(labels, sorted_index, side='left')
+ hi = np.searchsorted(labels, sorted_index, side='right')
+
+ for i, l, h in zip(range(nidx), lo, hi):
+ if l == h:
+ continue
+ output[i] = func(*[inp[l:h] for inp in inputs])
+
+ temp = np.empty(index.shape, out_dtype)
+ temp[:] = default
+ if not pass_positions:
+ do_map([input], temp)
+ else:
+ do_map([input, positions], temp)
+
+ output = np.zeros(index.shape, out_dtype)
+ output[index_order] = temp
+ if as_scalar:
+ output = output[0]
+
+ return output
+
+
+def _safely_castable_to_int(dt):
+ """Test whether the NumPy data type `dt` can be safely cast to an int."""
+ int_size = np.dtype(int).itemsize
+ safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or
+ (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))
+ return safe
+
+
+def _stats(input, labels=None, index=None, centered=False):
+ """Count, sum, and optionally compute (sum - centre)^2 of input by label
+
+ Parameters
+ ----------
+ input : array_like, N-D
+ The input data to be analyzed.
+ labels : array_like (N-D), optional
+ The labels of the data in `input`. This array must be broadcast
+ compatible with `input`; typically, it is the same shape as `input`.
+ If `labels` is None, all nonzero values in `input` are treated as
+ the single labeled group.
+ index : label or sequence of labels, optional
+ These are the labels of the groups for which the stats are computed.
+ If `index` is None, the stats are computed for the single group where
+ `labels` is greater than 0.
+ centered : bool, optional
+ If True, the centered sum of squares for each labeled group is
+ also returned. Default is False.
+
+ Returns
+ -------
+ counts : int or ndarray of ints
+ The number of elements in each labeled group.
+ sums : scalar or ndarray of scalars
+ The sums of the values in each labeled group.
+ sums_c : scalar or ndarray of scalars, optional
+ The sums of mean-centered squares of the values in each labeled group.
+ This is only returned if `centered` is True.
+
+ """
+ def single_group(vals):
+ if centered:
+ vals_c = vals - vals.mean()
+ return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()
+ else:
+ return vals.size, vals.sum()
+
+ if labels is None:
+ return single_group(input)
+
+ # ensure input and labels match sizes
+ input, labels = np.broadcast_arrays(input, labels)
+
+ if index is None:
+ return single_group(input[labels > 0])
+
+ if np.isscalar(index):
+ return single_group(input[labels == index])
+
+ def _sum_centered(labels):
+ # `labels` is expected to be an ndarray with the same shape as `input`.
+ # It must contain the label indices (which are not necessarily the labels
+ # themselves).
+ means = sums / counts
+ centered_input = input - means[labels]
+ # bincount expects 1-D inputs, so we ravel the arguments.
+ bc = np.bincount(labels.ravel(),
+ weights=(centered_input *
+ centered_input.conjugate()).ravel())
+ return bc
+
+ # Remap labels to unique integers if necessary, or if the largest
+ # label is larger than the number of values.
+
+ if (not _safely_castable_to_int(labels.dtype) or
+ labels.min() < 0 or labels.max() > labels.size):
+ # Use np.unique to generate the label indices. `new_labels` will
+ # be 1-D, but it should be interpreted as the flattened N-D array of
+ # label indices.
+ unique_labels, new_labels = np.unique(labels, return_inverse=True)
+ new_labels = np.reshape(new_labels, (-1,)) # flatten, since it may be >1-D
+ counts = np.bincount(new_labels)
+ sums = np.bincount(new_labels, weights=input.ravel())
+ if centered:
+ # Compute the sum of the mean-centered squares.
+ # We must reshape new_labels to the N-D shape of `input` before
+ # passing it _sum_centered.
+ sums_c = _sum_centered(new_labels.reshape(labels.shape))
+ idxs = np.searchsorted(unique_labels, index)
+ # make all of idxs valid
+ idxs[idxs >= unique_labels.size] = 0
+ found = (unique_labels[idxs] == index)
+ else:
+ # labels are an integer type allowed by bincount, and there aren't too
+ # many, so call bincount directly.
+ counts = np.bincount(labels.ravel())
+ sums = np.bincount(labels.ravel(), weights=input.ravel())
+ if centered:
+ sums_c = _sum_centered(labels)
+ # make sure all index values are valid
+ idxs = np.asanyarray(index, np.int_).copy()
+ found = (idxs >= 0) & (idxs < counts.size)
+ idxs[~found] = 0
+
+ counts = counts[idxs]
+ counts[~found] = 0
+ sums = sums[idxs]
+ sums[~found] = 0
+
+ if not centered:
+ return (counts, sums)
+ else:
+ sums_c = sums_c[idxs]
+ sums_c[~found] = 0
+ return (counts, sums, sums_c)
+
+
+def sum(input, labels=None, index=None):
+ """
+ Calculate the sum of the values of the array.
+
+ Notes
+ -----
+ This is an alias for `ndimage.sum_labels` kept for backwards compatibility
+ reasons, for new code please prefer `sum_labels`. See the `sum_labels`
+ docstring for more details.
+
+ """
+ return sum_labels(input, labels, index)
+
+
+def sum_labels(input, labels=None, index=None):
+ """
+ Calculate the sum of the values of the array.
+
+ Parameters
+ ----------
+ input : array_like
+ Values of `input` inside the regions defined by `labels`
+ are summed together.
+ labels : array_like of ints, optional
+ Assign labels to the values of the array. Has to have the same shape as
+ `input`.
+ index : array_like, optional
+ A single label number or a sequence of label numbers of
+ the objects to be measured.
+
+ Returns
+ -------
+ sum : ndarray or scalar
+ An array of the sums of values of `input` inside the regions defined
+ by `labels` with the same shape as `index`. If 'index' is None or scalar,
+ a scalar is returned.
+
+ See Also
+ --------
+ mean, median
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> input = [0,1,2,3]
+ >>> labels = [1,1,2,2]
+ >>> ndimage.sum_labels(input, labels, index=[1,2])
+ [1.0, 5.0]
+ >>> ndimage.sum_labels(input, labels, index=1)
+ 1
+ >>> ndimage.sum_labels(input, labels)
+ 6
+
+
+ """
+ count, sum = _stats(input, labels, index)
+ return sum
+
+
+def mean(input, labels=None, index=None):
+ """
+ Calculate the mean of the values of an array at labels.
+
+ Parameters
+ ----------
+ input : array_like
+ Array on which to compute the mean of elements over distinct
+ regions.
+ labels : array_like, optional
+ Array of labels of same shape, or broadcastable to the same shape as
+ `input`. All elements sharing the same label form one region over
+ which the mean of the elements is computed.
+ index : int or sequence of ints, optional
+ Labels of the objects over which the mean is to be computed.
+ Default is None, in which case the mean for all values where label is
+ greater than 0 is calculated.
+
+ Returns
+ -------
+ out : list
+ Sequence of same length as `index`, with the mean of the different
+ regions labeled by the labels in `index`.
+
+ See Also
+ --------
+ variance, standard_deviation, minimum, maximum, sum, label
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.arange(25).reshape((5,5))
+ >>> labels = np.zeros_like(a)
+ >>> labels[3:5,3:5] = 1
+ >>> index = np.unique(labels)
+ >>> labels
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1],
+ [0, 0, 0, 1, 1]])
+ >>> index
+ array([0, 1])
+ >>> ndimage.mean(a, labels=labels, index=index)
+ [10.285714285714286, 21.0]
+
+ """
+
+ count, sum = _stats(input, labels, index)
+ return sum / np.asanyarray(count).astype(np.float64)
+
+
+def variance(input, labels=None, index=None):
+ """
+ Calculate the variance of the values of an N-D image array, optionally at
+ specified sub-regions.
+
+ Parameters
+ ----------
+ input : array_like
+ Nd-image data to process.
+ labels : array_like, optional
+ Labels defining sub-regions in `input`.
+ If not None, must be same shape as `input`.
+ index : int or sequence of ints, optional
+ `labels` to include in output. If None (default), all values where
+ `labels` is non-zero are used.
+
+ Returns
+ -------
+ variance : float or ndarray
+ Values of variance, for each sub-region if `labels` and `index` are
+ specified.
+
+ See Also
+ --------
+ label, standard_deviation, maximum, minimum, extrema
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> from scipy import ndimage
+ >>> ndimage.variance(a)
+ 7.609375
+
+ Features to process can be specified using `labels` and `index`:
+
+ >>> lbl, nlbl = ndimage.label(a)
+ >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1))
+ array([ 2.1875, 2.25 , 9. ])
+
+ If no index is given, all non-zero `labels` are processed:
+
+ >>> ndimage.variance(a, lbl)
+ 6.1875
+
+ """
+ count, sum, sum_c_sq = _stats(input, labels, index, centered=True)
+ return sum_c_sq / np.asanyarray(count).astype(float)
+
+
+def standard_deviation(input, labels=None, index=None):
+ """
+ Calculate the standard deviation of the values of an N-D image array,
+ optionally at specified sub-regions.
+
+ Parameters
+ ----------
+ input : array_like
+ N-D image data to process.
+ labels : array_like, optional
+ Labels to identify sub-regions in `input`.
+ If not None, must be same shape as `input`.
+ index : int or sequence of ints, optional
+ `labels` to include in output. If None (default), all values where
+ `labels` is non-zero are used.
+
+ Returns
+ -------
+ standard_deviation : float or ndarray
+ Values of standard deviation, for each sub-region if `labels` and
+ `index` are specified.
+
+ See Also
+ --------
+ label, variance, maximum, minimum, extrema
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> from scipy import ndimage
+ >>> ndimage.standard_deviation(a)
+ 2.7585095613392387
+
+ Features to process can be specified using `labels` and `index`:
+
+ >>> lbl, nlbl = ndimage.label(a)
+ >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1))
+ array([ 1.479, 1.5 , 3. ])
+
+ If no index is given, non-zero `labels` are processed:
+
+ >>> ndimage.standard_deviation(a, lbl)
+ 2.4874685927665499
+
+ """
+ return np.sqrt(variance(input, labels, index))
+
+
+def _select(input, labels=None, index=None, find_min=False, find_max=False,
+ find_min_positions=False, find_max_positions=False,
+ find_median=False):
+ """Returns min, max, or both, plus their positions (if requested), and
+ median."""
+
+ input = np.asanyarray(input)
+
+ find_positions = find_min_positions or find_max_positions
+ positions = None
+ if find_positions:
+ positions = np.arange(input.size).reshape(input.shape)
+
+ def single_group(vals, positions):
+ result = []
+ if find_min:
+ result += [vals.min()]
+ if find_min_positions:
+ result += [positions[vals == vals.min()][0]]
+ if find_max:
+ result += [vals.max()]
+ if find_max_positions:
+ result += [positions[vals == vals.max()][0]]
+ if find_median:
+ result += [np.median(vals)]
+ return result
+
+ if labels is None:
+ return single_group(input, positions)
+
+ # ensure input and labels match sizes
+ input, labels = np.broadcast_arrays(input, labels)
+
+ if index is None:
+ mask = (labels > 0)
+ masked_positions = None
+ if find_positions:
+ masked_positions = positions[mask]
+ return single_group(input[mask], masked_positions)
+
+ if np.isscalar(index):
+ mask = (labels == index)
+ masked_positions = None
+ if find_positions:
+ masked_positions = positions[mask]
+ return single_group(input[mask], masked_positions)
+
+ # remap labels to unique integers if necessary, or if the largest
+ # label is larger than the number of values.
+ if (not _safely_castable_to_int(labels.dtype) or
+ labels.min() < 0 or labels.max() > labels.size):
+ # remap labels, and indexes
+ unique_labels, labels = np.unique(labels, return_inverse=True)
+ idxs = np.searchsorted(unique_labels, index)
+
+ # make all of idxs valid
+ idxs[idxs >= unique_labels.size] = 0
+ found = (unique_labels[idxs] == index)
+ else:
+ # labels are an integer type, and there aren't too many
+ idxs = np.asanyarray(index, np.int_).copy()
+ found = (idxs >= 0) & (idxs <= labels.max())
+
+ idxs[~ found] = labels.max() + 1
+
+ if find_median:
+ order = np.lexsort((input.ravel(), labels.ravel()))
+ else:
+ order = input.ravel().argsort()
+ input = input.ravel()[order]
+ labels = labels.ravel()[order]
+ if find_positions:
+ positions = positions.ravel()[order]
+
+ result = []
+ if find_min:
+ mins = np.zeros(labels.max() + 2, input.dtype)
+ mins[labels[::-1]] = input[::-1]
+ result += [mins[idxs]]
+ if find_min_positions:
+ minpos = np.zeros(labels.max() + 2, int)
+ minpos[labels[::-1]] = positions[::-1]
+ result += [minpos[idxs]]
+ if find_max:
+ maxs = np.zeros(labels.max() + 2, input.dtype)
+ maxs[labels] = input
+ result += [maxs[idxs]]
+ if find_max_positions:
+ maxpos = np.zeros(labels.max() + 2, int)
+ maxpos[labels] = positions
+ result += [maxpos[idxs]]
+ if find_median:
+ locs = np.arange(len(labels))
+ lo = np.zeros(labels.max() + 2, np.int_)
+ lo[labels[::-1]] = locs[::-1]
+ hi = np.zeros(labels.max() + 2, np.int_)
+ hi[labels] = locs
+ lo = lo[idxs]
+ hi = hi[idxs]
+ # lo is an index to the lowest value in input for each label,
+ # hi is an index to the largest value.
+ # move them to be either the same ((hi - lo) % 2 == 0) or next
+ # to each other ((hi - lo) % 2 == 1), then average.
+ step = (hi - lo) // 2
+ lo += step
+ hi -= step
+ if (np.issubdtype(input.dtype, np.integer)
+ or np.issubdtype(input.dtype, np.bool_)):
+ # avoid integer overflow or boolean addition (gh-12836)
+ result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0]
+ else:
+ result += [(input[lo] + input[hi]) / 2.0]
+
+ return result
+
+
def minimum(input, labels=None, index=None):
    """
    Calculate the minimum of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array of values. For each region defined by `labels`, the minimal
        value of `input` over that region is computed.
    labels : array_like, optional
        An array of integers marking the regions over which the minimum
        value of `input` is to be computed. `labels` must have the same
        shape as `input`. If `labels` is not specified, the minimum over
        the whole array is returned.
    index : array_like, optional
        A list of region labels to include in the computation. If `index`
        is None, the minimum over all elements where `labels` is non-zero
        is returned.

    Returns
    -------
    minimum : float or list of floats
        List of minima of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the minimal value of `input` if `labels` is None,
        and the minimal value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, maximum, median, minimum_position, extrema, sum, mean, variance,
    standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(a)
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))
    [1.0, 4.0, 3.0]
    >>> ndimage.minimum(a)
    0.0
    >>> ndimage.minimum(a, labels=labels)
    1.0

    """
    # _select returns one entry per requested statistic; only the
    # minimum was asked for here, so take the single entry.
    results = _select(input, labels, index, find_min=True)
    return results[0]
+
+
def maximum(input, labels=None, index=None):
    """
    Calculate the maximum of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array of values. For each region defined by `labels`, the maximal
        value of `input` over that region is computed.
    labels : array_like, optional
        An array of integers marking the regions over which the maximum
        value of `input` is to be computed. `labels` must have the same
        shape as `input`. If `labels` is not specified, the maximum over
        the whole array is returned.
    index : array_like, optional
        A list of region labels to include in the computation. If `index`
        is None, the maximum over all elements where `labels` is non-zero
        is returned.

    Returns
    -------
    output : float or list of floats
        List of maxima of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the maximal value of `input` if `labels` is None,
        and the maximal value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(16).reshape((4,4))
    >>> a
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> labels = np.zeros_like(a)
    >>> labels[:2,:2] = 1
    >>> labels[2:, 1:3] = 2
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 0],
           [0, 2, 2, 0],
           [0, 2, 2, 0]])
    >>> from scipy import ndimage
    >>> ndimage.maximum(a)
    15.0
    >>> ndimage.maximum(a, labels=labels, index=[1,2])
    [5.0, 14.0]
    >>> ndimage.maximum(a, labels=labels)
    14.0

    >>> b = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(b)
    >>> labels
    array([[1, 1, 0, 0],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1))
    [5.0, 7.0, 9.0]

    """
    # Only the maximum statistic was requested from _select.
    results = _select(input, labels, index, find_max=True)
    return results[0]
+
+
def median(input, labels=None, index=None):
    """
    Calculate the median of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array of values. For each region defined by `labels`, the median
        value of `input` over that region is computed.
    labels : array_like, optional
        An array of integers marking the regions over which the median
        value of `input` is to be computed. `labels` must have the same
        shape as `input`. If `labels` is not specified, the median over
        the whole array is returned.
    index : array_like, optional
        A list of region labels to include in the computation. If `index`
        is None, the median over all elements where `labels` is non-zero
        is returned.

    Returns
    -------
    median : float or list of floats
        List of medians of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the median value of `input` if `labels` is None,
        and the median value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, minimum, maximum, extrema, sum, mean, variance, standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 1],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(a)
    >>> labels
    array([[1, 1, 0, 2],
           [1, 1, 0, 2],
           [0, 0, 0, 2],
           [3, 3, 0, 0]])
    >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))
    [2.5, 4.0, 6.0]
    >>> ndimage.median(a)
    1.0
    >>> ndimage.median(a, labels=labels)
    3.0

    """
    # Only the median statistic was requested from _select.
    results = _select(input, labels, index, find_median=True)
    return results[0]
+
+
def minimum_position(input, labels=None, index=None):
    """
    Find the positions of the minimums of the values of an array at labels.

    Parameters
    ----------
    input : array_like
        Array of values.
    labels : array_like, optional
        An array of integers marking different regions over which the
        position of the minimum value of `input` is to be computed.
        `labels` must have the same shape as `input`. If `labels` is not
        specified, the location of the first minimum over the whole
        array is returned.

        The `labels` argument only works when `index` is specified.
    index : array_like, optional
        A list of region labels that are taken into account for finding the
        location of the minima. If `index` is None, the ``first`` minimum
        over all elements where `labels` is non-zero is returned.

        The `index` argument only works when `labels` is specified.

    Returns
    -------
    output : list of tuples of ints
        Tuple of ints or list of tuples of ints that specify the location
        of minima of `input` over the regions determined by `labels` and
        whose index is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is
        returned specifying the location of the first minimal value of `input`.

    See Also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[10, 20, 30],
    ...               [40, 80, 100],
    ...               [1, 100, 200]])
    >>> b = np.array([[1, 2, 0, 1],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])

    >>> from scipy import ndimage

    >>> ndimage.minimum_position(a)
    (2, 0)
    >>> ndimage.minimum_position(b)
    (0, 2)

    Features to process can be specified using `labels` and `index`:

    >>> label, pos = ndimage.label(a)
    >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1))
    [(2, 0)]

    >>> label, pos = ndimage.label(b)
    >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1))
    [(0, 0), (0, 3), (3, 1)]

    """
    shape = np.array(np.asarray(input).shape)
    # Element strides for each axis: converts a flat index into N-D
    # coordinates, like np.unravel_index does.
    strides = np.cumprod([1] + list(shape[:0:-1]))[::-1]

    flat = _select(input, labels, index, find_min_positions=True)[0]

    if np.isscalar(flat):
        return tuple((flat // strides) % shape)

    return [tuple(coords) for coords in (flat.reshape(-1, 1) // strides) % shape]
+
+
def maximum_position(input, labels=None, index=None):
    """
    Find the positions of the maximums of the values of an array at labels.

    For each region specified by `labels`, the position of the maximum
    value of `input` within the region is returned.

    Parameters
    ----------
    input : array_like
        Array of values.
    labels : array_like, optional
        An array of integers marking different regions over which the
        position of the maximum value of `input` is to be computed.
        `labels` must have the same shape as `input`. If `labels` is not
        specified, the location of the first maximum over the whole
        array is returned.

        The `labels` argument only works when `index` is specified.
    index : array_like, optional
        A list of region labels that are taken into account for finding the
        location of the maxima. If `index` is None, the first maximum
        over all elements where `labels` is non-zero is returned.

        The `index` argument only works when `labels` is specified.

    Returns
    -------
    output : list of tuples of ints
        List of tuples of ints that specify the location of maxima of
        `input` over the regions determined by `labels` and whose index
        is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is
        returned specifying the location of the ``first`` maximal value
        of `input`.

    See Also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> ndimage.maximum_position(a)
    (3, 0)

    Features to process can be specified using `labels` and `index`:

    >>> lbl = np.array([[0, 1, 2, 3],
    ...                 [0, 1, 2, 3],
    ...                 [0, 1, 2, 3],
    ...                 [0, 1, 2, 3]])
    >>> ndimage.maximum_position(a, lbl, 1)
    (1, 1)

    If no index is given, non-zero `labels` are processed:

    >>> ndimage.maximum_position(a, lbl)
    (2, 3)

    If there are no maxima, the position of the first element is returned:

    >>> ndimage.maximum_position(a, lbl, 2)
    (0, 2)

    """
    shape = np.array(np.asarray(input).shape)
    # Element strides for each axis: converts a flat index into N-D
    # coordinates, like np.unravel_index does.
    strides = np.cumprod([1] + list(shape[:0:-1]))[::-1]

    flat = _select(input, labels, index, find_max_positions=True)[0]

    if np.isscalar(flat):
        return tuple((flat // strides) % shape)

    return [tuple(coords) for coords in (flat.reshape(-1, 1) // strides) % shape]
+
+
def extrema(input, labels=None, index=None):
    """
    Calculate the minimums and maximums of the values of an array
    at labels, along with their positions.

    Parameters
    ----------
    input : ndarray
        N-D image data to process.
    labels : ndarray, optional
        Labels of features in input.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        Labels to include in output. If None (default), all values where
        non-zero `labels` are used.

    Returns
    -------
    minimums, maximums : int or ndarray
        Values of minimums and maximums in each feature.
    min_positions, max_positions : tuple or list of tuples
        Each tuple gives the N-D coordinates of the corresponding minimum
        or maximum.

    See Also
    --------
    maximum, minimum, maximum_position, minimum_position, center_of_mass

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.extrema(a)
    (0, 9, (0, 2), (3, 0))

    Features to process can be specified using `labels` and `index`:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))
    (array([1, 4, 3]),
     array([5, 7, 9]),
     [(0, 0), (1, 3), (3, 1)],
     [(1, 0), (2, 3), (3, 0)])

    If no index is given, non-zero `labels` are processed:

    >>> ndimage.extrema(a, lbl)
    (1, 9, (0, 0), (3, 0))

    """
    shape = np.array(np.asarray(input).shape)
    # Element strides for each axis: converts a flat index into N-D
    # coordinates, like np.unravel_index does.
    strides = np.cumprod([1] + list(shape[:0:-1]))[::-1]

    # _select yields results in the order min, min-positions, max,
    # max-positions when all four are requested.
    mins, min_pos, maxs, max_pos = _select(input, labels, index,
                                           find_min=True,
                                           find_max=True,
                                           find_min_positions=True,
                                           find_max_positions=True)

    if np.isscalar(mins):
        return (mins, maxs, tuple((min_pos // strides) % shape),
                tuple((max_pos // strides) % shape))

    min_coords = [
        tuple(v) for v in (min_pos.reshape(-1, 1) // strides) % shape
    ]
    max_coords = [
        tuple(v) for v in (max_pos.reshape(-1, 1) // strides) % shape
    ]

    return mins, maxs, min_coords, max_coords
+
+
def center_of_mass(input, labels=None, index=None):
    """
    Calculate the center of mass of the values of an array at labels.

    Parameters
    ----------
    input : ndarray
        Data from which to calculate center-of-mass. The masses can either
        be positive or negative.
    labels : ndarray, optional
        Labels for objects in `input`, as generated by `ndimage.label`.
        Only used with `index`. Dimensions must be the same as `input`.
    index : int or sequence of ints, optional
        Labels for which to calculate centers-of-mass. If not specified,
        the combined center of mass of all labels greater than zero
        will be calculated. Only used with `labels`.

    Returns
    -------
    center_of_mass : tuple, or list of tuples
        Coordinates of centers-of-mass.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array(([0,0,0,0],
    ...               [0,1,1,0],
    ...               [0,1,1,0],
    ...               [0,1,1,0]))
    >>> from scipy import ndimage
    >>> ndimage.center_of_mass(a)
    (2.0, 1.5)

    Calculation of multiple objects in an image

    >>> b = np.array(([0,1,1,0],
    ...               [0,1,0,0],
    ...               [0,0,0,0],
    ...               [0,0,1,1],
    ...               [0,0,1,1]))
    >>> lbl = ndimage.label(b)[0]
    >>> ndimage.center_of_mass(b, lbl, [1,2])
    [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]

    Negative masses are also accepted, which can occur for example when
    bias is removed from measured data due to random noise.

    >>> c = np.array(([-1,0,0,0],
    ...               [0,-1,-1,0],
    ...               [0,1,-1,0],
    ...               [0,1,1,0]))
    >>> ndimage.center_of_mass(c)
    (-4.0, 1.0)

    If there are division by zero issues, the function does not raise an
    error but rather issues a RuntimeWarning before returning inf and/or NaN.

    >>> d = np.array([-1, 1])
    >>> ndimage.center_of_mass(d)
    (inf,)
    """
    # NOTE: `sum` is this module's labeled sum, not the Python builtin.
    total_mass = sum(input, labels, index)
    grids = np.ogrid[[slice(0, i) for i in input.shape]]

    # First moment along each axis divided by the total mass.
    results = [
        sum(input * grids[axis].astype(float), labels, index) / total_mass
        for axis in range(input.ndim)
    ]

    if np.isscalar(results[0]):
        return tuple(results)

    return [tuple(v) for v in np.array(results).T]
+
+
def histogram(input, min, max, bins, labels=None, index=None):
    """
    Calculate the histogram of the values of an array, optionally at labels.

    Histogram calculates the frequency of values in an array within bins
    determined by `min`, `max`, and `bins`. The `labels` and `index`
    keywords can limit the scope of the histogram to specified sub-regions
    within the array.

    Parameters
    ----------
    input : array_like
        Data for which to calculate histogram.
    min, max : int
        Minimum and maximum values of range of histogram bins.
    bins : int
        Number of bins.
    labels : array_like, optional
        Labels for objects in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        Label or labels for which to calculate histogram. If None, all values
        where label is greater than zero are used

    Returns
    -------
    hist : ndarray
        Histogram counts.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[ 0.    ,  0.2146,  0.5962,  0.    ],
    ...               [ 0.    ,  0.7778,  0.    ,  0.    ],
    ...               [ 0.    ,  0.    ,  0.    ,  0.    ],
    ...               [ 0.    ,  0.    ,  0.7181,  0.2787],
    ...               [ 0.    ,  0.    ,  0.6573,  0.3094]])
    >>> from scipy import ndimage
    >>> ndimage.histogram(a, 0, 1, 10)
    array([13,  0,  2,  1,  0,  1,  1,  2,  0,  0])

    With labels and no indices, non-zero elements are counted:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.histogram(a, 0, 1, 10, lbl)
    array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])

    Indices can be used to count only certain objects:

    >>> ndimage.histogram(a, 0, 1, 10, lbl, 2)
    array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])

    """
    # `min`/`max` shadow the builtins here; the names are kept because
    # they are part of the public signature.
    edges = np.linspace(min, max, bins + 1)

    def _count(values):
        # Bin the values of one labeled region against the shared edges.
        return np.histogram(values, edges)[0]

    return labeled_comprehension(input, labels, index, _count, object, None,
                                 pass_positions=False)
+
+
def watershed_ift(input, markers, structure=None, output=None):
    """
    Apply watershed from markers using image foresting transform algorithm.

    Parameters
    ----------
    input : array_like
        Input.
    markers : array_like
        Markers are points within each watershed that form the beginning
        of the process. Negative markers are considered background markers
        which are processed after the other markers.
    structure : structure element, optional
        A structuring element defining the connectivity of the object can be
        provided. If None, an element is generated with a squared
        connectivity equal to one.
    output : ndarray, optional
        An output array can optionally be provided. The same shape as input.

    Returns
    -------
    watershed_ift : ndarray
        Output. Same shape as `input`.

    References
    ----------
    .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image
           foresting transform: theory, algorithms, and applications",
           Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.

    """
    input = np.asarray(input)
    # Only unsigned 8/16-bit images are accepted by the C routine.
    if input.dtype.type not in [np.uint8, np.uint16]:
        raise TypeError('only 8 and 16 unsigned inputs are supported')

    if structure is None:
        structure = _morphology.generate_binary_structure(input.ndim, 1)
    structure = np.asarray(structure, dtype=bool)
    if structure.ndim != input.ndim:
        raise RuntimeError('structure and input must have equal rank')
    # Every structure dimension must be exactly 3.
    if any(dim != 3 for dim in structure.shape):
        raise RuntimeError('structure dimensions must be equal to 3')
    if not structure.flags.contiguous:
        structure = structure.copy()

    markers = np.asarray(markers)
    if input.shape != markers.shape:
        raise RuntimeError('input and markers must have equal shape')

    integral_types = [np.int8, np.int16, np.int32, np.int64,
                      np.intc, np.intp]

    if markers.dtype.type not in integral_types:
        raise RuntimeError('marker should be of integer type')

    if isinstance(output, np.ndarray):
        if output.dtype.type not in integral_types:
            raise RuntimeError('output should be of integer type')
    else:
        # No array supplied: allocate one matching the marker dtype.
        output = markers.dtype

    output = _ni_support._get_output(output, input)
    _nd_image.watershed_ift(input, markers, structure, output)
    return output
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/_morphology.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/_morphology.py
new file mode 100644
index 0000000000000000000000000000000000000000..22ada0b130f913021207250714ab860f483b3e1e
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/_morphology.py
@@ -0,0 +1,2537 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import warnings
+import operator
+
+import numpy as np
+from . import _ni_support
+from . import _nd_image
+from . import _filters
+
# Public names exported by this module (consumed by ``from ... import *``
# and re-exported through the scipy.ndimage namespace).
__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
           'binary_dilation', 'binary_opening', 'binary_closing',
           'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes',
           'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing',
           'morphological_gradient', 'morphological_laplace', 'white_tophat',
           'black_tophat', 'distance_transform_bf', 'distance_transform_cdt',
           'distance_transform_edt']
+
+
+def _center_is_true(structure, origin):
+ structure = np.asarray(structure)
+ coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape,
+ origin)])
+ return bool(structure[coor])
+
+
def iterate_structure(structure, iterations, origin=None):
    """
    Iterate a structure by dilating it with itself.

    Parameters
    ----------
    structure : array_like
        Structuring element (an array of bools, for example), to be dilated with
        itself.
    iterations : int
        number of dilations performed on the structure with itself
    origin : optional
        If origin is None, only the iterated structure is returned. If
        not, a tuple of the iterated structure and the modified origin is
        returned.

    Returns
    -------
    iterate_structure : ndarray of bools
        A new structuring element obtained by dilating `structure`
        (`iterations` - 1) times with itself.

    See Also
    --------
    generate_binary_structure

    Examples
    --------
    >>> from scipy import ndimage
    >>> struct = ndimage.generate_binary_structure(2, 1)
    >>> struct.astype(int)
    array([[0, 1, 0],
           [1, 1, 1],
           [0, 1, 0]])
    >>> ndimage.iterate_structure(struct, 2).astype(int)
    array([[0, 0, 1, 0, 0],
           [0, 1, 1, 1, 0],
           [1, 1, 1, 1, 1],
           [0, 1, 1, 1, 0],
           [0, 0, 1, 0, 0]])
    >>> ndimage.iterate_structure(struct, 3).astype(int)
    array([[0, 0, 0, 1, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 1, 1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1, 1, 1],
           [0, 1, 1, 1, 1, 1, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 1, 0, 0, 0]])

    """
    structure = np.asarray(structure)
    if iterations < 2:
        # Nothing to iterate; hand back an independent copy.
        return structure.copy()
    extra = iterations - 1
    # Each extra dilation grows the element by (dim - 1) along each axis.
    shape = [dim + extra * (dim - 1) for dim in structure.shape]
    offsets = [extra * (dim // 2) for dim in structure.shape]
    # Place the seed element in the middle of the enlarged output.
    window = tuple(slice(off, off + dim)
                   for off, dim in zip(offsets, structure.shape))
    out = np.zeros(shape, bool)
    out[window] = structure != 0
    out = binary_dilation(out, structure, iterations=extra)
    if origin is None:
        return out
    origin = _ni_support._normalize_sequence(origin, structure.ndim)
    return out, [iterations * o for o in origin]
+
+
def generate_binary_structure(rank, connectivity):
    """
    Generate a binary structure for binary morphological operations.

    Parameters
    ----------
    rank : int
        Number of dimensions of the array to which the structuring element
        will be applied, as returned by `np.ndim`.
    connectivity : int
        `connectivity` determines which elements of the output array belong
        to the structure, i.e., are considered as neighbors of the central
        element. Elements up to a squared distance of `connectivity` from
        the center are considered neighbors. `connectivity` may range from 1
        (no diagonal elements are neighbors) to `rank` (all elements are
        neighbors).

    Returns
    -------
    output : ndarray of bools
        Structuring element which may be used for binary morphological
        operations, with `rank` dimensions and all dimensions equal to 3.

    See Also
    --------
    iterate_structure, binary_dilation, binary_erosion

    Notes
    -----
    `generate_binary_structure` can only create structuring elements with
    dimensions equal to 3, i.e., minimal dimensions. For larger structuring
    elements, that are useful e.g., for eroding large objects, one may either
    use `iterate_structure`, or create directly custom arrays with
    numpy functions such as `numpy.ones`.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> struct = ndimage.generate_binary_structure(2, 1)
    >>> struct
    array([[False,  True, False],
           [ True,  True,  True],
           [False,  True, False]], dtype=bool)
    >>> a = np.zeros((5,5))
    >>> a[2, 2] = 1
    >>> a
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype)
    >>> b
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype)
    array([[ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 1.,  1.,  1.,  1.,  1.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.]])
    >>> struct = ndimage.generate_binary_structure(2, 2)
    >>> struct
    array([[ True,  True,  True],
           [ True,  True,  True],
           [ True,  True,  True]], dtype=bool)
    >>> struct = ndimage.generate_binary_structure(3, 1)
    >>> struct # no diagonal elements
    array([[[False, False, False],
            [False,  True, False],
            [False, False, False]],
           [[False,  True, False],
            [ True,  True,  True],
            [False,  True, False]],
           [[False, False, False],
            [False,  True, False],
            [False, False, False]]], dtype=bool)

    """
    # Clamp connectivity to at least 1 (a structure always includes the
    # face-adjacent neighbors).
    connectivity = max(connectivity, 1)
    if rank < 1:
        # Zero-dimensional input: the structure is a single True scalar.
        return np.array(True, dtype=bool)
    # Squared city-block distance of every cell of a 3**rank cube from
    # its center; keep the cells within `connectivity`.
    distance = np.add.reduce(np.fabs(np.indices([3] * rank) - 1), 0)
    return distance <= connectivity
+
+
def _binary_erosion(input, structure, iterations, mask, output,
                    border_value, origin, invert, brute_force):
    # Workhorse behind the public binary morphology functions; `invert`
    # is forwarded unchanged to the C kernels (_nd_image.binary_erosion/2).
    try:
        iterations = operator.index(iterations)
    except TypeError as e:
        raise TypeError('iterations parameter should be an integer') from e

    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if structure is None:
        # Default element: squared connectivity one.
        structure = generate_binary_structure(input.ndim, 1)
    else:
        structure = np.asarray(structure, dtype=bool)
    if structure.ndim != input.ndim:
        raise RuntimeError('structure and input must have same dimensionality')
    if not structure.flags.contiguous:
        structure = structure.copy()
    if structure.size < 1:
        raise RuntimeError('structure must not be empty')
    if mask is not None:
        mask = np.asarray(mask)
        if mask.shape != input.shape:
            raise RuntimeError('mask and input must have equal sizes')
    origin = _ni_support._normalize_sequence(origin, input.ndim)
    # Whether the (origin-shifted) center of the structure is set;
    # passed to the C routine and used to pick the iteration strategy.
    cit = _center_is_true(structure, origin)
    if isinstance(output, np.ndarray):
        if np.iscomplexobj(output):
            raise TypeError('Complex output type not supported')
    else:
        output = bool
    output = _ni_support._get_output(output, input)
    temp_needed = np.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    if iterations == 1:
        # Single pass: one call into the C kernel, result written to output.
        _nd_image.binary_erosion(input, structure, mask, output,
                                 border_value, origin, invert, cit, 0)
    elif cit and not brute_force:
        # Incremental mode: the first pass also produces a coordinate list
        # of changed elements, which binary_erosion2 uses to restrict the
        # remaining iterations to candidate pixels.
        changed, coordinate_list = _nd_image.binary_erosion(
            input, structure, mask, output,
            border_value, origin, invert, cit, 1)
        # The second-stage routine expects the mirrored structure, with the
        # origin negated (and shifted by one on even-sized axes).
        structure = structure[tuple([slice(None, None, -1)] *
                                    structure.ndim)]
        for ii in range(len(origin)):
            origin[ii] = -origin[ii]
            if not structure.shape[ii] & 1:
                origin[ii] -= 1
        if mask is not None:
            mask = np.asarray(mask, dtype=np.int8)
        if not structure.flags.contiguous:
            structure = structure.copy()
        _nd_image.binary_erosion2(output, structure, mask, iterations - 1,
                                  origin, invert, coordinate_list)
    else:
        # Brute-force mode: ping-pong between two buffers, one full pass
        # per iteration.
        tmp_in = np.empty_like(input, dtype=bool)
        tmp_out = output
        if iterations >= 1 and not iterations & 1:
            # Even iteration count: start swapped so the final pass
            # writes into `output`.
            tmp_in, tmp_out = tmp_out, tmp_in
        changed = _nd_image.binary_erosion(
            input, structure, mask, tmp_out,
            border_value, origin, invert, cit, 0)
        ii = 1
        # iterations < 1 means: repeat until a pass changes nothing.
        while ii < iterations or (iterations < 1 and changed):
            tmp_in, tmp_out = tmp_out, tmp_in
            changed = _nd_image.binary_erosion(
                tmp_in, structure, mask, tmp_out,
                border_value, origin, invert, cit, 0)
            ii += 1
    if temp_needed:
        # Copy back into the caller-supplied (aliased) output array.
        temp[...] = output
        output = temp
    return output
+
+
def binary_erosion(input, structure=None, iterations=1, mask=None, output=None,
                   border_value=0, origin=0, brute_force=False):
    """
    Multidimensional binary erosion with a given structuring element.

    Binary erosion is a mathematical morphology operation used for image
    processing.

    Parameters
    ----------
    input : array_like
        Binary image to be eroded. Non-zero (True) elements form
        the subset to be eroded.
    structure : array_like, optional
        Structuring element used for the erosion. Non-zero elements are
        considered True. If no structuring element is provided, an element
        is generated with a square connectivity equal to one.
    iterations : int, optional
        The erosion is repeated `iterations` times (one, by default).
        If iterations is less than 1, the erosion is repeated until the
        result does not change anymore.
    mask : array_like, optional
        If a mask is given, only those elements with a True value at
        the corresponding mask element are modified at each iteration.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    brute_force : boolean, optional
        Memory condition: if False, only the pixels whose value was changed in
        the last iteration are tracked as candidates to be updated (eroded) in
        the current iteration; if True all pixels are considered as candidates
        for erosion, regardless of what happened in the previous iteration.
        False by default.

    Returns
    -------
    binary_erosion : ndarray of bools
        Erosion of the input by the structuring element.

    See Also
    --------
    grey_erosion, binary_dilation, binary_closing, binary_opening,
    generate_binary_structure

    Notes
    -----
    Erosion [1]_ is a mathematical morphology operation [2]_ that uses a
    structuring element for shrinking the shapes in an image. The binary
    erosion of an image by a structuring element is the locus of the points
    where a superimposition of the structuring element centered on the point
    is entirely contained in the set of non-zero elements of the image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((7,7), dtype=int)
    >>> a[1:6, 2:5] = 1
    >>> a
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.binary_erosion(a).astype(a.dtype)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> #Erosion removes objects smaller than the structure
    >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])

    """
    # Delegate to the shared helper; the invert flag (0 here) is passed
    # straight through to the underlying C routine.
    invert = 0
    return _binary_erosion(input, structure, iterations, mask, output,
                           border_value, origin, invert, brute_force)
+
+
def binary_dilation(input, structure=None, iterations=1, mask=None,
                    output=None, border_value=0, origin=0,
                    brute_force=False):
    """
    Multidimensional binary dilation with the given structuring element.

    Parameters
    ----------
    input : array_like
        Binary array_like to be dilated. Non-zero (True) elements form
        the subset to be dilated.
    structure : array_like, optional
        Structuring element used for the dilation. Non-zero elements are
        considered True. If no structuring element is provided an element
        is generated with a square connectivity equal to one.
    iterations : int, optional
        The dilation is repeated `iterations` times (one, by default).
        If iterations is less than 1, the dilation is repeated until the
        result does not change anymore. Only an integer of iterations is
        accepted.
    mask : array_like, optional
        If a mask is given, only those elements with a True value at
        the corresponding mask element are modified at each iteration.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    brute_force : boolean, optional
        Memory condition: if False, only the pixels whose value was changed in
        the last iteration are tracked as candidates to be updated (dilated)
        in the current iteration; if True all pixels are considered as
        candidates for dilation, regardless of what happened in the previous
        iteration. False by default.

    Returns
    -------
    binary_dilation : ndarray of bools
        Dilation of the input by the structuring element.

    See Also
    --------
    grey_dilation, binary_erosion, binary_closing, binary_opening,
    generate_binary_structure

    Notes
    -----
    Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
    structuring element for expanding the shapes in an image. The binary
    dilation of an image by a structuring element is the locus of the points
    covered by the structuring element, when its center lies within the
    non-zero points of the image.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((5, 5))
    >>> a[2, 2] = 1
    >>> a
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> ndimage.binary_dilation(a)
    array([[False, False, False, False, False],
           [False, False,  True, False, False],
           [False,  True,  True,  True, False],
           [False, False,  True, False, False],
           [False, False, False, False, False]], dtype=bool)
    >>> ndimage.binary_dilation(a).astype(a.dtype)
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> # 3x3 structuring element with connectivity 1, used by default
    >>> struct1 = ndimage.generate_binary_structure(2, 1)
    >>> struct1
    array([[False,  True, False],
           [ True,  True,  True],
           [False,  True, False]], dtype=bool)
    >>> # 3x3 structuring element with connectivity 2
    >>> struct2 = ndimage.generate_binary_structure(2, 2)
    >>> struct2
    array([[ True,  True,  True],
           [ True,  True,  True],
           [ True,  True,  True]], dtype=bool)
    >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> ndimage.binary_dilation(a, structure=struct1,\\
    ... iterations=2).astype(a.dtype)
    array([[ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 1.,  1.,  1.,  1.,  1.],
           [ 0.,  1.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  0.,  0.]])

    """
    input = np.asarray(input)
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)
    structure = np.asarray(structure)
    # Dilation is implemented as an erosion of the complement (invert
    # flag set below), which requires mirroring the structuring element
    # through its center along every axis.
    structure = structure[(slice(None, None, -1),) * structure.ndim]
    origin = _ni_support._normalize_sequence(origin, input.ndim)
    for axis in range(len(origin)):
        origin[axis] = -origin[axis]
        # An even-sized axis needs one extra shift so that the mirrored
        # structure stays aligned with the requested placement.
        if structure.shape[axis] % 2 == 0:
            origin[axis] -= 1

    return _binary_erosion(input, structure, iterations, mask,
                           output, border_value, origin, 1, brute_force)
+
+
def binary_opening(input, structure=None, iterations=1, output=None,
                   origin=0, mask=None, border_value=0, brute_force=False):
    """
    Multidimensional binary opening with the given structuring element.

    The *opening* of an input image by a structuring element is the
    *dilation* of the *erosion* of the image by the structuring element.

    Parameters
    ----------
    input : array_like
        Binary array_like to be opened. Non-zero (True) elements form
        the subset to be opened.
    structure : array_like, optional
        Structuring element used for the opening. Non-zero elements are
        considered True. If no structuring element is provided an element
        is generated with a square connectivity equal to one (i.e., only
        nearest neighbors are connected to the center, diagonally-connected
        elements are not considered neighbors).
    iterations : int, optional
        The erosion step of the opening, then the dilation step are each
        repeated `iterations` times (one, by default). If `iterations` is
        less than 1, each operation is repeated until the result does
        not change anymore. Only an integer of iterations is accepted.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    mask : array_like, optional
        If a mask is given, only those elements with a True value at
        the corresponding mask element are modified at each iteration.

        .. versionadded:: 1.1.0
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.

        .. versionadded:: 1.1.0
    brute_force : boolean, optional
        Memory condition: if False, only the pixels whose value was changed in
        the last iteration are tracked as candidates to be updated in the
        current iteration; if true all pixels are considered as candidates for
        update, regardless of what happened in the previous iteration.
        False by default.

        .. versionadded:: 1.1.0

    Returns
    -------
    binary_opening : ndarray of bools
        Opening of the input by the structuring element.

    See Also
    --------
    grey_opening, binary_closing, binary_erosion, binary_dilation,
    generate_binary_structure

    Notes
    -----
    *Opening* [1]_ is a mathematical morphology operation [2]_ that
    consists in the succession of an erosion and a dilation of the
    input with the same structuring element. Opening, therefore, removes
    objects smaller than the structuring element.

    Together with *closing* (`binary_closing`), opening can be used for
    noise removal.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((5,5), dtype=int)
    >>> a[1:4, 1:4] = 1; a[4, 4] = 1
    >>> a
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 1]])
    >>> # Opening removes small objects
    >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])
    >>> # Opening can also smooth corners
    >>> ndimage.binary_opening(a).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0]])
    >>> # Opening is the dilation of the erosion of the input
    >>> ndimage.binary_erosion(a).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]])
    >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0]])

    """
    input = np.asarray(input)
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)

    # Opening = erosion followed by dilation with the same structure;
    # only the final dilation writes into the caller-provided output.
    eroded = binary_erosion(input, structure, iterations, mask, None,
                            border_value, origin, brute_force)
    return binary_dilation(eroded, structure, iterations, mask, output,
                           border_value, origin, brute_force)
+
+
def binary_closing(input, structure=None, iterations=1, output=None,
                   origin=0, mask=None, border_value=0, brute_force=False):
    """
    Multidimensional binary closing with the given structuring element.

    The *closing* of an input image by a structuring element is the
    *erosion* of the *dilation* of the image by the structuring element.

    Parameters
    ----------
    input : array_like
        Binary array_like to be closed. Non-zero (True) elements form
        the subset to be closed.
    structure : array_like, optional
        Structuring element used for the closing. Non-zero elements are
        considered True. If no structuring element is provided an element
        is generated with a square connectivity equal to one (i.e., only
        nearest neighbors are connected to the center, diagonally-connected
        elements are not considered neighbors).
    iterations : int, optional
        The dilation step of the closing, then the erosion step are each
        repeated `iterations` times (one, by default). If iterations is
        less than 1, each operation is repeated until the result does
        not change anymore. Only an integer of iterations is accepted.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    mask : array_like, optional
        If a mask is given, only those elements with a True value at
        the corresponding mask element are modified at each iteration.

        .. versionadded:: 1.1.0
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.

        .. versionadded:: 1.1.0
    brute_force : boolean, optional
        Memory condition: if False, only the pixels whose value was changed in
        the last iteration are tracked as candidates to be updated in the
        current iteration; if true all pixels are considered as candidates for
        update, regardless of what happened in the previous iteration.
        False by default.

        .. versionadded:: 1.1.0

    Returns
    -------
    binary_closing : ndarray of bools
        Closing of the input by the structuring element.

    See Also
    --------
    grey_closing, binary_opening, binary_dilation, binary_erosion,
    generate_binary_structure

    Notes
    -----
    *Closing* [1]_ is a mathematical morphology operation [2]_ that
    consists in the succession of a dilation and an erosion of the
    input with the same structuring element. Closing therefore fills
    holes smaller than the structuring element.

    Together with *opening* (`binary_opening`), closing can be used for
    noise removal.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((5,5), dtype=int)
    >>> a[1:-1, 1:-1] = 1; a[2,2] = 0
    >>> a
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 0, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])
    >>> # Closing removes small holes
    >>> ndimage.binary_closing(a).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])
    >>> # Closing is the erosion of the dilation of the input
    >>> ndimage.binary_dilation(a).astype(int)
    array([[0, 1, 1, 1, 0],
           [1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1],
           [0, 1, 1, 1, 0]])
    >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])


    >>> a = np.zeros((7,7), dtype=int)
    >>> a[1:6, 2:5] = 1; a[1:3,3] = 0
    >>> a
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> # In addition to removing holes, closing can also
    >>> # coarsen boundaries with fine hollows.
    >>> ndimage.binary_closing(a).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])

    """
    input = np.asarray(input)
    if structure is None:
        rank = input.ndim
        structure = generate_binary_structure(rank, 1)

    # Closing = dilation followed by erosion with the same structure;
    # only the final erosion writes into the caller-provided output.
    tmp = binary_dilation(input, structure, iterations, mask, None,
                          border_value, origin, brute_force)
    return binary_erosion(tmp, structure, iterations, mask, output,
                          border_value, origin, brute_force)
+
+
def binary_hit_or_miss(input, structure1=None, structure2=None,
                       output=None, origin1=0, origin2=None):
    """
    Multidimensional binary hit-or-miss transform.

    The hit-or-miss transform finds the locations of a given pattern
    inside the input image.

    Parameters
    ----------
    input : array_like (cast to booleans)
        Binary image where a pattern is to be detected.
    structure1 : array_like (cast to booleans), optional
        Part of the structuring element to be fitted to the foreground
        (non-zero elements) of `input`. If no value is provided, a
        structure of square connectivity 1 is chosen.
    structure2 : array_like (cast to booleans), optional
        Second part of the structuring element that has to miss completely
        the foreground. If no value is provided, the complementary of
        `structure1` is taken.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    origin1 : int or tuple of ints, optional
        Placement of the first part of the structuring element `structure1`,
        by default 0 for a centered structure.
    origin2 : int or tuple of ints, optional
        Placement of the second part of the structuring element `structure2`,
        by default 0 for a centered structure. If a value is provided for
        `origin1` and not for `origin2`, then `origin2` is set to `origin1`.

    Returns
    -------
    binary_hit_or_miss : ndarray
        Hit-or-miss transform of `input` with the given structuring
        element (`structure1`, `structure2`).

    See Also
    --------
    binary_erosion

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((7,7), dtype=int)
    >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
    >>> a
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 1, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 0, 0, 0],
           [0, 0, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 1, 1, 0],
           [0, 0, 0, 0, 1, 1, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
    >>> structure1
    array([[1, 0, 0],
           [0, 1, 1],
           [0, 1, 1]])
    >>> # Find the matches of structure1 in the array a
    >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> # Change the origin of the filter
    >>> # origin1=1 is equivalent to origin1=(1,1) here
    >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\
    ... origin1=1).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 0, 0]])

    """
    input = np.asarray(input)
    if structure1 is None:
        structure1 = generate_binary_structure(input.ndim, 1)
    if structure2 is None:
        structure2 = np.logical_not(structure1)
    origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
    origin2 = (origin1 if origin2 is None
               else _ni_support._normalize_sequence(origin2, input.ndim))

    # "Hit": structure1 must fit entirely inside the foreground.
    hit = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
                          0, False)
    writes_into_output = isinstance(output, np.ndarray)
    # "Miss": structure2 must fit entirely inside the background
    # (computed via an inverted erosion, last-but-one flag set to 1).
    miss = _binary_erosion(input, structure2, 1, None, output, 0,
                           origin2, 1, False)
    if writes_into_output:
        np.logical_not(output, output)
        np.logical_and(hit, output, output)
    else:
        np.logical_not(miss, miss)
        return np.logical_and(hit, miss)
+
+
def binary_propagation(input, structure=None, mask=None,
                       output=None, border_value=0, origin=0):
    """
    Multidimensional binary propagation with the given structuring element.

    Parameters
    ----------
    input : array_like
        Binary image to be propagated inside `mask`.
    structure : array_like, optional
        Structuring element used in the successive dilations. The output
        may depend on the structuring element, especially if `mask` has
        several connex components. If no structuring element is
        provided, an element is generated with a squared connectivity equal
        to one.
    mask : array_like, optional
        Binary mask defining the region into which `input` is allowed to
        propagate.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.

    Returns
    -------
    binary_propagation : ndarray
        Binary propagation of `input` inside `mask`.

    Notes
    -----
    This function is functionally equivalent to calling binary_dilation
    with the number of iterations less than one: iterative dilation until
    the result does not change anymore.

    The succession of an erosion and propagation inside the original image
    can be used instead of an *opening* for deleting small objects while
    keeping the contours of larger objects untouched.

    References
    ----------
    .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
    .. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of
        image processing", 1998
        ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> input = np.zeros((8, 8), dtype=int)
    >>> input[2, 2] = 1
    >>> mask = np.zeros((8, 8), dtype=int)
    >>> mask[1:4, 1:4] = mask[4, 4]  = mask[6:8, 6:8] = 1
    >>> input
    array([[0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0]])
    >>> mask
    array([[0, 0, 0, 0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 1, 1],
           [0, 0, 0, 0, 0, 0, 1, 1]])
    >>> ndimage.binary_propagation(input, mask=mask).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.binary_propagation(input, mask=mask,\\
    ... structure=np.ones((3,3))).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 1, 1, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0]])

    >>> # Comparison between opening and erosion+propagation
    >>> a = np.zeros((6,6), dtype=int)
    >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1
    >>> a
    array([[1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0],
           [0, 0, 1, 1, 1, 0],
           [0, 0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 1]])
    >>> ndimage.binary_opening(a).astype(int)
    array([[0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0],
           [0, 0, 1, 1, 1, 0],
           [0, 0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0, 0]])
    >>> b = ndimage.binary_erosion(a)
    >>> b.astype(int)
    array([[0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0]])
    >>> ndimage.binary_propagation(b, mask=a).astype(int)
    array([[0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0],
           [0, 0, 1, 1, 1, 0],
           [0, 0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 0]])

    """
    # Propagation is exactly a conditional dilation iterated to
    # convergence: iterations=-1 repeats until the result is stable.
    return binary_dilation(input, structure, iterations=-1, mask=mask,
                           output=output, border_value=border_value,
                           origin=origin)
+
+
def binary_fill_holes(input, structure=None, output=None, origin=0):
    """
    Fill the holes in binary objects.


    Parameters
    ----------
    input : array_like
        N-D binary array with holes to be filled
    structure : array_like, optional
        Structuring element used in the computation; large-size elements
        make computations faster but may miss holes separated from the
        background by thin regions. The default element (with a square
        connectivity equal to one) yields the intuitive result where all
        holes in the input have been filled.
    output : ndarray, optional
        Array of the same shape as input, into which the output is placed.
        By default, a new array is created.
    origin : int, tuple of ints, optional
        Position of the structuring element.

    Returns
    -------
    out : ndarray
        Transformation of the initial image `input` where holes have been
        filled.

    See Also
    --------
    binary_dilation, binary_propagation, label

    Notes
    -----
    The algorithm used in this function consists in invading the complementary
    of the shapes in `input` from the outer boundary of the image,
    using binary dilations. Holes are not connected to the boundary and are
    therefore not invaded. The result is the complementary subset of the
    invaded region.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology


    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((5, 5), dtype=int)
    >>> a[1:4, 1:4] = 1
    >>> a[2,2] = 0
    >>> a
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 0, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])
    >>> ndimage.binary_fill_holes(a).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])
    >>> # Too big structuring element
    >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int)
    array([[0, 0, 0, 0, 0],
           [0, 1, 1, 1, 0],
           [0, 1, 0, 1, 0],
           [0, 1, 1, 1, 0],
           [0, 0, 0, 0, 0]])

    """
    # Flood the background from the image border (border_value=1 seeds
    # the dilation there); whatever the flood cannot reach is a hole.
    background = np.logical_not(input)
    seeds = np.zeros(background.shape, bool)
    if isinstance(output, np.ndarray):
        binary_dilation(seeds, structure, -1, background, output, 1,
                        origin)
        # Invert in place: un-invaded pixels become the filled objects.
        np.logical_not(output, output)
    else:
        output = binary_dilation(seeds, structure, -1, background, None,
                                 1, origin)
        np.logical_not(output, output)
        return output
+
+
def grey_erosion(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0):
    """
    Calculate a greyscale erosion, using either a structuring element,
    or a footprint corresponding to a flat structuring element.

    Grayscale erosion is a mathematical morphology operation. For the
    simple case of a full and flat structuring element, it can be viewed
    as a minimum filter over a sliding window.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale erosion is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element used for the grayscale
        erosion. Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the grayscale erosion. Non-zero values give the set of
        neighbors of the center over which the minimum is chosen.
    structure : array of ints, optional
        Structuring element used for the grayscale erosion. `structure`
        may be a non-flat structuring element. The `structure` array applies a
        subtractive offset for each pixel in the neighborhood.
    output : array, optional
        An array used for storing the output of the erosion may be provided.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        The `origin` parameter controls the placement of the filter.
        Default 0

    Returns
    -------
    output : ndarray
        Grayscale erosion of `input`.

    See Also
    --------
    binary_erosion, grey_dilation, grey_opening, grey_closing
    generate_binary_structure, minimum_filter

    Notes
    -----
    The grayscale erosion of an image input by a structuring element s defined
    over a domain E is given by:

    (input+s)(x) = min {input(y) - s(x-y), for y in E}

    In particular, for structuring elements defined as
    s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
    input image inside a sliding window defined by E.

    Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((7,7), dtype=int)
    >>> a[1:6, 1:6] = 3
    >>> a[4,4] = 2; a[2,3] = 1
    >>> a
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 3, 3, 3, 3, 3, 0],
           [0, 3, 3, 1, 3, 3, 0],
           [0, 3, 3, 3, 3, 3, 0],
           [0, 3, 3, 3, 2, 3, 0],
           [0, 3, 3, 3, 3, 3, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.grey_erosion(a, size=(3,3))
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 3, 2, 2, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> footprint = ndimage.generate_binary_structure(2, 1)
    >>> footprint
    array([[False,  True, False],
           [ True,  True,  True],
           [False,  True, False]], dtype=bool)
    >>> # Diagonally-connected elements are not considered neighbors
    >>> ndimage.grey_erosion(a, footprint=footprint)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 3, 1, 2, 0, 0],
           [0, 0, 3, 2, 2, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])

    """
    if all(arg is None for arg in (size, footprint, structure)):
        raise ValueError("size, footprint, or structure must be specified")

    # Final flag 1 selects the minimum operation in the shared
    # min/max filter implementation.
    return _filters._min_or_max_filter(input, size, footprint, structure,
                                       output, mode, cval, origin, 1)
+
+
def grey_dilation(input, size=None, footprint=None, structure=None,
                  output=None, mode="reflect", cval=0.0, origin=0):
    """
    Calculate a greyscale dilation, using either a structuring element,
    or a footprint corresponding to a flat structuring element.

    Grayscale dilation is a mathematical morphology operation. For the
    simple case of a full and flat structuring element, it can be viewed
    as a maximum filter over a sliding window.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale dilation is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element used for the grayscale
        dilation. Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the grayscale dilation. Non-zero values give the set of
        neighbors of the center over which the maximum is chosen.
    structure : array of ints, optional
        Structuring element used for the grayscale dilation. `structure`
        may be a non-flat structuring element. The `structure` array applies an
        additive offset for each pixel in the neighborhood.
    output : array, optional
        An array used for storing the output of the dilation may be provided.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        The `origin` parameter controls the placement of the filter.
        Default 0

    Returns
    -------
    grey_dilation : ndarray
        Grayscale dilation of `input`.

    See Also
    --------
    binary_dilation, grey_erosion, grey_closing, grey_opening
    generate_binary_structure, maximum_filter

    Notes
    -----
    The grayscale dilation of an image input by a structuring element s defined
    over a domain E is given by:

    (input+s)(x) = max {input(y) + s(x-y), for y in E}

    In particular, for structuring elements defined as
    s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
    input image inside a sliding window defined by E.

    Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((7,7), dtype=int)
    >>> a[2:5, 2:5] = 1
    >>> a[4,4] = 2; a[2,3] = 3
    >>> a
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 3, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 2, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.grey_dilation(a, size=(3,3))
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 1, 3, 3, 3, 1, 0],
           [0, 1, 3, 3, 3, 1, 0],
           [0, 1, 3, 3, 3, 2, 0],
           [0, 1, 1, 2, 2, 2, 0],
           [0, 1, 1, 2, 2, 2, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.grey_dilation(a, footprint=np.ones((3,3)))
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 1, 3, 3, 3, 1, 0],
           [0, 1, 3, 3, 3, 1, 0],
           [0, 1, 3, 3, 3, 2, 0],
           [0, 1, 1, 2, 2, 2, 0],
           [0, 1, 1, 2, 2, 2, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> s = ndimage.generate_binary_structure(2,1)
    >>> s
    array([[False,  True, False],
           [ True,  True,  True],
           [False,  True, False]], dtype=bool)
    >>> ndimage.grey_dilation(a, footprint=s)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 3, 1, 0, 0],
           [0, 1, 3, 3, 3, 1, 0],
           [0, 1, 1, 3, 2, 1, 0],
           [0, 1, 1, 2, 2, 2, 0],
           [0, 0, 1, 1, 2, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3)))
    array([[1, 1, 1, 1, 1, 1, 1],
           [1, 2, 4, 4, 4, 2, 1],
           [1, 2, 4, 4, 4, 2, 1],
           [1, 2, 4, 4, 4, 3, 1],
           [1, 2, 2, 3, 3, 3, 1],
           [1, 2, 2, 3, 3, 3, 1],
           [1, 1, 1, 1, 1, 1, 1]])

    """
    if size is None and footprint is None and structure is None:
        raise ValueError("size, footprint, or structure must be specified")

    def _mirrored(element):
        # Reflect an array through its center (reverse every axis);
        # dilation uses the mirrored structuring element of the
        # corresponding erosion.
        element = np.asarray(element)
        return element[(slice(None, None, -1),) * element.ndim]

    if structure is not None:
        structure = _mirrored(structure)
    if footprint is not None:
        footprint = _mirrored(footprint)

    input = np.asarray(input)
    origin = _ni_support._normalize_sequence(origin, input.ndim)
    for axis in range(len(origin)):
        origin[axis] = -origin[axis]
        # Determine the filter extent along this axis from whichever
        # element description was supplied (footprint wins, then
        # structure, then size).
        if footprint is not None:
            extent = footprint.shape[axis]
        elif structure is not None:
            extent = structure.shape[axis]
        else:
            extent = size if np.isscalar(size) else size[axis]
        # Even-sized axes need one extra shift to stay aligned after
        # mirroring.
        if extent % 2 == 0:
            origin[axis] -= 1

    # Final flag 0 selects the maximum operation in the shared
    # min/max filter implementation.
    return _filters._min_or_max_filter(input, size, footprint, structure,
                                       output, mode, cval, origin, 0)
+
+
def grey_opening(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0):
    """
    Multidimensional grayscale opening.

    A grayscale opening is equivalent to a grayscale erosion followed by a
    grayscale dilation, both performed with the same structuring element.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale opening is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element used for the grayscale
        opening. Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the grayscale opening.
    structure : array of ints, optional
        Structuring element used for the grayscale opening. `structure`
        may be a non-flat structuring element. The `structure` array applies
        offsets to the pixels in a neighborhood (the offset is additive
        during dilation and subtractive during erosion).
    output : array, optional
        An array in which to store the output of the opening may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Determines how the array borders are handled; `cval` is the fill
        value used when `mode` is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default is 0.

    Returns
    -------
    grey_opening : ndarray
        Result of the grayscale opening of `input` with `structure`.

    See Also
    --------
    binary_opening, grey_dilation, grey_erosion, grey_closing
    generate_binary_structure

    Notes
    -----
    With a flat structuring element, a grayscale opening smoothens high
    local maxima, whereas a binary opening erases small objects.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology

    """
    # `footprint` takes precedence when both are supplied; tell the caller
    # that `size` will be ignored.
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    # Opening = erosion followed by dilation; only the final dilation writes
    # into the caller-provided `output`.
    eroded = grey_erosion(input, size, footprint, structure, None, mode,
                          cval, origin)
    return grey_dilation(eroded, size, footprint, structure, output, mode,
                         cval, origin)
+
+
def grey_closing(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0):
    """
    Multidimensional grayscale closing.

    A grayscale closing is equivalent to a grayscale dilation followed by a
    grayscale erosion, both performed with the same structuring element.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale closing is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element used for the grayscale
        closing. Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the grayscale closing.
    structure : array of ints, optional
        Structuring element used for the grayscale closing. `structure`
        may be a non-flat structuring element. The `structure` array applies
        offsets to the pixels in a neighborhood (the offset is additive
        during dilation and subtractive during erosion).
    output : array, optional
        An array in which to store the output of the closing may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Determines how the array borders are handled; `cval` is the fill
        value used when `mode` is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default is 0.

    Returns
    -------
    grey_closing : ndarray
        Result of the grayscale closing of `input` with `structure`.

    See Also
    --------
    binary_closing, grey_dilation, grey_erosion, grey_opening,
    generate_binary_structure

    Notes
    -----
    With a flat structuring element, a grayscale closing smoothens deep
    local minima, whereas a binary closing fills small holes.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology

    """
    # `footprint` takes precedence when both are supplied; tell the caller
    # that `size` will be ignored.
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    # Closing = dilation followed by erosion; only the final erosion writes
    # into the caller-provided `output`.
    dilated = grey_dilation(input, size, footprint, structure, None, mode,
                            cval, origin)
    return grey_erosion(dilated, size, footprint, structure, output, mode,
                        cval, origin)
+
+
def morphological_gradient(input, size=None, footprint=None, structure=None,
                           output=None, mode="reflect", cval=0.0, origin=0):
    """
    Multidimensional morphological gradient.

    The morphological gradient is the pointwise difference between a
    grayscale dilation and a grayscale erosion of the input, both computed
    with the same structuring element.

    Parameters
    ----------
    input : array_like
        Array over which to compute the morphological gradient.
    size : tuple of ints
        Shape of a flat and full structuring element used for the
        mathematical morphology operations. Optional if `footprint` or
        `structure` is provided. A larger `size` yields a more blurred
        gradient.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the morphology operations. Larger footprints give a more
        blurred morphological gradient.
    structure : array of ints, optional
        Structuring element used for the morphology operations. `structure`
        may be a non-flat structuring element. The `structure` array applies
        offsets to the pixels in a neighborhood (the offset is additive
        during dilation and subtractive during erosion).
    output : array, optional
        An array in which to store the output of the morphological gradient
        may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Determines how the array borders are handled; `cval` is the fill
        value used when `mode` is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default is 0.

    Returns
    -------
    morphological_gradient : ndarray
        Morphological gradient of `input`.

    See Also
    --------
    grey_dilation, grey_erosion, gaussian_gradient_magnitude

    Notes
    -----
    For a flat structuring element, the morphological gradient at a given
    point is the maximal difference between input elements covered by the
    structuring element centered on that point.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology

    """
    dilated = grey_dilation(input, size, footprint, structure, None, mode,
                            cval, origin)
    if not isinstance(output, np.ndarray):
        # No preallocated output: simply return dilation - erosion.
        eroded = grey_erosion(input, size, footprint, structure, None,
                              mode, cval, origin)
        return dilated - eroded
    # Caller supplied an output array: compute the erosion directly into it,
    # then overwrite it in place with dilation - erosion.
    grey_erosion(input, size, footprint, structure, output, mode,
                 cval, origin)
    return np.subtract(dilated, output, output)
+
+
def morphological_laplace(input, size=None, footprint=None,
                          structure=None, output=None,
                          mode="reflect", cval=0.0, origin=0):
    """
    Multidimensional morphological laplace.

    Computed pointwise as ``dilation + erosion - 2 * input``, with the
    dilation and erosion performed with the same structuring element.

    Parameters
    ----------
    input : array_like
        Input.
    size : tuple of ints
        Shape of a flat and full structuring element used for the
        mathematical morphology operations. Optional if `footprint` or
        `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the morphology operations.
    structure : array of ints, optional
        Structuring element used for the morphology operations. `structure`
        may be a non-flat structuring element. The `structure` array applies
        offsets to the pixels in a neighborhood (the offset is additive
        during dilation and subtractive during erosion).
    output : ndarray, optional
        An output array can optionally be provided.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled.
        For 'constant' mode, values beyond borders are set to be `cval`.
        Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if mode is 'constant'.
        Default is 0.0.
    origin : origin, optional
        The origin parameter controls the placement of the filter.

    Returns
    -------
    morphological_laplace : ndarray
        Output

    """
    dilated = grey_dilation(input, size, footprint, structure, None, mode,
                            cval, origin)
    if isinstance(output, np.ndarray):
        # Erode straight into the caller's array, then accumulate
        # dilation + erosion - input - input in place.
        grey_erosion(input, size, footprint, structure, output, mode,
                     cval, origin)
        np.add(dilated, output, output)
        np.subtract(output, input, output)
        return np.subtract(output, input, output)
    # No preallocated output: reuse the erosion result as the accumulator.
    eroded = grey_erosion(input, size, footprint, structure, None, mode,
                          cval, origin)
    np.add(dilated, eroded, eroded)
    np.subtract(eroded, input, eroded)
    np.subtract(eroded, input, eroded)
    return eroded
+
+
def white_tophat(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0):
    """
    Multidimensional white tophat filter.

    Computed as ``input - grey_opening(input)``, i.e. the input minus its
    grayscale opening; it extracts bright features smaller than the
    structuring element.

    Parameters
    ----------
    input : array_like
        Input.
    size : tuple of ints
        Shape of a flat and full structuring element used for the filter.
        Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of elements of a flat structuring element
        used for the white tophat filter.
    structure : array of ints, optional
        Structuring element used for the filter. `structure` may be a
        non-flat structuring element. The `structure` array applies offsets
        to the pixels in a neighborhood (the offset is additive during
        dilation and subtractive during erosion).
    output : array, optional
        An array in which to store the output of the filter may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Determines how the array borders are handled; `cval` is the fill
        value used when `mode` is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'.
        Default is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default is 0.

    Returns
    -------
    output : ndarray
        Result of the filter of `input` with `structure`.

    See Also
    --------
    black_tophat

    """
    # `footprint` takes precedence when both are supplied; tell the caller
    # that `size` will be ignored.
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    # Grayscale opening: erosion followed by dilation; the dilation writes
    # into `output` when one was supplied.
    opened = grey_erosion(input, size, footprint, structure, None, mode,
                          cval, origin)
    opened = grey_dilation(opened, size, footprint, structure, output, mode,
                           cval, origin)
    if opened is None:
        # Defensive: fall back to the caller-supplied array.
        opened = output

    # tophat = input - opening; for boolean arrays subtraction is not
    # defined, so use XOR (equivalent for 0/1 values).
    if input.dtype == np.bool_ and opened.dtype == np.bool_:
        np.bitwise_xor(input, opened, out=opened)
    else:
        np.subtract(input, opened, out=opened)
    return opened
+
+
def black_tophat(input, size=None, footprint=None,
                 structure=None, output=None, mode="reflect",
                 cval=0.0, origin=0):
    """
    Multidimensional black tophat filter.

    Computed as ``grey_closing(input) - input``, i.e. the grayscale closing
    of the input minus the input; it extracts dark features smaller than
    the structuring element.

    Parameters
    ----------
    input : array_like
        Input.
    size : tuple of ints, optional
        Shape of a flat and full structuring element used for the filter.
        Optional if `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element
        used for the black tophat filter.
    structure : array of ints, optional
        Structuring element used for the filter. `structure` may be a
        non-flat structuring element. The `structure` array applies offsets
        to the pixels in a neighborhood (the offset is additive during
        dilation and subtractive during erosion).
    output : array, optional
        An array in which to store the output of the filter may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Determines how the array borders are handled; `cval` is the fill
        value used when `mode` is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default is 0.

    Returns
    -------
    black_tophat : ndarray
        Result of the filter of `input` with `structure`.

    See Also
    --------
    white_tophat, grey_opening, grey_closing

    """
    # `footprint` takes precedence when both are supplied; tell the caller
    # that `size` will be ignored.
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    # Grayscale closing: dilation followed by erosion; the erosion writes
    # into `output` when one was supplied.
    closed = grey_dilation(input, size, footprint, structure, None, mode,
                           cval, origin)
    closed = grey_erosion(closed, size, footprint, structure, output, mode,
                          cval, origin)
    if closed is None:
        # Defensive: fall back to the caller-supplied array.
        closed = output

    # tophat = closing - input; for boolean arrays subtraction is not
    # defined, so use XOR (equivalent for 0/1 values).
    if input.dtype == np.bool_ and closed.dtype == np.bool_:
        np.bitwise_xor(closed, input, out=closed)
    else:
        np.subtract(closed, input, out=closed)
    return closed
+
+
def distance_transform_bf(input, metric="euclidean", sampling=None,
                          return_distances=True, return_indices=False,
                          distances=None, indices=None):
    """
    Distance transform function by a brute force algorithm.

    This function calculates the distance transform of the `input`, by
    replacing each foreground (non-zero) element, with its
    shortest distance to the background (any zero-valued element).

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element to each foreground element is returned in a separate array.

    Parameters
    ----------
    input : array_like
        Input
    metric : {'euclidean', 'taxicab', 'chessboard'}, optional
        'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
        The default is 'euclidean'.
    sampling : float, or sequence of float, optional
        This parameter is only used when `metric` is 'euclidean'.
        Spacing of elements along each dimension. If a sequence, must be of
        length equal to the input rank; if a single number, this is used for
        all axes. If not specified, a grid spacing of unity is implied.
    return_distances : bool, optional
        Whether to calculate the distance transform.
        Default is True.
    return_indices : bool, optional
        Whether to calculate the feature transform.
        Default is False.
    distances : ndarray, optional
        An output array to store the calculated distance transform, instead of
        returning it.
        `return_distances` must be True.
        It must be the same shape as `input`, and of type float64 if `metric`
        is 'euclidean', uint32 otherwise.
    indices : int32 ndarray, optional
        An output array to store the calculated feature transform, instead of
        returning it.
        `return_indices` must be True.
        Its shape must be `(input.ndim,) + input.shape`.

    Returns
    -------
    distances : ndarray, optional
        The calculated distance transform. Returned only when
        `return_distances` is True and `distances` is not supplied.
        It will have the same shape as the input array.
    indices : int32 ndarray, optional
        The calculated feature transform. It has an input-shaped array for each
        dimension of the input. See distance_transform_edt documentation for an
        example.
        Returned only when `return_indices` is True and `indices` is not
        supplied.

    See Also
    --------
    distance_transform_cdt : Faster distance transform for taxicab and
                             chessboard metrics
    distance_transform_edt : Faster distance transform for euclidean metric

    Notes
    -----
    This function employs a slow brute force algorithm. See also the
    function `distance_transform_cdt` for more efficient taxicab [1]_ and
    chessboard algorithms [2]_.

    References
    ----------
    .. [1] Taxicab distance. Wikipedia, 2023.
           https://en.wikipedia.org/wiki/Taxicab_geometry
    .. [2] Chessboard distance. Wikipedia, 2023.
           https://en.wikipedia.org/wiki/Chebyshev_distance

    Examples
    --------
    Import the necessary modules.

    >>> import numpy as np
    >>> from scipy.ndimage import distance_transform_bf
    >>> import matplotlib.pyplot as plt
    >>> from mpl_toolkits.axes_grid1 import ImageGrid

    First, we create a toy binary image.

    >>> def add_circle(center_x, center_y, radius, image, fillvalue=1):
    ...     # fill circular area with 1
    ...     xx, yy = np.mgrid[:image.shape[0], :image.shape[1]]
    ...     circle = (xx - center_x) ** 2 + (yy - center_y) ** 2
    ...     circle_shape = np.sqrt(circle) < radius
    ...     image[circle_shape] = fillvalue
    ...     return image
    >>> image = np.zeros((100, 100), dtype=np.uint8)
    >>> image[35:65, 20:80] = 1
    >>> image = add_circle(28, 65, 10, image)
    >>> image = add_circle(37, 30, 10, image)
    >>> image = add_circle(70, 45, 20, image)
    >>> image = add_circle(45, 80, 10, image)

    Next, we set up the figure.

    >>> fig = plt.figure(figsize=(8, 8))  # set up the figure structure
    >>> grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=(0.4, 0.3),
    ...                  label_mode="1", share_all=True,
    ...                  cbar_location="right", cbar_mode="each",
    ...                  cbar_size="7%", cbar_pad="2%")
    >>> for ax in grid:
    ...     ax.axis('off')  # remove axes from images

    The top left image is the original binary image.

    >>> binary_image = grid[0].imshow(image, cmap='gray')
    >>> cbar_binary_image = grid.cbar_axes[0].colorbar(binary_image)
    >>> cbar_binary_image.set_ticks([0, 1])
    >>> grid[0].set_title("Binary image: foreground in white")

    The distance transform calculates the distance between foreground pixels
    and the image background according to a distance metric. Available metrics
    in `distance_transform_bf` are: ``euclidean`` (default), ``taxicab``
    and ``chessboard``. The top right image contains the distance transform
    based on the ``euclidean`` metric.

    >>> distance_transform_euclidean = distance_transform_bf(image)
    >>> euclidean_transform = grid[1].imshow(distance_transform_euclidean,
    ...                                      cmap='gray')
    >>> cbar_euclidean = grid.cbar_axes[1].colorbar(euclidean_transform)
    >>> colorbar_ticks = [0, 10, 20]
    >>> cbar_euclidean.set_ticks(colorbar_ticks)
    >>> grid[1].set_title("Euclidean distance")

    The lower left image contains the distance transform using the ``taxicab``
    metric.

    >>> distance_transform_taxicab = distance_transform_bf(image,
    ...                                                    metric='taxicab')
    >>> taxicab_transformation = grid[2].imshow(distance_transform_taxicab,
    ...                                         cmap='gray')
    >>> cbar_taxicab = grid.cbar_axes[2].colorbar(taxicab_transformation)
    >>> cbar_taxicab.set_ticks(colorbar_ticks)
    >>> grid[2].set_title("Taxicab distance")

    Finally, the lower right image contains the distance transform using the
    ``chessboard`` metric.

    >>> distance_transform_cb = distance_transform_bf(image,
    ...                                               metric='chessboard')
    >>> chessboard_transformation = grid[3].imshow(distance_transform_cb,
    ...                                            cmap='gray')
    >>> cbar_taxicab = grid.cbar_axes[3].colorbar(chessboard_transformation)
    >>> cbar_taxicab.set_ticks(colorbar_ticks)
    >>> grid[3].set_title("Chessboard distance")
    >>> plt.show()

    """
    ft_inplace = isinstance(indices, np.ndarray)
    dt_inplace = isinstance(distances, np.ndarray)
    # Reject inconsistent combinations of the return flags and the
    # user-supplied output arrays.
    _distance_tranform_arg_check(
        dt_inplace, ft_inplace, return_distances, return_indices
    )

    # Build the three-valued map consumed by the C routine: 1 for
    # foreground, -1 for background pixels touching the foreground (the set
    # added by one full-connectivity binary dilation), 0 elsewhere.
    tmp1 = np.asarray(input) != 0
    struct = generate_binary_structure(tmp1.ndim, tmp1.ndim)
    tmp2 = binary_dilation(tmp1, struct)
    tmp2 = np.logical_xor(tmp1, tmp2)
    tmp1 = tmp1.astype(np.int8) - tmp2.astype(np.int8)
    # Translate the metric name into the integer code the C routine expects.
    metric = metric.lower()
    if metric == 'euclidean':
        metric = 1
    elif metric in ['taxicab', 'cityblock', 'manhattan']:
        metric = 2
    elif metric == 'chessboard':
        metric = 3
    else:
        raise RuntimeError('distance metric not supported')
    if sampling is not None:
        # Per-axis grid spacing; the C routine requires a contiguous
        # float64 array of length input.ndim.
        sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim)
        sampling = np.asarray(sampling, dtype=np.float64)
        if not sampling.flags.contiguous:
            sampling = sampling.copy()
    if return_indices:
        # Feature transform buffer: the C routine fills it with the flat
        # index of the nearest background element for every pixel.
        ft = np.zeros(tmp1.shape, dtype=np.int32)
    else:
        ft = None
    if return_distances:
        if distances is None:
            # Euclidean distances are fractional (float64); taxicab and
            # chessboard distances are integral (uint32).
            if metric == 1:
                dt = np.zeros(tmp1.shape, dtype=np.float64)
            else:
                dt = np.zeros(tmp1.shape, dtype=np.uint32)
        else:
            # Validate the user-supplied output before handing it to C.
            if distances.shape != tmp1.shape:
                raise RuntimeError('distances array has wrong shape')
            if metric == 1:
                if distances.dtype.type != np.float64:
                    raise RuntimeError('distances array must be float64')
            else:
                if distances.dtype.type != np.uint32:
                    raise RuntimeError('distances array must be uint32')
            dt = distances
    else:
        dt = None

    # Fills `dt` and/or `ft` in place; either may be None.
    _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft)
    if return_indices:
        if isinstance(indices, np.ndarray):
            if indices.dtype.type != np.int32:
                raise RuntimeError('indices array must be int32')
            if indices.shape != (tmp1.ndim,) + tmp1.shape:
                raise RuntimeError('indices array has wrong shape')
            # NOTE(review): the loop below gathers from tmp2's *prior*
            # contents; with a fresh np.indices array that is the identity
            # coordinate grid, but a user-supplied `indices` array is used
            # as-is — confirm callers are expected to pass an np.indices-like
            # array here.
            tmp2 = indices
        else:
            tmp2 = np.indices(tmp1.shape, dtype=np.int32)
        # Convert the flat nearest-background indices in `ft` into one
        # input-shaped coordinate array per dimension.
        ft = np.ravel(ft)
        for ii in range(tmp2.shape[0]):
            rtmp = np.ravel(tmp2[ii, ...])[ft]
            rtmp.shape = tmp1.shape
            tmp2[ii, ...] = rtmp
        ft = tmp2

    # construct and return the result: only arrays that were not written
    # in place are returned.
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)

    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
+
+
def distance_transform_cdt(input, metric='chessboard', return_distances=True,
                           return_indices=False, distances=None, indices=None):
    """
    Distance transform for chamfer type of transforms.

    This function calculates the distance transform of the `input`, by
    replacing each foreground (non-zero) element, with its
    shortest distance to the background (any zero-valued element).

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element to each foreground element is returned in a separate array.

    Parameters
    ----------
    input : array_like
        Input. Values of 0 are treated as background.
    metric : {'chessboard', 'taxicab'} or array_like, optional
        The `metric` determines the type of chamfering that is done. If the
        `metric` is equal to 'taxicab' a structure is generated using
        `generate_binary_structure` with a squared distance equal to 1. If
        the `metric` is equal to 'chessboard', a `metric` is generated
        using `generate_binary_structure` with a squared distance equal to
        the dimensionality of the array. These choices correspond to the
        common interpretations of the 'taxicab' and the 'chessboard'
        distance metrics in two dimensions.
        A custom metric may be provided, in the form of a matrix where
        each dimension has a length of three.
        'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
        The default is 'chessboard'.
    return_distances : bool, optional
        Whether to calculate the distance transform.
        Default is True.
    return_indices : bool, optional
        Whether to calculate the feature transform.
        Default is False.
    distances : int32 ndarray, optional
        An output array to store the calculated distance transform, instead of
        returning it.
        `return_distances` must be True.
        It must be the same shape as `input`.
    indices : int32 ndarray, optional
        An output array to store the calculated feature transform, instead of
        returning it.
        `return_indices` must be True.
        Its shape must be `(input.ndim,) + input.shape`.

    Returns
    -------
    distances : int32 ndarray, optional
        The calculated distance transform. Returned only when
        `return_distances` is True, and `distances` is not supplied.
        It will have the same shape as the input array.
    indices : int32 ndarray, optional
        The calculated feature transform. It has an input-shaped array for each
        dimension of the input. See distance_transform_edt documentation for an
        example.
        Returned only when `return_indices` is True, and `indices` is not
        supplied.

    See Also
    --------
    distance_transform_edt : Fast distance transform for euclidean metric
    distance_transform_bf : Distance transform for different metrics using
                            a slower brute force algorithm

    Examples
    --------
    Import the necessary modules.

    >>> import numpy as np
    >>> from scipy.ndimage import distance_transform_cdt
    >>> import matplotlib.pyplot as plt
    >>> from mpl_toolkits.axes_grid1 import ImageGrid

    First, we create a toy binary image.

    >>> def add_circle(center_x, center_y, radius, image, fillvalue=1):
    ...     # fill circular area with 1
    ...     xx, yy = np.mgrid[:image.shape[0], :image.shape[1]]
    ...     circle = (xx - center_x) ** 2 + (yy - center_y) ** 2
    ...     circle_shape = np.sqrt(circle) < radius
    ...     image[circle_shape] = fillvalue
    ...     return image
    >>> image = np.zeros((100, 100), dtype=np.uint8)
    >>> image[35:65, 20:80] = 1
    >>> image = add_circle(28, 65, 10, image)
    >>> image = add_circle(37, 30, 10, image)
    >>> image = add_circle(70, 45, 20, image)
    >>> image = add_circle(45, 80, 10, image)

    Next, we set up the figure.

    >>> fig = plt.figure(figsize=(5, 15))
    >>> grid = ImageGrid(fig, 111, nrows_ncols=(3, 1), axes_pad=(0.5, 0.3),
    ...                  label_mode="1", share_all=True,
    ...                  cbar_location="right", cbar_mode="each",
    ...                  cbar_size="7%", cbar_pad="2%")
    >>> for ax in grid:
    ...     ax.axis('off')
    >>> top, middle, bottom = grid
    >>> colorbar_ticks = [0, 10, 20]

    The top image contains the original binary image.

    >>> binary_image = top.imshow(image, cmap='gray')
    >>> cbar_binary_image = top.cax.colorbar(binary_image)
    >>> cbar_binary_image.set_ticks([0, 1])
    >>> top.set_title("Binary image: foreground in white")

    The middle image contains the distance transform using the ``taxicab``
    metric.

    >>> distance_taxicab = distance_transform_cdt(image, metric="taxicab")
    >>> taxicab_transform = middle.imshow(distance_taxicab, cmap='gray')
    >>> cbar_taxicab = middle.cax.colorbar(taxicab_transform)
    >>> cbar_taxicab.set_ticks(colorbar_ticks)
    >>> middle.set_title("Taxicab metric")

    The bottom image contains the distance transform using the ``chessboard``
    metric.

    >>> distance_chessboard = distance_transform_cdt(image,
    ...                                              metric="chessboard")
    >>> chessboard_transform = bottom.imshow(distance_chessboard, cmap='gray')
    >>> cbar_chessboard = bottom.cax.colorbar(chessboard_transform)
    >>> cbar_chessboard.set_ticks(colorbar_ticks)
    >>> bottom.set_title("Chessboard metric")
    >>> plt.tight_layout()
    >>> plt.show()

    """
    ft_inplace = isinstance(indices, np.ndarray)
    dt_inplace = isinstance(distances, np.ndarray)
    # Reject inconsistent combinations of the return flags and the
    # user-supplied output arrays.
    _distance_tranform_arg_check(
        dt_inplace, ft_inplace, return_distances, return_indices
    )
    input = np.asarray(input)
    if isinstance(metric, str):
        # Named metrics are translated into a 3x...x3 chamfer neighborhood:
        # connectivity 1 for taxicab, full connectivity for chessboard.
        if metric in ['taxicab', 'cityblock', 'manhattan']:
            rank = input.ndim
            metric = generate_binary_structure(rank, 1)
        elif metric == 'chessboard':
            rank = input.ndim
            metric = generate_binary_structure(rank, rank)
        else:
            raise ValueError('invalid metric provided')
    else:
        # Custom metric: must be an array with every dimension of length 3.
        try:
            metric = np.asarray(metric)
        except Exception as e:
            raise ValueError('invalid metric provided') from e
        for s in metric.shape:
            if s != 3:
                raise ValueError('metric sizes must be equal to 3')

    # The C routine requires a contiguous metric array.
    if not metric.flags.contiguous:
        metric = metric.copy()
    if dt_inplace:
        if distances.dtype.type != np.int32:
            raise ValueError('distances must be of int32 type')
        if distances.shape != input.shape:
            raise ValueError('distances has wrong shape')
        dt = distances
        # -1 marks foreground pixels whose distance is still unknown;
        # background pixels start (and stay) at 0.
        dt[...] = np.where(input, -1, 0).astype(np.int32)
    else:
        dt = np.where(input, -1, 0).astype(np.int32)

    rank = dt.ndim
    if return_indices:
        # Seed the feature transform with each pixel's own flat index; the
        # chamfer sweeps propagate the flat index of the nearest background.
        ft = np.arange(dt.size, dtype=np.int32)
        ft.shape = dt.shape
    else:
        ft = None

    # Chamfer transform: one forward sweep, then a backward sweep emulated
    # by reversing every axis, re-running the forward op, and reversing back.
    _nd_image.distance_transform_op(metric, dt, ft)
    dt = dt[tuple([slice(None, None, -1)] * rank)]
    if return_indices:
        ft = ft[tuple([slice(None, None, -1)] * rank)]
    _nd_image.distance_transform_op(metric, dt, ft)
    dt = dt[tuple([slice(None, None, -1)] * rank)]
    if return_indices:
        ft = ft[tuple([slice(None, None, -1)] * rank)]
        ft = np.ravel(ft)
        if ft_inplace:
            if indices.dtype.type != np.int32:
                raise ValueError('indices array must be int32')
            if indices.shape != (dt.ndim,) + dt.shape:
                raise ValueError('indices array has wrong shape')
            # NOTE(review): the loop below gathers from tmp's *prior*
            # contents; with a fresh np.indices array that is the identity
            # coordinate grid, but a user-supplied `indices` array is used
            # as-is — confirm callers are expected to pass an np.indices-like
            # array here.
            tmp = indices
        else:
            tmp = np.indices(dt.shape, dtype=np.int32)
        # Convert flat nearest-background indices into one input-shaped
        # coordinate array per dimension.
        for ii in range(tmp.shape[0]):
            rtmp = np.ravel(tmp[ii, ...])[ft]
            rtmp.shape = dt.shape
            tmp[ii, ...] = rtmp
        ft = tmp

    # construct and return the result: only arrays that were not written
    # in place are returned.
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)

    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
+
+
def distance_transform_edt(input, sampling=None, return_distances=True,
                           return_indices=False, distances=None, indices=None):
    """
    Exact Euclidean distance transform.

    This function calculates the distance transform of the `input`, by
    replacing each foreground (non-zero) element, with its
    shortest distance to the background (any zero-valued element).

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element to each foreground element is returned in a separate array.

    Parameters
    ----------
    input : array_like
        Input data to transform. Can be any type but will be converted
        into binary: 1 wherever input equates to True, 0 elsewhere.
    sampling : float, or sequence of float, optional
        Spacing of elements along each dimension. If a sequence, must be of
        length equal to the input rank; if a single number, this is used for
        all axes. If not specified, a grid spacing of unity is implied.
    return_distances : bool, optional
        Whether to calculate the distance transform.
        Default is True.
    return_indices : bool, optional
        Whether to calculate the feature transform.
        Default is False.
    distances : float64 ndarray, optional
        An output array to store the calculated distance transform, instead of
        returning it.
        `return_distances` must be True.
        It must be the same shape as `input`.
    indices : int32 ndarray, optional
        An output array to store the calculated feature transform, instead of
        returning it.
        `return_indices` must be True.
        Its shape must be `(input.ndim,) + input.shape`.

    Returns
    -------
    distances : float64 ndarray, optional
        The calculated distance transform. Returned only when
        `return_distances` is True and `distances` is not supplied.
        It will have the same shape as the input array.
    indices : int32 ndarray, optional
        The calculated feature transform. It has an input-shaped array for each
        dimension of the input. See example below.
        Returned only when `return_indices` is True and `indices` is not
        supplied.

    Notes
    -----
    The Euclidean distance transform gives values of the Euclidean
    distance::

                    n
      y_i = sqrt(sum (x[i]-b[i])**2)
                    i

    where b[i] is the background point (value 0) with the smallest
    Euclidean distance to input points x[i], and n is the
    number of dimensions.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array(([0,1,1,1,1],
    ...               [0,0,1,1,1],
    ...               [0,1,1,1,1],
    ...               [0,1,1,1,0],
    ...               [0,1,1,0,0]))
    >>> ndimage.distance_transform_edt(a)
    array([[ 0.    ,  1.    ,  1.4142,  2.2361,  3.    ],
           [ 0.    ,  0.    ,  1.    ,  2.    ,  2.    ],
           [ 0.    ,  1.    ,  1.4142,  1.4142,  1.    ],
           [ 0.    ,  1.    ,  1.4142,  1.    ,  0.    ],
           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])

    With a sampling of 2 units along x, 1 along y:

    >>> ndimage.distance_transform_edt(a, sampling=[2,1])
    array([[ 0.    ,  1.    ,  2.    ,  2.8284,  3.6056],
           [ 0.    ,  0.    ,  1.    ,  2.    ,  3.    ],
           [ 0.    ,  1.    ,  2.    ,  2.2361,  2.    ],
           [ 0.    ,  1.    ,  2.    ,  1.    ,  0.    ],
           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])

    Asking for indices as well:

    >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True)
    >>> inds
    array([[[0, 0, 1, 1, 3],
            [1, 1, 1, 1, 3],
            [2, 2, 1, 3, 3],
            [3, 3, 4, 4, 3],
            [4, 4, 4, 4, 4]],
           [[0, 0, 1, 1, 4],
            [0, 1, 1, 1, 4],
            [0, 0, 1, 4, 4],
            [0, 0, 3, 3, 4],
            [0, 0, 3, 3, 4]]])

    With arrays provided for inplace outputs:

    >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32)
    >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices)
    array([[ 0.    ,  1.    ,  1.4142,  2.2361,  3.    ],
           [ 0.    ,  0.    ,  1.    ,  2.    ,  2.    ],
           [ 0.    ,  1.    ,  1.4142,  1.4142,  1.    ],
           [ 0.    ,  1.    ,  1.4142,  1.    ,  0.    ],
           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])
    >>> indices
    array([[[0, 0, 1, 1, 3],
            [1, 1, 1, 1, 3],
            [2, 2, 1, 3, 3],
            [3, 3, 4, 4, 3],
            [4, 4, 4, 4, 4]],
           [[0, 0, 1, 1, 4],
            [0, 1, 1, 1, 4],
            [0, 0, 1, 4, 4],
            [0, 0, 3, 3, 4],
            [0, 0, 3, 3, 4]]])

    """
    # If the caller supplied preallocated ndarrays, results are written in
    # place and are *not* also returned (see result construction at the end).
    ft_inplace = isinstance(indices, np.ndarray)
    dt_inplace = isinstance(distances, np.ndarray)
    _distance_tranform_arg_check(
        dt_inplace, ft_inplace, return_distances, return_indices
    )

    # calculate the feature transform
    # Binarize the input to int8 (the layout the C extension consumes) and
    # promote 0-d input to 1-d so the transform below is well-defined.
    input = np.atleast_1d(np.where(input, 1, 0).astype(np.int8))
    if sampling is not None:
        # Broadcast a scalar spacing to one value per axis; the C routine
        # requires a contiguous float64 buffer.
        sampling = _ni_support._normalize_sequence(sampling, input.ndim)
        sampling = np.asarray(sampling, dtype=np.float64)
        if not sampling.flags.contiguous:
            sampling = sampling.copy()

    if ft_inplace:
        # Validate caller-provided output before handing it to the C code.
        ft = indices
        if ft.shape != (input.ndim,) + input.shape:
            raise RuntimeError('indices array has wrong shape')
        if ft.dtype.type != np.int32:
            raise RuntimeError('indices array must be int32')
    else:
        ft = np.zeros((input.ndim,) + input.shape, dtype=np.int32)

    # `ft` is filled with, for each element, the coordinates of the nearest
    # background element (one input-shaped array per axis).
    _nd_image.euclidean_feature_transform(input, sampling, ft)
    # if requested, calculate the distance transform
    if return_distances:
        # Distance = sqrt(sum_over_axes((feature_coord - own_coord)**2)),
        # optionally scaled per-axis by `sampling`.
        dt = ft - np.indices(input.shape, dtype=ft.dtype)
        dt = dt.astype(np.float64)
        if sampling is not None:
            for ii in range(len(sampling)):
                dt[ii, ...] *= sampling[ii]
        np.multiply(dt, dt, dt)  # square in place to avoid a temporary
        if dt_inplace:
            dt = np.add.reduce(dt, axis=0)
            if distances.shape != dt.shape:
                raise RuntimeError('distances array has wrong shape')
            if distances.dtype.type != np.float64:
                raise RuntimeError('distances array must be float64')
            # Write the final sqrt directly into the caller's array.
            np.sqrt(dt, distances)
        else:
            dt = np.add.reduce(dt, axis=0)
            dt = np.sqrt(dt)

    # construct and return the result: only non-inplace outputs are returned
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)

    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
+
+
+def _distance_tranform_arg_check(distances_out, indices_out,
+ return_distances, return_indices):
+ """Raise a RuntimeError if the arguments are invalid"""
+ error_msgs = []
+ if (not return_distances) and (not return_indices):
+ error_msgs.append(
+ 'at least one of return_distances/return_indices must be True')
+ if distances_out and not return_distances:
+ error_msgs.append(
+ 'return_distances must be True if distances is supplied'
+ )
+ if indices_out and not return_indices:
+ error_msgs.append('return_indices must be True if indices is supplied')
+ if error_msgs:
+ raise RuntimeError(', '.join(error_msgs))
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae8875f2ad20244604e02a0d8649a815956d4975
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from collections.abc import Iterable
+import operator
+import warnings
+import numpy as np
+
+
+def _extend_mode_to_code(mode):
+ """Convert an extension mode to the corresponding integer code.
+ """
+ if mode == 'nearest':
+ return 0
+ elif mode == 'wrap':
+ return 1
+ elif mode in ['reflect', 'grid-mirror']:
+ return 2
+ elif mode == 'mirror':
+ return 3
+ elif mode == 'constant':
+ return 4
+ elif mode == 'grid-wrap':
+ return 5
+ elif mode == 'grid-constant':
+ return 6
+ else:
+ raise RuntimeError('boundary mode not supported')
+
+
+def _normalize_sequence(input, rank):
+ """If input is a scalar, create a sequence of length equal to the
+ rank by duplicating the input. If input is a sequence,
+ check if its length is equal to the length of array.
+ """
+ is_str = isinstance(input, str)
+ if not is_str and isinstance(input, Iterable):
+ normalized = list(input)
+ if len(normalized) != rank:
+ err = "sequence argument must have length equal to input rank"
+ raise RuntimeError(err)
+ else:
+ normalized = [input] * rank
+ return normalized
+
+
def _get_output(output, input, shape=None, complex_output=False):
    # Resolve the ndimage `output` argument into a concrete ndarray.
    # `output` may be: None (allocate like `input`), a type/np.dtype, a dtype
    # string, or a preallocated ndarray (validated and returned as-is).
    # When `complex_output` is True the result must have a complex dtype.
    if shape is None:
        shape = input.shape
    if output is None:
        if not complex_output:
            output = np.zeros(shape, dtype=input.dtype.name)
        else:
            # Promote the input dtype so precision is preserved (e.g.
            # float64 -> complex128, not complex64).
            complex_type = np.promote_types(input.dtype, np.complex64)
            output = np.zeros(shape, dtype=complex_type)
    elif isinstance(output, (type, np.dtype)):
        # Classes (like `np.float32`) and dtypes are interpreted as dtype
        if complex_output and np.dtype(output).kind != 'c':
            # Here we warn and promote rather than raise, unlike the string
            # branch below which raises -- this asymmetry is intentional
            # upstream behavior; stacklevel=3 points the warning at the
            # public ndimage caller.
            warnings.warn("promoting specified output dtype to complex", stacklevel=3)
            output = np.promote_types(output, np.complex64)
        output = np.zeros(shape, dtype=output)
    elif isinstance(output, str):
        output = np.dtype(output)
        if complex_output and output.kind != 'c':
            raise RuntimeError("output must have complex dtype")
        elif not issubclass(output.type, np.number):
            raise RuntimeError("output must have numeric dtype")
        output = np.zeros(shape, dtype=output)
    elif output.shape != shape:
        # Caller supplied an ndarray: only shape and (if required)
        # complexness are checked; its dtype is otherwise trusted.
        raise RuntimeError("output shape not correct")
    elif complex_output and output.dtype.kind != 'c':
        raise RuntimeError("output must have complex dtype")
    return output
+
+
+def _check_axes(axes, ndim):
+ if axes is None:
+ return tuple(range(ndim))
+ elif np.isscalar(axes):
+ axes = (operator.index(axes),)
+ elif isinstance(axes, Iterable):
+ for ax in axes:
+ axes = tuple(operator.index(ax) for ax in axes)
+ if ax < -ndim or ax > ndim - 1:
+ raise ValueError(f"specified axis: {ax} is out of range")
+ axes = tuple(ax % ndim if ax < 0 else ax for ax in axes)
+ else:
+ message = "axes must be an integer, iterable of integers, or None"
+ raise ValueError(message)
+ if len(tuple(set(axes))) != len(axes):
+ raise ValueError("axes must be unique")
+ return axes
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/filters.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..e16d9d279a9585b2454c46ee09cf22143de833a6
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/filters.py
@@ -0,0 +1,27 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
# Public names that historically lived in `scipy.ndimage.filters`; they are
# resolved lazily through the module-level __getattr__ below, so the linter
# suppression (F822: undefined name in __all__) is intentional.
__all__ = [  # noqa: F822
    'correlate1d', 'convolve1d', 'gaussian_filter1d',
    'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace',
    'laplace', 'gaussian_laplace', 'generic_gradient_magnitude',
    'gaussian_gradient_magnitude', 'correlate', 'convolve',
    'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
    'maximum_filter1d', 'minimum_filter', 'maximum_filter',
    'rank_filter', 'median_filter', 'percentile_filter',
    'generic_filter1d', 'generic_filter'
]


def __dir__():
    # Restrict dir()/tab-completion to the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module-level __getattr__: forwards attribute access to the
    # private `_filters` module (presumably emitting a deprecation warning
    # inside the helper -- behavior is defined by scipy._lib.deprecation).
    return _sub_module_deprecation(sub_package='ndimage', module='filters',
                                   private_modules=['_filters'], all=__all__,
                                   attribute=name)
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/measurements.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/measurements.py
new file mode 100644
index 0000000000000000000000000000000000000000..22f76b01840ffb829205bd1d28a7ad1f9ac5db61
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/measurements.py
@@ -0,0 +1,24 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
# Public names that historically lived in `scipy.ndimage.measurements`; they
# are resolved lazily through the module-level __getattr__ below, so the
# linter suppression (F822: undefined name in __all__) is intentional.
__all__ = [  # noqa: F822
    'label', 'find_objects', 'labeled_comprehension',
    'sum', 'mean', 'variance', 'standard_deviation',
    'minimum', 'maximum', 'median', 'minimum_position',
    'maximum_position', 'extrema', 'center_of_mass',
    'histogram', 'watershed_ift', 'sum_labels'
]


def __dir__():
    # Restrict dir()/tab-completion to the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module-level __getattr__: forwards attribute access to the
    # private `_measurements` module (presumably emitting a deprecation
    # warning inside the helper -- see scipy._lib.deprecation).
    return _sub_module_deprecation(sub_package='ndimage', module='measurements',
                                   private_modules=['_measurements'], all=__all__,
                                   attribute=name)
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/morphology.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/morphology.py
new file mode 100644
index 0000000000000000000000000000000000000000..e522e7df3a4b06b7e04ed8c2d0ecaff2a98b951d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/morphology.py
@@ -0,0 +1,27 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
# Public names that historically lived in `scipy.ndimage.morphology`; they
# are resolved lazily through the module-level __getattr__ below, so the
# linter suppression (F822: undefined name in __all__) is intentional.
__all__ = [  # noqa: F822
    'iterate_structure', 'generate_binary_structure',
    'binary_erosion', 'binary_dilation', 'binary_opening',
    'binary_closing', 'binary_hit_or_miss', 'binary_propagation',
    'binary_fill_holes', 'grey_erosion', 'grey_dilation',
    'grey_opening', 'grey_closing', 'morphological_gradient',
    'morphological_laplace', 'white_tophat', 'black_tophat',
    'distance_transform_bf', 'distance_transform_cdt',
    'distance_transform_edt'
]


def __dir__():
    # Restrict dir()/tab-completion to the deprecated public names.
    return __all__


def __getattr__(name):
    # PEP 562 module-level __getattr__: forwards attribute access to the
    # private `_morphology` module (presumably emitting a deprecation
    # warning inside the helper -- see scipy._lib.deprecation).
    return _sub_module_deprecation(sub_package='ndimage', module='morphology',
                                   private_modules=['_morphology'], all=__all__,
                                   attribute=name)
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py
new file mode 100644
index 0000000000000000000000000000000000000000..c92cfb558a0fafac3b881540afaf6b05165f5dc5
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py
@@ -0,0 +1,1327 @@
+import sys
+
+import numpy as np
+from numpy.testing import (assert_, assert_equal, assert_array_equal,
+ assert_array_almost_equal, assert_allclose,
+ suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+import scipy.ndimage as ndimage
+
+from . import types
+
# Absolute tolerance shared by the interpolation tests below.
eps = 1e-12

# Map each scipy.ndimage boundary-mode name to the np.pad mode that produces
# the same array extension; used by tests that compare an ndimage result
# against the same operation applied to an explicitly padded array. Note the
# naming mismatch between the two libraries (ndimage 'mirror' == np.pad
# 'reflect', ndimage 'reflect' == np.pad 'symmetric').
ndimage_to_numpy_mode = {
    'mirror': 'reflect',
    'reflect': 'symmetric',
    'grid-mirror': 'symmetric',
    'grid-wrap': 'wrap',
    'nearest': 'edge',
    'grid-constant': 'constant',
}
+
+
+class TestNdimageInterpolation:
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
+ ('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
+ ('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
+ ('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
+ ('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
+ ('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
+ ('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
+ )
+ def test_boundaries(self, mode, expected_value):
+ def shift(x):
+ return (x[0] + 0.5,)
+
+ data = np.array([1, 2, 3, 4.])
+ assert_array_equal(
+ expected_value,
+ ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
+ output_shape=(7,), order=1))
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [1, 1, 2, 3]),
+ ('wrap', [3, 1, 2, 3]),
+ ('grid-wrap', [4, 1, 2, 3]),
+ ('mirror', [2, 1, 2, 3]),
+ ('reflect', [1, 1, 2, 3]),
+ ('constant', [-1, 1, 2, 3]),
+ ('grid-constant', [-1, 1, 2, 3])]
+ )
+ def test_boundaries2(self, mode, expected_value):
+ def shift(x):
+ return (x[0] - 0.9,)
+
+ data = np.array([1, 2, 3, 4])
+ assert_array_equal(
+ expected_value,
+ ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
+ output_shape=(4,)))
+
+ @pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
+ 'grid-wrap', 'grid-constant',
+ 'nearest'])
+ @pytest.mark.parametrize('order', range(6))
+ def test_boundary_spline_accuracy(self, mode, order):
+ """Tests based on examples from gh-2640"""
+ data = np.arange(-6, 7, dtype=float)
+ x = np.linspace(-8, 15, num=1000)
+ y = ndimage.map_coordinates(data, [x], order=order, mode=mode)
+
+ # compute expected value using explicit padding via np.pad
+ npad = 32
+ pad_mode = ndimage_to_numpy_mode.get(mode)
+ padded = np.pad(data, npad, mode=pad_mode)
+ expected = ndimage.map_coordinates(padded, [npad + x], order=order,
+ mode=mode)
+
+ atol = 1e-5 if mode == 'grid-constant' else 1e-12
+ assert_allclose(y, expected, rtol=1e-7, atol=atol)
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline01(self, dtype, order):
+ data = np.ones([], dtype)
+ out = ndimage.spline_filter(data, order=order)
+ assert_array_almost_equal(out, 1)
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline02(self, dtype, order):
+ data = np.array([1], dtype)
+ out = ndimage.spline_filter(data, order=order)
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline03(self, dtype, order):
+ data = np.ones([], dtype)
+ out = ndimage.spline_filter(data, order, output=dtype)
+ assert_array_almost_equal(out, 1)
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline04(self, dtype, order):
+ data = np.ones([4], dtype)
+ out = ndimage.spline_filter(data, order)
+ assert_array_almost_equal(out, [1, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline05(self, dtype, order):
+ data = np.ones([4, 4], dtype)
+ out = ndimage.spline_filter(data, order=order)
+ assert_array_almost_equal(out, [[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform01(self, order):
+ data = np.array([1])
+
+ def mapping(x):
+ return x
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform02(self, order):
+ data = np.ones([4])
+
+ def mapping(x):
+ return x
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform03(self, order):
+ data = np.ones([4])
+
+ def mapping(x):
+ return (x[0] - 1,)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [0, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform04(self, order):
+ data = np.array([4, 1, 3, 2])
+
+ def mapping(x):
+ return (x[0] - 1,)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [0, 4, 1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
+ def test_geometric_transform05(self, order, dtype):
+ data = np.array([[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]], dtype=dtype)
+ expected = np.array([[0, 1, 1, 1],
+ [0, 1, 1, 1],
+ [0, 1, 1, 1]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+
+ def mapping(x):
+ return (x[0], x[1] - 1)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform06(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+
+ def mapping(x):
+ return (x[0], x[1] - 1)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
+ [0, 7, 6, 8],
+ [0, 3, 5, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform07(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+
+ def mapping(x):
+ return (x[0] - 1, x[1])
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [4, 1, 3, 2],
+ [7, 6, 8, 5]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform08(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+
+ def mapping(x):
+ return (x[0] - 1, x[1] - 1)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform10(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+
+ def mapping(x):
+ return (x[0] - 1, x[1] - 1)
+
+ if (order > 1):
+ filtered = ndimage.spline_filter(data, order=order)
+ else:
+ filtered = data
+ out = ndimage.geometric_transform(filtered, mapping, data.shape,
+ order=order, prefilter=False)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform13(self, order):
+ data = np.ones([2], np.float64)
+
+ def mapping(x):
+ return (x[0] // 2,)
+
+ out = ndimage.geometric_transform(data, mapping, [4], order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform14(self, order):
+ data = [1, 5, 2, 6, 3, 7, 4, 4]
+
+ def mapping(x):
+ return (2 * x[0],)
+
+ out = ndimage.geometric_transform(data, mapping, [4], order=order)
+ assert_array_almost_equal(out, [1, 2, 3, 4])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform15(self, order):
+ data = [1, 2, 3, 4]
+
+ def mapping(x):
+ return (x[0] / 2,)
+
+ out = ndimage.geometric_transform(data, mapping, [8], order=order)
+ assert_array_almost_equal(out[::2], [1, 2, 3, 4])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform16(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9.0, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0], x[1] * 2)
+
+ out = ndimage.geometric_transform(data, mapping, (3, 2),
+ order=order)
+ assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform17(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0] * 2, x[1])
+
+ out = ndimage.geometric_transform(data, mapping, (1, 4),
+ order=order)
+ assert_array_almost_equal(out, [[1, 2, 3, 4]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform18(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0] * 2, x[1] * 2)
+
+ out = ndimage.geometric_transform(data, mapping, (1, 2),
+ order=order)
+ assert_array_almost_equal(out, [[1, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform19(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0], x[1] / 2)
+
+ out = ndimage.geometric_transform(data, mapping, (3, 8),
+ order=order)
+ assert_array_almost_equal(out[..., ::2], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform20(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0] / 2, x[1])
+
+ out = ndimage.geometric_transform(data, mapping, (6, 4),
+ order=order)
+ assert_array_almost_equal(out[::2, ...], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform21(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0] / 2, x[1] / 2)
+
+ out = ndimage.geometric_transform(data, mapping, (6, 8),
+ order=order)
+ assert_array_almost_equal(out[::2, ::2], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform22(self, order):
+ data = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]], np.float64)
+
+ def mapping1(x):
+ return (x[0] / 2, x[1] / 2)
+
+ def mapping2(x):
+ return (x[0] * 2, x[1] * 2)
+
+ out = ndimage.geometric_transform(data, mapping1,
+ (6, 8), order=order)
+ out = ndimage.geometric_transform(out, mapping2,
+ (3, 4), order=order)
+ assert_array_almost_equal(out, data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform23(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (1, x[0] * 2)
+
+ out = ndimage.geometric_transform(data, mapping, (2,), order=order)
+ out = out.astype(np.int32)
+ assert_array_almost_equal(out, [5, 7])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform24(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x, a, b):
+ return (a, x[0] * b)
+
+ out = ndimage.geometric_transform(
+ data, mapping, (2,), order=order, extra_arguments=(1,),
+ extra_keywords={'b': 2})
+ assert_array_almost_equal(out, [5, 7])
+
+ def test_geometric_transform_grid_constant_order1(self):
+ # verify interpolation outside the original bounds
+ x = np.array([[1, 2, 3],
+ [4, 5, 6]], dtype=float)
+
+ def mapping(x):
+ return (x[0] - 0.5), (x[1] - 0.5)
+
+ expected_result = np.array([[0.25, 0.75, 1.25],
+ [1.25, 3.00, 4.00]])
+ assert_array_almost_equal(
+ ndimage.geometric_transform(x, mapping, mode='grid-constant',
+ order=1),
+ expected_result,
+ )
+
+ @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
+ 'mirror', 'reflect'])
+ @pytest.mark.parametrize('order', range(6))
+ def test_geometric_transform_vs_padded(self, order, mode):
+ x = np.arange(144, dtype=float).reshape(12, 12)
+
+ def mapping(x):
+ return (x[0] - 0.4), (x[1] + 2.3)
+
+ # Manually pad and then extract center after the transform to get the
+ # expected result.
+ npad = 24
+ pad_mode = ndimage_to_numpy_mode.get(mode)
+ xp = np.pad(x, npad, mode=pad_mode)
+ center_slice = tuple([slice(npad, -npad)] * x.ndim)
+ expected_result = ndimage.geometric_transform(
+ xp, mapping, mode=mode, order=order)[center_slice]
+
+ assert_allclose(
+ ndimage.geometric_transform(x, mapping, mode=mode,
+ order=order),
+ expected_result,
+ rtol=1e-7,
+ )
+
+ def test_geometric_transform_endianness_with_output_parameter(self):
+ # geometric transform given output ndarray or dtype with
+ # non-native endianness. see issue #4127
+ data = np.array([1])
+
+ def mapping(x):
+ return x
+
+ for out in [data.dtype, data.dtype.newbyteorder(),
+ np.empty_like(data),
+ np.empty_like(data).astype(data.dtype.newbyteorder())]:
+ returned = ndimage.geometric_transform(data, mapping, data.shape,
+ output=out)
+ result = out if returned is None else returned
+ assert_array_almost_equal(result, [1])
+
+ def test_geometric_transform_with_string_output(self):
+ data = np.array([1])
+
+ def mapping(x):
+ return x
+
+ out = ndimage.geometric_transform(data, mapping, output='f')
+ assert_(out.dtype is np.dtype('f'))
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
+ def test_map_coordinates01(self, order, dtype):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ expected = np.array([[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+ if data.dtype.kind == 'c':
+ data = data - 1j * data
+ expected = expected - 1j * expected
+
+ idx = np.indices(data.shape)
+ idx -= 1
+
+ out = ndimage.map_coordinates(data, idx, order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_map_coordinates02(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ idx = np.indices(data.shape, np.float64)
+ idx -= 0.5
+
+ out1 = ndimage.shift(data, 0.5, order=order)
+ out2 = ndimage.map_coordinates(data, idx, order=order)
+ assert_array_almost_equal(out1, out2)
+
+ def test_map_coordinates03(self):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]], order='F')
+ idx = np.indices(data.shape) - 1
+ out = ndimage.map_coordinates(data, idx)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+ assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
+ idx = np.indices(data[::2].shape) - 1
+ out = ndimage.map_coordinates(data[::2], idx)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3]])
+ assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
+ idx = np.indices(data[:, ::2].shape) - 1
+ out = ndimage.map_coordinates(data[:, ::2], idx)
+ assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
+ assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
+
+ def test_map_coordinates_endianness_with_output_parameter(self):
+ # output parameter given as array or dtype with either endianness
+ # see issue #4127
+ data = np.array([[1, 2], [7, 6]])
+ expected = np.array([[0, 0], [0, 1]])
+ idx = np.indices(data.shape)
+ idx -= 1
+ for out in [
+ data.dtype,
+ data.dtype.newbyteorder(),
+ np.empty_like(expected),
+ np.empty_like(expected).astype(expected.dtype.newbyteorder())
+ ]:
+ returned = ndimage.map_coordinates(data, idx, output=out)
+ result = out if returned is None else returned
+ assert_array_almost_equal(result, expected)
+
+ def test_map_coordinates_with_string_output(self):
+ data = np.array([[1]])
+ idx = np.indices(data.shape)
+ out = ndimage.map_coordinates(data, idx, output='f')
+ assert_(out.dtype is np.dtype('f'))
+ assert_array_almost_equal(out, [[1]])
+
+ @pytest.mark.skipif('win32' in sys.platform or np.intp(0).itemsize < 8,
+ reason='do not run on 32 bit or windows '
+ '(no sparse memory)')
+ def test_map_coordinates_large_data(self):
+ # check crash on large data
+ try:
+ n = 30000
+ a = np.empty(n**2, dtype=np.float32).reshape(n, n)
+ # fill the part we might read
+ a[n - 3:, n - 3:] = 0
+ ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
+ except MemoryError as e:
+ raise pytest.skip('Not enough memory available') from e
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform01(self, order):
+ data = np.array([1])
+ out = ndimage.affine_transform(data, [[1]], order=order)
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform02(self, order):
+ data = np.ones([4])
+ out = ndimage.affine_transform(data, [[1]], order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform03(self, order):
+ data = np.ones([4])
+ out = ndimage.affine_transform(data, [[1]], -1, order=order)
+ assert_array_almost_equal(out, [0, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform04(self, order):
+ data = np.array([4, 1, 3, 2])
+ out = ndimage.affine_transform(data, [[1]], -1, order=order)
+ assert_array_almost_equal(out, [0, 4, 1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
+ def test_affine_transform05(self, order, dtype):
+ data = np.array([[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]], dtype=dtype)
+ expected = np.array([[0, 1, 1, 1],
+ [0, 1, 1, 1],
+ [0, 1, 1, 1]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+ [0, -1], order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform06(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+ [0, -1], order=order)
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
+ [0, 7, 6, 8],
+ [0, 3, 5, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform07(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+ [-1, 0], order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [4, 1, 3, 2],
+ [7, 6, 8, 5]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform08(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+ [-1, -1], order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform09(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ if (order > 1):
+ filtered = ndimage.spline_filter(data, order=order)
+ else:
+ filtered = data
+ out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
+ [-1, -1], order=order,
+ prefilter=False)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform10(self, order):
+ data = np.ones([2], np.float64)
+ out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
+ order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 0])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform11(self, order):
+ data = [1, 5, 2, 6, 3, 7, 4, 4]
+ out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
+ assert_array_almost_equal(out, [1, 2, 3, 4])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform12(self, order):
+ data = [1, 2, 3, 4]
+ out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
+ assert_array_almost_equal(out[::2], [1, 2, 3, 4])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform13(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9.0, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
+ order=order)
+ assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform14(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
+ order=order)
+ assert_array_almost_equal(out, [[1, 2, 3, 4]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform15(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
+ order=order)
+ assert_array_almost_equal(out, [[1, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform16(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
+ (3, 8), order=order)
+ assert_array_almost_equal(out[..., ::2], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform17(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
+ (6, 4), order=order)
+ assert_array_almost_equal(out[::2, ...], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform18(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
+ (6, 8), order=order)
+ assert_array_almost_equal(out[::2, ::2], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform19(self, order):
+ data = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]], np.float64)
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
+ (6, 8), order=order)
+ out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
+ (3, 4), order=order)
+ assert_array_almost_equal(out, data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform20(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
+ order=order)
+ assert_array_almost_equal(out, [1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform21(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
+ order=order)
+ assert_array_almost_equal(out, [1, 9])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform22(self, order):
+ # shift and offset interaction; see issue #1547
+ data = np.array([4, 1, 3, 2])
+ out = ndimage.affine_transform(data, [[2]], [-1], (3,),
+ order=order)
+ assert_array_almost_equal(out, [0, 1, 2])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform23(self, order):
+ # shift and offset interaction; see issue #1547
+ data = np.array([4, 1, 3, 2])
+ out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
+ order=order)
+ assert_array_almost_equal(out[::2], [0, 4, 1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform24(self, order):
+ # consistency between diagonal and non-diagonal case; see issue #1547
+ data = np.array([4, 1, 3, 2])
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ 'The behavior of affine_transform with a 1-D array .* '
+ 'has changed')
+ out1 = ndimage.affine_transform(data, [2], -1, order=order)
+ out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
+ assert_array_almost_equal(out1, out2)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform25(self, order):
+ # consistency between diagonal and non-diagonal case; see issue #1547
+ data = np.array([4, 1, 3, 2])
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ 'The behavior of affine_transform with a 1-D array .* '
+ 'has changed')
+ out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
+ out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
+ assert_array_almost_equal(out1, out2)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform26(self, order):
+ # test homogeneous coordinates
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ if (order > 1):
+ filtered = ndimage.spline_filter(data, order=order)
+ else:
+ filtered = data
+ tform_original = np.eye(2)
+ offset_original = -np.ones((2, 1))
+ tform_h1 = np.hstack((tform_original, offset_original))
+ tform_h2 = np.vstack((tform_h1, [[0, 0, 1]]))
+ out1 = ndimage.affine_transform(filtered, tform_original,
+ offset_original.ravel(),
+ order=order, prefilter=False)
+ out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
+ prefilter=False)
+ out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
+ prefilter=False)
+ for out in [out1, out2, out3]:
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ def test_affine_transform27(self):
+ # test valid homogeneous transformation matrix
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ tform_h1 = np.hstack((np.eye(2), -np.ones((2, 1))))
+ tform_h2 = np.vstack((tform_h1, [[5, 2, 1]]))
+ assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
+
+ def test_affine_transform_1d_endianness_with_output_parameter(self):
+ # 1d affine transform given output ndarray or dtype with
+ # either endianness. see issue #7388
+ data = np.ones((2, 2))
+ for out in [np.empty_like(data),
+ np.empty_like(data).astype(data.dtype.newbyteorder()),
+ data.dtype, data.dtype.newbyteorder()]:
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ 'The behavior of affine_transform with a 1-D array '
+ '.* has changed')
+ returned = ndimage.affine_transform(data, [1, 1], output=out)
+ result = out if returned is None else returned
+ assert_array_almost_equal(result, [[1, 1], [1, 1]])
+
+ def test_affine_transform_multi_d_endianness_with_output_parameter(self):
+ # affine transform given output ndarray or dtype with either endianness
+ # see issue #4127
+ data = np.array([1])
+ for out in [data.dtype, data.dtype.newbyteorder(),
+ np.empty_like(data),
+ np.empty_like(data).astype(data.dtype.newbyteorder())]:
+ returned = ndimage.affine_transform(data, [[1]], output=out)
+ result = out if returned is None else returned
+ assert_array_almost_equal(result, [1])
+
+ def test_affine_transform_output_shape(self):
+ # don't require output_shape when out of a different size is given
+ data = np.arange(8, dtype=np.float64)
+ out = np.ones((16,))
+
+ ndimage.affine_transform(data, [[1]], output=out)
+ assert_array_almost_equal(out[:8], data)
+
+ # mismatched output shape raises an error
+ with pytest.raises(RuntimeError):
+ ndimage.affine_transform(
+ data, [[1]], output=out, output_shape=(12,))
+
+ def test_affine_transform_with_string_output(self):
+ data = np.array([1])
+ out = ndimage.affine_transform(data, [[1]], output='f')
+ assert_(out.dtype is np.dtype('f'))
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('shift',
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform_shift_via_grid_wrap(self, shift, order):
+ # For mode 'grid-wrap', integer shifts should match np.roll
+ x = np.array([[0, 1],
+ [2, 3]])
+ affine = np.zeros((2, 3))
+ affine[:2, :2] = np.eye(2)
+ affine[:, 2] = shift
+ assert_array_almost_equal(
+ ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
+ np.roll(x, shift, axis=(0, 1)),
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform_shift_reflect(self, order):
+ # shift by x.shape results in reflection
+ x = np.array([[0, 1, 2],
+ [3, 4, 5]])
+ affine = np.zeros((2, 3))
+ affine[:2, :2] = np.eye(2)
+ affine[:, 2] = x.shape
+ assert_array_almost_equal(
+ ndimage.affine_transform(x, affine, mode='reflect', order=order),
+ x[::-1, ::-1],
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift01(self, order):
+ data = np.array([1])
+ out = ndimage.shift(data, [1], order=order)
+ assert_array_almost_equal(out, [0])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift02(self, order):
+ data = np.ones([4])
+ out = ndimage.shift(data, [1], order=order)
+ assert_array_almost_equal(out, [0, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift03(self, order):
+ data = np.ones([4])
+ out = ndimage.shift(data, -1, order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 0])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift04(self, order):
+ data = np.array([4, 1, 3, 2])
+ out = ndimage.shift(data, 1, order=order)
+ assert_array_almost_equal(out, [0, 4, 1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
+ def test_shift05(self, order, dtype):
+ data = np.array([[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]], dtype=dtype)
+ expected = np.array([[0, 1, 1, 1],
+ [0, 1, 1, 1],
+ [0, 1, 1, 1]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+ out = ndimage.shift(data, [0, 1], order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('mode', ['constant', 'grid-constant'])
+ @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
+ def test_shift_with_nonzero_cval(self, order, mode, dtype):
+ data = np.array([[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]], dtype=dtype)
+
+ expected = np.array([[0, 1, 1, 1],
+ [0, 1, 1, 1],
+ [0, 1, 1, 1]], dtype=dtype)
+
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+ cval = 5.0
+ expected[:, 0] = cval # specific to shift of [0, 1] used below
+ out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift06(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.shift(data, [0, 1], order=order)
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
+ [0, 7, 6, 8],
+ [0, 3, 5, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift07(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.shift(data, [1, 0], order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [4, 1, 3, 2],
+ [7, 6, 8, 5]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift08(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.shift(data, [1, 1], order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift09(self, order):
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ if (order > 1):
+ filtered = ndimage.spline_filter(data, order=order)
+ else:
+ filtered = data
+ out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('shift',
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift_grid_wrap(self, shift, order):
+ # For mode 'grid-wrap', integer shifts should match np.roll
+ x = np.array([[0, 1],
+ [2, 3]])
+ assert_array_almost_equal(
+ ndimage.shift(x, shift, mode='grid-wrap', order=order),
+ np.roll(x, shift, axis=(0, 1)),
+ )
+
+ @pytest.mark.parametrize('shift',
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift_grid_constant1(self, shift, order):
+ # For integer shifts, 'constant' and 'grid-constant' should be equal
+ x = np.arange(20).reshape((5, 4))
+ assert_array_almost_equal(
+ ndimage.shift(x, shift, mode='grid-constant', order=order),
+ ndimage.shift(x, shift, mode='constant', order=order),
+ )
+
+ def test_shift_grid_constant_order1(self):
+ x = np.array([[1, 2, 3],
+ [4, 5, 6]], dtype=float)
+ expected_result = np.array([[0.25, 0.75, 1.25],
+ [1.25, 3.00, 4.00]])
+ assert_array_almost_equal(
+ ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1),
+ expected_result,
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift_reflect(self, order):
+ # shift by x.shape results in reflection
+ x = np.array([[0, 1, 2],
+ [3, 4, 5]])
+ assert_array_almost_equal(
+ ndimage.shift(x, x.shape, mode='reflect', order=order),
+ x[::-1, ::-1],
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('prefilter', [False, True])
+ def test_shift_nearest_boundary(self, order, prefilter):
+ # verify that shifting at least order // 2 beyond the end of the array
+ # gives a value equal to the edge value.
+ x = np.arange(16)
+ kwargs = dict(mode='nearest', order=order, prefilter=prefilter)
+ assert_array_almost_equal(
+ ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0],
+ )
+ assert_array_almost_equal(
+ ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1],
+ )
+
+ @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
+ 'mirror', 'reflect'])
+ @pytest.mark.parametrize('order', range(6))
+ def test_shift_vs_padded(self, order, mode):
+ x = np.arange(144, dtype=float).reshape(12, 12)
+ shift = (0.4, -2.3)
+
+ # manually pad and then extract center to get expected result
+ npad = 32
+ pad_mode = ndimage_to_numpy_mode.get(mode)
+ xp = np.pad(x, npad, mode=pad_mode)
+ center_slice = tuple([slice(npad, -npad)] * x.ndim)
+ expected_result = ndimage.shift(
+ xp, shift, mode=mode, order=order)[center_slice]
+
+ assert_allclose(
+ ndimage.shift(x, shift, mode=mode, order=order),
+ expected_result,
+ rtol=1e-7,
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_zoom1(self, order):
+ for z in [2, [2, 2]]:
+ arr = np.array(list(range(25))).reshape((5, 5)).astype(float)
+ arr = ndimage.zoom(arr, z, order=order)
+ assert_equal(arr.shape, (10, 10))
+ assert_(np.all(arr[-1, :] != 0))
+ assert_(np.all(arr[-1, :] >= (20 - eps)))
+ assert_(np.all(arr[0, :] <= (5 + eps)))
+ assert_(np.all(arr >= (0 - eps)))
+ assert_(np.all(arr <= (24 + eps)))
+
+ def test_zoom2(self):
+ arr = np.arange(12).reshape((3, 4))
+ out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
+ assert_array_equal(out, arr)
+
+ def test_zoom3(self):
+ arr = np.array([[1, 2]])
+ out1 = ndimage.zoom(arr, (2, 1))
+ out2 = ndimage.zoom(arr, (1, 2))
+
+ assert_array_almost_equal(out1, np.array([[1, 2], [1, 2]]))
+ assert_array_almost_equal(out2, np.array([[1, 1, 2, 2]]))
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
+ def test_zoom_affine01(self, order, dtype):
+ data = np.asarray([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ 'The behavior of affine_transform with a 1-D array .* '
+ 'has changed')
+ out = ndimage.affine_transform(data, [0.5, 0.5], 0,
+ (6, 8), order=order)
+ assert_array_almost_equal(out[::2, ::2], data)
+
+ def test_zoom_infinity(self):
+ # Ticket #1419 regression test
+ dim = 8
+ ndimage.zoom(np.zeros((dim, dim)), 1. / dim, mode='nearest')
+
+ def test_zoom_zoomfactor_one(self):
+ # Ticket #1122 regression test
+ arr = np.zeros((1, 5, 5))
+ zoom = (1.0, 2.0, 2.0)
+
+ out = ndimage.zoom(arr, zoom, cval=7)
+ ref = np.zeros((1, 10, 10))
+ assert_array_almost_equal(out, ref)
+
+ def test_zoom_output_shape_roundoff(self):
+ arr = np.zeros((3, 11, 25))
+ zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
+ out = ndimage.zoom(arr, zoom)
+ assert_array_equal(out.shape, (4, 15, 29))
+
+ @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
+ @pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
+ 'mirror', 'grid-wrap', 'grid-mirror',
+ 'grid-constant'])
+ def test_zoom_by_int_order0(self, zoom, mode):
+ # order 0 zoom should be the same as replication via np.kron
+ # Note: This is not True for general x shapes when grid_mode is False,
+ # but works here for all modes because the size ratio happens to
+ # always be an integer when x.shape = (2, 2).
+ x = np.array([[0, 1],
+ [2, 3]], dtype=float)
+ # x = np.arange(16, dtype=float).reshape(4, 4)
+ assert_array_almost_equal(
+ ndimage.zoom(x, zoom, order=0, mode=mode),
+ np.kron(x, np.ones(zoom))
+ )
+
+ @pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
+ @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
+ @pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
+ 'grid-wrap', 'grid-constant'])
+ def test_zoom_grid_by_int_order0(self, shape, zoom, mode):
+ # When grid_mode is True, order 0 zoom should be the same as
+ # replication via np.kron. The only exceptions to this are the
+ # non-grid modes 'constant' and 'wrap'.
+ x = np.arange(np.prod(shape), dtype=float).reshape(shape)
+ assert_array_almost_equal(
+ ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
+ np.kron(x, np.ones(zoom))
+ )
+
+ @pytest.mark.parametrize('mode', ['constant', 'wrap'])
+ def test_zoom_grid_mode_warnings(self, mode):
+ # Warn on use of non-grid modes when grid_mode is True
+ x = np.arange(9, dtype=float).reshape((3, 3))
+ with pytest.warns(UserWarning,
+ match="It is recommended to use mode"):
+ ndimage.zoom(x, 2, mode=mode, grid_mode=True),
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate01(self, order):
+ data = np.array([[0, 0, 0, 0],
+ [0, 1, 1, 0],
+ [0, 0, 0, 0]], dtype=np.float64)
+ out = ndimage.rotate(data, 0, order=order)
+ assert_array_almost_equal(out, data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate02(self, order):
+ data = np.array([[0, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 0, 0]], dtype=np.float64)
+ expected = np.array([[0, 0, 0],
+ [0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0]], dtype=np.float64)
+ out = ndimage.rotate(data, 90, order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
+ def test_rotate03(self, order, dtype):
+ data = np.array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]], dtype=dtype)
+ expected = np.array([[0, 0, 0],
+ [0, 0, 0],
+ [0, 1, 0],
+ [0, 1, 0],
+ [0, 0, 0]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+ out = ndimage.rotate(data, 90, order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate04(self, order):
+ data = np.array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]], dtype=np.float64)
+ expected = np.array([[0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 1, 0, 0]], dtype=np.float64)
+ out = ndimage.rotate(data, 90, reshape=False, order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate05(self, order):
+ data = np.empty((4, 3, 3))
+ for i in range(3):
+ data[:, :, i] = np.array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 1, 0],
+ [0, 0, 0]], dtype=np.float64)
+ expected = np.array([[0, 0, 0, 0],
+ [0, 1, 1, 0],
+ [0, 0, 0, 0]], dtype=np.float64)
+ out = ndimage.rotate(data, 90, order=order)
+ for i in range(3):
+ assert_array_almost_equal(out[:, :, i], expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate06(self, order):
+ data = np.empty((3, 4, 3))
+ for i in range(3):
+ data[:, :, i] = np.array([[0, 0, 0, 0],
+ [0, 1, 1, 0],
+ [0, 0, 0, 0]], dtype=np.float64)
+ expected = np.array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 1, 0],
+ [0, 0, 0]], dtype=np.float64)
+ out = ndimage.rotate(data, 90, order=order)
+ for i in range(3):
+ assert_array_almost_equal(out[:, :, i], expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate07(self, order):
+ data = np.array([[[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]]] * 2, dtype=np.float64)
+ data = data.transpose()
+ expected = np.array([[[0, 0, 0],
+ [0, 1, 0],
+ [0, 1, 0],
+ [0, 0, 0],
+ [0, 0, 0]]] * 2, dtype=np.float64)
+ expected = expected.transpose([2, 1, 0])
+ out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate08(self, order):
+ data = np.array([[[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]]] * 2, dtype=np.float64)
+ data = data.transpose()
+ expected = np.array([[[0, 0, 1, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0]]] * 2, dtype=np.float64)
+ expected = expected.transpose()
+ out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
+ assert_array_almost_equal(out, expected)
+
+ def test_rotate09(self):
+ data = np.array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]] * 2, dtype=np.float64)
+ with assert_raises(ValueError):
+ ndimage.rotate(data, 90, axes=(0, data.ndim))
+
+ def test_rotate10(self):
+ data = np.arange(45, dtype=np.float64).reshape((3, 5, 3))
+
+ # The output of ndimage.rotate before refactoring
+ expected = np.array([[[0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0],
+ [6.54914793, 7.54914793, 8.54914793],
+ [10.84520162, 11.84520162, 12.84520162],
+ [0.0, 0.0, 0.0]],
+ [[6.19286575, 7.19286575, 8.19286575],
+ [13.4730712, 14.4730712, 15.4730712],
+ [21.0, 22.0, 23.0],
+ [28.5269288, 29.5269288, 30.5269288],
+ [35.80713425, 36.80713425, 37.80713425]],
+ [[0.0, 0.0, 0.0],
+ [31.15479838, 32.15479838, 33.15479838],
+ [35.45085207, 36.45085207, 37.45085207],
+ [0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0]]])
+
+ out = ndimage.rotate(data, angle=12, reshape=False)
+ assert_array_almost_equal(out, expected)
+
+ def test_rotate_exact_180(self):
+ a = np.tile(np.arange(5), (5, 1))
+ b = ndimage.rotate(ndimage.rotate(a, 180), -180)
+ assert_equal(a, b)
+
+
+def test_zoom_output_shape():
+ """Ticket #643"""
+ x = np.arange(12).reshape((3, 4))
+ ndimage.zoom(x, 2, output=np.zeros((6, 8)))
diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py
new file mode 100644
index 0000000000000000000000000000000000000000..a55b1a6014348ba022f9982900a3cf5e1bcf62af
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py
@@ -0,0 +1,1419 @@
+import os.path
+
+import numpy as np
+from numpy.testing import (
+ assert_,
+ assert_allclose,
+ assert_almost_equal,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_equal,
+ suppress_warnings,
+)
+import pytest
+from pytest import raises as assert_raises
+
+import scipy.ndimage as ndimage
+
+
+from . import types
+
+
class Test_measurements_stats:
    """ndimage._measurements._stats() is a utility used by other functions."""

    def _check(self, values, labels, index, centered):
        # Run _stats over both a flat and a 2-D layout of the same data;
        # the per-label counts/sums must not depend on the array shape.
        for shape in [(4,), (2, 2)]:
            arr = np.array(values).reshape(shape)
            lbl = np.array(labels).reshape(shape)
            if centered:
                counts, sums, centers = ndimage._measurements._stats(
                    arr, labels=lbl, index=index, centered=True)
                assert_array_equal(centers, [0.5, 8.0])
            else:
                counts, sums = ndimage._measurements._stats(
                    arr, labels=lbl, index=index)
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])

    def test_a(self):
        # "Small" integer labels.
        self._check([0, 1, 2, 6], [0, 0, 1, 1], [0, 1], centered=False)

    def test_b(self):
        # Same data as test_a, but different labels. The label 9 exceeds the
        # length of 'labels', so this test will follow a different code path.
        self._check([0, 1, 2, 6], [0, 0, 9, 9], [0, 9], centered=False)

    def test_a_centered(self):
        # As test_a but also requesting centered (about-the-mean) sums.
        self._check([0, 1, 2, 6], [0, 0, 1, 1], [0, 1], centered=True)

    def test_b_centered(self):
        # As test_b but also requesting centered sums.
        self._check([0, 1, 2, 6], [0, 0, 9, 9], [0, 9], centered=True)

    def test_nonint_labels(self):
        # Floating-point labels and index values are accepted as well.
        self._check([0, 1, 2, 6], [0.0, 0.0, 9.0, 9.0], [0.0, 9.0],
                    centered=True)
+
+
class Test_measurements_select:
    """ndimage._measurements._select() is a utility used by other functions."""

    def test_basic(self):
        # _select returns one array per requested statistic, in call order.
        values = [0, 1, 6, 2]
        cases = [
            ([0, 0, 1, 1], [0, 1]),  # "Small" integer labels
            ([0, 0, 9, 9], [0, 9]),  # A label larger than len(labels)
            ([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]),  # Non-integer labels
        ]
        for labels, index in cases:
            # Nothing requested -> nothing returned.
            result = ndimage._measurements._select(
                values, labels=labels, index=index)
            assert_(len(result) == 0)
            # Per-label maxima only.
            result = ndimage._measurements._select(
                values, labels=labels, index=index, find_max=True)
            assert_(len(result) == 1)
            assert_array_equal(result[0], [1, 6])
            # Per-label minima only.
            result = ndimage._measurements._select(
                values, labels=labels, index=index, find_min=True)
            assert_(len(result) == 1)
            assert_array_equal(result[0], [0, 2])
            # Minima plus their positions; positions are integer-typed.
            result = ndimage._measurements._select(
                values, labels=labels, index=index, find_min=True,
                find_min_positions=True)
            assert_(len(result) == 2)
            assert_array_equal(result[0], [0, 2])
            assert_array_equal(result[1], [0, 3])
            assert_equal(result[1].dtype.kind, 'i')
            # Maxima plus their positions; positions are integer-typed.
            result = ndimage._measurements._select(
                values, labels=labels, index=index, find_max=True,
                find_max_positions=True)
            assert_(len(result) == 2)
            assert_array_equal(result[0], [1, 6])
            assert_array_equal(result[1], [1, 2])
            assert_equal(result[1].dtype.kind, 'i')
+
+
def test_label01():
    # A truthy 0-d input forms exactly one feature.
    out, n = ndimage.label(np.ones([]))
    assert_array_almost_equal(out, 1)
    assert_equal(n, 1)
+
+
def test_label02():
    # A zero 0-d input yields no features.
    out, n = ndimage.label(np.zeros([]))
    assert_array_almost_equal(out, 0)
    assert_equal(n, 0)
+
+
def test_label03():
    # A single truthy element is one feature.
    out, n = ndimage.label(np.ones([1]))
    assert_array_almost_equal(out, [1])
    assert_equal(n, 1)
+
+
def test_label04():
    # A single zero element yields no features.
    out, n = ndimage.label(np.zeros([1]))
    assert_array_almost_equal(out, [0])
    assert_equal(n, 0)
+
+
def test_label05():
    # A contiguous run of ones is a single feature.
    out, n = ndimage.label(np.ones([5]))
    assert_array_almost_equal(out, [1, 1, 1, 1, 1])
    assert_equal(n, 1)
+
+
def test_label06():
    # Separated runs of ones receive distinct, consecutive labels.
    out, n = ndimage.label(np.array([1, 0, 1, 1, 0, 1]))
    assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3])
    assert_equal(n, 3)
+
+
def test_label07():
    # An all-zero 2-D array has zero features.
    data = np.array([[0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0]])
    out, n = ndimage.label(data)
    assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0]])
    assert_equal(n, 0)
+
+
def test_label08():
    # Default structure: diagonally-touching groups stay separate features.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 1, 1, 0, 0],
                     [0, 0, 1, 1, 1, 0],
                     [1, 1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0, 0],
                     [0, 0, 0, 1, 1, 0]])
    out, n = ndimage.label(data)
    assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
                                    [0, 0, 2, 2, 0, 0],
                                    [0, 0, 2, 2, 2, 0],
                                    [3, 3, 0, 0, 0, 0],
                                    [3, 3, 0, 0, 0, 0],
                                    [0, 0, 0, 4, 4, 0]])
    assert_equal(n, 4)
+
+
def test_label09():
    # Full (diagonal) connectivity merges groups 2 and 3 from test_label08.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 1, 1, 0, 0],
                     [0, 0, 1, 1, 1, 0],
                     [1, 1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0, 0],
                     [0, 0, 0, 1, 1, 0]])
    struct = ndimage.generate_binary_structure(2, 2)
    out, n = ndimage.label(data, struct)
    assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
                                    [0, 0, 2, 2, 0, 0],
                                    [0, 0, 2, 2, 2, 0],
                                    [2, 2, 0, 0, 0, 0],
                                    [2, 2, 0, 0, 0, 0],
                                    [0, 0, 0, 3, 3, 0]])
    assert_equal(n, 3)
+
+
def test_label10():
    # With diagonal connectivity the two clumps join into one feature.
    data = np.array([[0, 0, 0, 0, 0, 0],
                     [0, 1, 1, 0, 1, 0],
                     [0, 1, 1, 1, 1, 0],
                     [0, 0, 0, 0, 0, 0]])
    struct = ndimage.generate_binary_structure(2, 2)
    out, n = ndimage.label(data, struct)
    assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
                                    [0, 1, 1, 0, 1, 0],
                                    [0, 1, 1, 1, 1, 0],
                                    [0, 0, 0, 0, 0, 0]])
    assert_equal(n, 1)
+
+
def test_label11():
    # Labeling result is identical for every supported input dtype.
    for type in types:
        data = np.array([[1, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 0, 0],
                         [0, 0, 1, 1, 1, 0],
                         [1, 1, 0, 0, 0, 0],
                         [1, 1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 1, 0]], type)
        out, n = ndimage.label(data)
        expected = [[1, 0, 0, 0, 0, 0],
                    [0, 0, 2, 2, 0, 0],
                    [0, 0, 2, 2, 2, 0],
                    [3, 3, 0, 0, 0, 0],
                    [3, 3, 0, 0, 0, 0],
                    [0, 0, 0, 4, 4, 0]]
        assert_array_almost_equal(out, expected)
        assert_equal(n, 4)
+
+
def test_label11_inplace():
    # Same as test_label11, but writing labels back into the input array.
    for type in types:
        data = np.array([[1, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 0, 0],
                         [0, 0, 1, 1, 1, 0],
                         [1, 1, 0, 0, 0, 0],
                         [1, 1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 1, 0]], type)
        # With output= given, label() returns only the feature count.
        n = ndimage.label(data, output=data)
        expected = [[1, 0, 0, 0, 0, 0],
                    [0, 0, 2, 2, 0, 0],
                    [0, 0, 2, 2, 2, 0],
                    [3, 3, 0, 0, 0, 0],
                    [3, 3, 0, 0, 0, 0],
                    [0, 0, 0, 4, 4, 0]]
        assert_array_almost_equal(data, expected)
        assert_equal(n, 4)
+
+
def test_label12():
    # A single connected region keeps one label for every input dtype.
    for type in types:
        data = np.array([[0, 0, 0, 0, 1, 1],
                         [0, 0, 0, 0, 0, 1],
                         [0, 0, 1, 0, 1, 1],
                         [0, 0, 1, 1, 1, 1],
                         [0, 0, 0, 1, 1, 0]], type)
        out, n = ndimage.label(data)
        expected = [[0, 0, 0, 0, 1, 1],
                    [0, 0, 0, 0, 0, 1],
                    [0, 0, 1, 0, 1, 1],
                    [0, 0, 1, 1, 1, 1],
                    [0, 0, 0, 1, 1, 0]]
        assert_array_almost_equal(out, expected)
        assert_equal(n, 1)
+
+
def test_label13():
    # A winding but fully connected shape is one feature, for every dtype.
    for type in types:
        data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
                         [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
                         [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
                        type)
        out, n = ndimage.label(data)
        expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
                    [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
                    [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        assert_array_almost_equal(out, expected)
        assert_equal(n, 1)
+
+
def test_label_output_typed():
    # A preallocated output array of any supported dtype is filled in place.
    src = np.ones([5])
    for t in types:
        dest = np.zeros([5], dtype=t)
        count = ndimage.label(src, output=dest)
        assert_array_almost_equal(dest, 1)
        assert_equal(count, 1)
+
+
def test_label_output_dtype():
    # Passing a dtype as output= makes label() allocate that dtype itself.
    src = np.ones([5])
    for t in types:
        labelled, count = ndimage.label(src, output=t)
        assert_array_almost_equal(labelled, 1)
        assert labelled.dtype == t
+
+
def test_label_output_wrong_size():
    # A preallocated output with a mismatched shape must be rejected.
    data = np.ones([5])
    for t in types:
        output = np.zeros([10], t)
        assert_raises((RuntimeError, ValueError),
                      ndimage.label, data, output=output)
+
+
def test_label_structuring_elements():
    # Cross-check label() against stored fixtures: every input image is
    # labeled with every 3x3 structuring element; results are stored in
    # the same (input-major, strel-minor) order in label_results.txt.
    data = np.loadtxt(os.path.join(os.path.dirname(
        __file__), "data", "label_inputs.txt"))
    strels = np.loadtxt(os.path.join(
        os.path.dirname(__file__), "data", "label_strels.txt"))
    results = np.loadtxt(os.path.join(
        os.path.dirname(__file__), "data", "label_results.txt"))
    data = data.reshape((-1, 7, 7))
    strels = strels.reshape((-1, 3, 3))
    results = results.reshape((-1, 7, 7))
    r = 0
    for i in range(data.shape[0]):
        d = data[i, :, :]
        for j in range(strels.shape[0]):
            s = strels[j, :, :]
            assert_equal(ndimage.label(d, s)[0], results[r, :, :])
            r += 1
+
+
def test_ticket_742():
    # Regression test: label + find_objects on a large random volume
    # must not crash (only exercised where intp is wider than 'i').
    def SE(img, thresh=.7, size=4):
        mask = img > thresh
        rank = len(mask.shape)
        la, co = ndimage.label(mask,
                               ndimage.generate_binary_structure(rank, rank))
        _ = ndimage.find_objects(la)

    if np.dtype(np.intp) != np.dtype('i'):
        shape = (3, 1240, 1240)
        a = np.random.rand(np.prod(shape)).reshape(shape)
        # shouldn't crash
        SE(a)
+
+
def test_gh_issue_3025():
    """Github issue #3025 - improper merging of labels"""
    # Two large slabs bridged by a thin diagonal path must label as one
    # feature under full 8-connectivity.
    img = np.zeros((60, 320))
    img[:, :257] = 1
    img[:, 260:] = 1
    img[36, 257] = 1
    img[35, 258] = 1
    img[35, 259] = 1
    assert ndimage.label(img, np.ones((3, 3)))[1] == 1
+
+
def test_label_default_dtype():
    # Default label dtype is a platform integer accepted by find_objects.
    mask = np.random.rand(10, 10) > 0.5
    labelled, _ = ndimage.label(mask)
    assert_(labelled.dtype in (np.int32, np.int64))
    # Shouldn't raise an exception
    ndimage.find_objects(labelled)
+
+
def test_find_objects01():
    # A truthy 0-d array yields a single empty slice tuple.
    result = ndimage.find_objects(np.ones([], dtype=int))
    assert_(result == [()])
+
+
def test_find_objects02():
    # A zero 0-d array contains no objects.
    result = ndimage.find_objects(np.zeros([], dtype=int))
    assert_(result == [])
+
+
def test_find_objects03():
    # A single labelled element produces one unit-length slice.
    result = ndimage.find_objects(np.ones([1], dtype=int))
    assert_equal(result, [(slice(0, 1, None),)])
+
+
def test_find_objects04():
    # A single background element produces no slices.
    result = ndimage.find_objects(np.zeros([1], dtype=int))
    assert_equal(result, [])
+
+
def test_find_objects05():
    # One label spanning the whole 1-D array gives a full-length slice.
    result = ndimage.find_objects(np.ones([5], dtype=int))
    assert_equal(result, [(slice(0, 5, None),)])
+
+
def test_find_objects06():
    # Slices are returned in label order 1..max(label).
    labelled = np.array([1, 0, 2, 2, 0, 3])
    result = ndimage.find_objects(labelled)
    assert_equal(result, [(slice(0, 1, None),),
                          (slice(2, 4, None),),
                          (slice(5, 6, None),)])
+
+
def test_find_objects07():
    # An all-zero 2-D label array has no objects.
    data = np.array([[0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0]])
    out = ndimage.find_objects(data)
    assert_equal(out, [])
+
+
def test_find_objects08():
    # Each label 1..4 gets the tightest bounding-box slice pair.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 2, 2, 0, 0],
                     [0, 0, 2, 2, 2, 0],
                     [3, 3, 0, 0, 0, 0],
                     [3, 3, 0, 0, 0, 0],
                     [0, 0, 0, 4, 4, 0]])
    out = ndimage.find_objects(data)
    assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
                       (slice(1, 3, None), slice(2, 5, None)),
                       (slice(3, 5, None), slice(0, 2, None)),
                       (slice(5, 6, None), slice(3, 5, None))])
+
+
def test_find_objects09():
    # A missing label (3 here) yields None at its position in the result.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 2, 2, 0, 0],
                     [0, 0, 2, 2, 2, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 4, 4, 0]])
    out = ndimage.find_objects(data)
    assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
                       (slice(1, 3, None), slice(2, 5, None)),
                       None,
                       (slice(5, 6, None), slice(3, 5, None))])
+
+
def test_value_indices01():
    "Test dictionary keys and entries"
    # Keys appear in sorted value order and match np.where per value;
    # ignore_value=0 drops the background from the mapping.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 2, 2, 0, 0],
                     [0, 0, 2, 2, 2, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 4, 4, 0]])
    vi = ndimage.value_indices(data, ignore_value=0)
    true_keys = [1, 2, 4]
    assert_equal(list(vi.keys()), true_keys)

    truevi = {}
    for k in true_keys:
        truevi[k] = np.where(data == k)

    vi = ndimage.value_indices(data, ignore_value=0)
    assert_equal(vi, truevi)
+
+
def test_value_indices02():
    "Test input checking"
    # Non-integer input must be rejected with a descriptive message.
    arr = np.zeros((5, 4), dtype=np.float32)
    msg = "Parameter 'arr' must be an integer array"
    with assert_raises(ValueError, match=msg):
        ndimage.value_indices(arr)
+
+
def test_value_indices03():
    "Test different input array shapes, from 1-D to 4-D"
    for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
        arr = np.repeat(np.array([1, 2, 3], dtype=np.int32), 12).reshape(shape)
        expected_keys = np.unique(arr)
        indices = ndimage.value_indices(arr)
        assert_equal(list(indices.keys()), list(expected_keys))
        for value in expected_keys:
            assert_equal(indices[value], np.where(arr == value))
+
+
def test_sum01():
    # Empty input: sum is 0.0 for every supported dtype.
    for type in types:
        input = np.array([], type)
        output = ndimage.sum(input)
        assert_equal(output, 0.0)
+
+
def test_sum02():
    # Zero-row 2-D input: sum is 0.0 for every supported dtype.
    for type in types:
        input = np.zeros([0, 4], type)
        output = ndimage.sum(input)
        assert_equal(output, 0.0)
+
+
def test_sum03():
    # 0-d input of one: sum is 1.0 for every supported dtype.
    for type in types:
        input = np.ones([], type)
        output = ndimage.sum(input)
        assert_almost_equal(output, 1.0)
+
+
def test_sum04():
    # Plain 1-D sum without labels.
    for type in types:
        input = np.array([1, 2], type)
        output = ndimage.sum(input)
        assert_almost_equal(output, 3.0)
+
+
def test_sum05():
    # Plain 2-D sum without labels.
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input)
        assert_almost_equal(output, 10.0)
+
+
def test_sum06():
    # Empty input with empty boolean labels still sums to 0.0.
    labels = np.array([], bool)
    for type in types:
        input = np.array([], type)
        output = ndimage.sum(input, labels=labels)
        assert_equal(output, 0.0)
+
+
def test_sum07():
    # Zero-row input with matching boolean labels sums to 0.0.
    labels = np.ones([0, 4], bool)
    for type in types:
        input = np.zeros([0, 4], type)
        output = ndimage.sum(input, labels=labels)
        assert_equal(output, 0.0)
+
+
def test_sum08():
    # Boolean labels select only the first element.
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([1, 2], type)
        output = ndimage.sum(input, labels=labels)
        assert_equal(output, 1.0)
+
+
def test_sum09():
    # 1-D boolean labels broadcast over the 2-D input's columns: 1 + 3.
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input, labels=labels)
        assert_almost_equal(output, 4.0)
+
+
def test_sum10():
    # Boolean input is summed as 0/1 values under the broadcast label mask.
    mask = np.array([1, 0], bool)
    img = np.array([[1, 2], [3, 4]], bool)
    assert_almost_equal(ndimage.sum(img, labels=mask), 2.0)
+
+
def test_sum11():
    # Integer labels with a scalar index: sum only where label == 2.
    labels = np.array([1, 2], np.int8)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input, labels=labels,
                             index=2)
        assert_almost_equal(output, 6.0)
+
+
def test_sum12():
    # A list index returns per-label sums; absent label 8 contributes 0.0.
    labels = np.array([[1, 2], [2, 4]], np.int8)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input, labels=labels, index=[4, 8, 2])
        assert_array_almost_equal(output, [4.0, 0.0, 5.0])
+
+
def test_sum_labels():
    # sum_labels is an alias of sum and must return identical results.
    labels = np.array([[1, 2], [2, 4]], np.int8)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2])
        output_labels = ndimage.sum_labels(
            input, labels=labels, index=[4, 8, 2])

        assert (output_sum == output_labels).all()
        assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0])
+
+
def test_mean01():
    # Mean over the boolean-masked first column: (1 + 3) / 2.
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels)
        assert_almost_equal(output, 2.0)
+
+
def test_mean02():
    # Boolean input is averaged as 0/1 values; the masked column is all True.
    mask = np.array([1, 0], bool)
    img = np.array([[1, 2], [3, 4]], bool)
    assert_almost_equal(ndimage.mean(img, labels=mask), 1.0)
+
+
def test_mean03():
    # Scalar index selects label 2's column: mean of (2, 4).
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels,
                              index=2)
        assert_almost_equal(output, 3.0)
+
+
def test_mean04():
    # A label absent from 'labels' (8) produces NaN in the mean output;
    # the errstate guard silences the expected divide-by-zero warning.
    labels = np.array([[1, 2], [2, 4]], np.int8)
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([[1, 2], [3, 4]], type)
            output = ndimage.mean(input, labels=labels,
                                  index=[4, 8, 2])
            assert_array_almost_equal(output[[0, 2]], [4.0, 2.5])
            assert_(np.isnan(output[1]))
+
+
def test_minimum01():
    # Minimum over the boolean-masked first column: min(1, 3).
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels)
        assert_almost_equal(output, 1.0)
+
+
def test_minimum02():
    # Boolean input: every masked pixel is True, so the minimum is 1.0.
    mask = np.array([1, 0], bool)
    img = np.array([[2, 2], [2, 4]], bool)
    assert_almost_equal(ndimage.minimum(img, labels=mask), 1.0)
+
+
def test_minimum03():
    # Scalar index selects label 2's column: min(2, 4).
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels,
                                 index=2)
        assert_almost_equal(output, 2.0)
+
+
def test_minimum04():
    # Per-label minima; an absent label (8) yields 0.0.
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels,
                                 index=[2, 3, 8])
        assert_array_almost_equal(output, [2.0, 4.0, 0.0])
+
+
def test_maximum01():
    # Maximum over the boolean-masked first column: max(1, 3).
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels)
        assert_almost_equal(output, 3.0)
+
+
def test_maximum02():
    # Boolean input: the masked maximum is True, reported as 1.0.
    mask = np.array([1, 0], bool)
    img = np.array([[2, 2], [2, 4]], bool)
    assert_almost_equal(ndimage.maximum(img, labels=mask), 1.0)
+
+
def test_maximum03():
    # Scalar index selects label 2's column: max(2, 4).
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels,
                                 index=2)
        assert_almost_equal(output, 4.0)
+
+
def test_maximum04():
    # Per-label maxima; an absent label (8) yields 0.0.
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels,
                                 index=[2, 3, 8])
        assert_array_almost_equal(output, [3.0, 4.0, 0.0])
+
+
def test_maximum05():
    # Regression test for ticket #501 (Trac)
    # All-negative input must not be clamped to zero.
    negatives = np.array([-3, -2, -1])
    assert_equal(ndimage.maximum(negatives), -1)
+
+
def test_median01():
    # Per-label medians for labels 1, 2, and 3.
    values = np.array([[1, 2, 0, 1],
                       [5, 3, 0, 4],
                       [0, 0, 0, 7],
                       [9, 3, 0, 0]])
    regions = np.array([[1, 1, 0, 2],
                        [1, 1, 0, 2],
                        [0, 0, 0, 2],
                        [3, 3, 0, 0]])
    medians = ndimage.median(values, labels=regions, index=[1, 2, 3])
    assert_array_almost_equal(medians, [2.5, 4.0, 6.0])
+
+
def test_median02():
    # Median of the whole array when no labels are given.
    values = np.array([[1, 2, 0, 1],
                       [5, 3, 0, 4],
                       [0, 0, 0, 7],
                       [9, 3, 0, 0]])
    assert_almost_equal(ndimage.median(values), 1.0)
+
+
def test_median03():
    # With labels but no index: median over all nonzero-labelled pixels.
    a = np.array([[1, 2, 0, 1],
                  [5, 3, 0, 4],
                  [0, 0, 0, 7],
                  [9, 3, 0, 0]])
    labels = np.array([[1, 1, 0, 2],
                       [1, 1, 0, 2],
                       [0, 0, 0, 2],
                       [3, 3, 0, 0]])
    output = ndimage.median(a, labels=labels)
    assert_almost_equal(output, 3.0)
+
+
def test_median_gh12836_bool():
    # test boolean addition fix on example from gh-12836
    flags = np.asarray([1, 1], dtype=bool)
    result = ndimage.median(flags, labels=np.ones((2,)), index=[1])
    assert_array_almost_equal(result, [1.0])
+
+
def test_median_no_int_overflow():
    # test integer overflow fix on example from gh-12836
    narrow = np.asarray([65, 70], dtype=np.int8)
    result = ndimage.median(narrow, labels=np.ones((2,)), index=[1])
    assert_array_almost_equal(result, [67.5])
+
+
def test_variance01():
    # Empty input: variance is NaN; the empty-slice warning is suppressed.
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.variance(input)
            assert_(np.isnan(output))
+
+
def test_variance02():
    # A single element has zero variance.
    for type in types:
        input = np.array([1], type)
        output = ndimage.variance(input)
        assert_almost_equal(output, 0.0)
+
+
def test_variance03():
    # Population variance of (1, 3) is 1.0.
    for type in types:
        input = np.array([1, 3], type)
        output = ndimage.variance(input)
        assert_almost_equal(output, 1.0)
+
+
def test_variance04():
    # A balanced boolean array has population variance p*(1-p) = 0.25.
    flags = np.array([1, 0], bool)
    assert_almost_equal(ndimage.variance(flags), 0.25)
+
+
def test_variance05():
    # Variance restricted to label 2's elements (1, 3).
    labels = [2, 2, 3]
    for type in types:
        input = np.array([1, 3, 8], type)
        output = ndimage.variance(input, labels, 2)
        assert_almost_equal(output, 1.0)
+
+
def test_variance06():
    # Per-label variances; a single-element label (4) has variance 0.0.
    labels = [2, 2, 3, 3, 4]
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.variance(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
+
+
def test_standard_deviation01():
    # Empty input: standard deviation is NaN; warning suppressed.
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.standard_deviation(input)
            assert_(np.isnan(output))
+
+
def test_standard_deviation02():
    # A single element has zero standard deviation.
    for type in types:
        input = np.array([1], type)
        output = ndimage.standard_deviation(input)
        assert_almost_equal(output, 0.0)
+
+
def test_standard_deviation03():
    # Standard deviation of (1, 3) is sqrt of the variance 1.0.
    for type in types:
        input = np.array([1, 3], type)
        output = ndimage.standard_deviation(input)
        assert_almost_equal(output, np.sqrt(1.0))
+
+
def test_standard_deviation04():
    # Balanced boolean input: sqrt(0.25) = 0.5.
    flags = np.array([1, 0], bool)
    assert_almost_equal(ndimage.standard_deviation(flags), 0.5)
+
+
def test_standard_deviation05():
    # Standard deviation restricted to label 2's elements (1, 3).
    labels = [2, 2, 3]
    for type in types:
        input = np.array([1, 3, 8], type)
        output = ndimage.standard_deviation(input, labels, 2)
        assert_almost_equal(output, 1.0)
+
+
def test_standard_deviation06():
    # Per-label standard deviations; single-element label 4 gives 0.0.
    labels = [2, 2, 3, 3, 4]
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.standard_deviation(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
+
+
def test_standard_deviation07():
    # Single negative float element: deviation must be exactly 0, not a
    # tiny negative-rounding artefact.
    labels = [1]
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([-0.00619519], type)
            output = ndimage.standard_deviation(input, labels, [1])
            assert_array_almost_equal(output, [0])
+
+
def test_minimum_position01():
    # Minimum within the boolean-masked first column sits at (0, 0).
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum_position(input, labels=labels)
        assert_equal(output, (0, 0))
+
+
def test_minimum_position02():
    # Unlabelled search: global minimum 0 is at row 1, column 2.
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 0, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.minimum_position(input)
        assert_equal(output, (1, 2))
+
+
def test_minimum_position03():
    # Boolean view of the data: the single False pixel is the minimum.
    img = np.array([[5, 4, 2, 5],
                    [3, 7, 0, 2],
                    [1, 5, 1, 1]], bool)
    assert_equal(ndimage.minimum_position(img), (1, 2))
+
+
def test_minimum_position04():
    # All entries are nonzero, so as booleans they tie; first position wins.
    img = np.array([[5, 4, 2, 5],
                    [3, 7, 1, 2],
                    [1, 5, 1, 1]], bool)
    assert_equal(ndimage.minimum_position(img), (0, 0))
+
+
def test_minimum_position05():
    # Column with label 0 is excluded from the search.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 0, 2],
                          [1, 5, 2, 3]], type)
        output = ndimage.minimum_position(input, labels)
        assert_equal(output, (2, 0))
+
+
def test_minimum_position06():
    # Scalar index restricts the search to label 2's column.
    labels = [1, 2, 3, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 0, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.minimum_position(input, labels, 2)
        assert_equal(output, (0, 1))
+
+
def test_minimum_position07():
    # A list index returns one position per requested label.
    labels = [1, 2, 3, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 0, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.minimum_position(input, labels,
                                          [2, 3])
        assert_equal(output[0], (0, 1))
        assert_equal(output[1], (1, 2))
+
+
def test_maximum_position01():
    # Maximum within the boolean-masked first column sits at (1, 0).
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum_position(input,
                                          labels=labels)
        assert_equal(output, (1, 0))
+
+
def test_maximum_position02():
    # Unlabelled search: global maximum 8 is at row 1, column 2.
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input)
        assert_equal(output, (1, 2))
+
+
def test_maximum_position03():
    # Boolean view: all pixels are True, so the first position is reported.
    img = np.array([[5, 4, 2, 5],
                    [3, 7, 8, 2],
                    [1, 5, 1, 1]], bool)
    assert_equal(ndimage.maximum_position(img), (0, 0))
+
+
def test_maximum_position04():
    # Column with label 0 is excluded from the search.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels)
        assert_equal(output, (1, 1))
+
+
def test_maximum_position05():
    # Scalar index restricts the search to label 1's column.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels, 1)
        assert_equal(output, (0, 0))
+
+
def test_maximum_position06():
    # A list index returns one position per requested label.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels,
                                          [1, 2])
        assert_equal(output[0], (0, 0))
        assert_equal(output[1], (1, 1))
+
+
def test_maximum_position07():
    # Test float labels
    labels = np.array([1.0, 2.5, 0.0, 4.5])
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels,
                                          [1.0, 4.5])
        assert_equal(output[0], (0, 0))
        assert_equal(output[1], (0, 3))
+
+
def test_extrema01():
    # extrema() must match (minimum, maximum, minimum_position,
    # maximum_position) computed individually, for every dtype.
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels)
        output2 = ndimage.minimum(input, labels=labels)
        output3 = ndimage.maximum(input, labels=labels)
        output4 = ndimage.minimum_position(input,
                                           labels=labels)
        output5 = ndimage.maximum_position(input,
                                           labels=labels)
        assert_equal(output1, (output2, output3, output4, output5))
+
+
def test_extrema02():
    # Same agreement as test_extrema01, with a scalar index.
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels,
                                  index=2)
        output2 = ndimage.minimum(input, labels=labels,
                                  index=2)
        output3 = ndimage.maximum(input, labels=labels,
                                  index=2)
        output4 = ndimage.minimum_position(input,
                                           labels=labels, index=2)
        output5 = ndimage.maximum_position(input,
                                           labels=labels, index=2)
        assert_equal(output1, (output2, output3, output4, output5))
+
+
def test_extrema03():
    # Same agreement with a list index, including an absent label (8).
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels,
                                  index=[2, 3, 8])
        output2 = ndimage.minimum(input, labels=labels,
                                  index=[2, 3, 8])
        output3 = ndimage.maximum(input, labels=labels,
                                  index=[2, 3, 8])
        output4 = ndimage.minimum_position(input,
                                           labels=labels, index=[2, 3, 8])
        output5 = ndimage.maximum_position(input,
                                           labels=labels, index=[2, 3, 8])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
+
+
def test_extrema04():
    # Same agreement on a wider array with positional label arguments.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output1 = ndimage.extrema(input, labels, [1, 2])
        output2 = ndimage.minimum(input, labels, [1, 2])
        output3 = ndimage.maximum(input, labels, [1, 2])
        output4 = ndimage.minimum_position(input, labels,
                                           [1, 2])
        output5 = ndimage.maximum_position(input, labels,
                                           [1, 2])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
+
+
def test_center_of_mass01():
    # Mass concentrated at (0, 0), checked for every dtype.
    expected = [0.0, 0.0]
    for type in types:
        input = np.array([[1, 0], [0, 0]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
+
+
def test_center_of_mass02():
    # Mass concentrated at (1, 0).
    expected = [1, 0]
    for type in types:
        input = np.array([[0, 0], [1, 0]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
+
+
def test_center_of_mass03():
    # Mass concentrated at (0, 1).
    expected = [0, 1]
    for type in types:
        input = np.array([[0, 1], [0, 0]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
+
+
def test_center_of_mass04():
    # Mass concentrated at (1, 1).
    expected = [1, 1]
    for type in types:
        input = np.array([[0, 0], [0, 1]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
+
+
def test_center_of_mass05():
    # Uniform mass: centroid is the geometric center of the array.
    expected = [0.5, 0.5]
    for type in types:
        input = np.array([[1, 1], [1, 1]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
+
+
def test_center_of_mass06():
    # Nonzero values cast to an all-True boolean array: centroid is central.
    img = np.array([[1, 2], [3, 1]], bool)
    assert_array_almost_equal(ndimage.center_of_mass(img), [0.5, 0.5])
+
+
def test_center_of_mass07():
    # Labels restrict the centroid to the first column.
    labels = [1, 0]
    img = np.array([[1, 2], [3, 1]], bool)
    assert_array_almost_equal(ndimage.center_of_mass(img, labels), [0.5, 0.0])
+
+
def test_center_of_mass08():
    # A scalar index picks out label 2 (the second column).
    labels = [1, 2]
    img = np.array([[5, 2], [3, 1]], bool)
    assert_array_almost_equal(ndimage.center_of_mass(img, labels, 2),
                              [0.5, 1.0])
+
+
def test_center_of_mass09():
    # A list index returns one centroid tuple per label.
    labels = [1, 2]
    img = np.array([[1, 2], [1, 1]], bool)
    centroids = ndimage.center_of_mass(img, labels, [1, 2])
    assert_array_almost_equal(centroids, [(0.5, 0.0), (0.5, 1.0)])
+
+
def test_histogram01():
    # Ten uniform bins over [0, 10): one sample lands in each bin.
    samples = np.arange(10)
    assert_array_almost_equal(ndimage.histogram(samples, 0, 10, 10),
                              np.ones(10))
+
+
def test_histogram02():
    # Only samples under label 1 are binned.
    labels = [1, 1, 1, 1, 2, 2, 2, 2]
    samples = np.array([1, 1, 3, 4, 3, 3, 3, 3])
    counts = ndimage.histogram(samples, 0, 4, 5, labels, 1)
    assert_array_almost_equal(counts, [0, 2, 0, 1, 1])
+
+
def test_histogram03():
    # A tuple index yields one histogram per label; out-of-range samples
    # (the 5 under label 2) are simply dropped.
    labels = [1, 0, 1, 1, 2, 2, 2, 2]
    samples = np.array([1, 1, 3, 4, 3, 5, 3, 3])
    result = ndimage.histogram(samples, 0, 4, 5, labels, (1, 2))

    assert_array_almost_equal(result[0], [0, 1, 0, 1, 1])
    assert_array_almost_equal(result[1], [0, 0, 0, 3, 0])
+
+
def test_stat_funcs_2d():
    # Smoke-check all per-label statistics on one 2-D labelled image.
    a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
    lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])

    mean = ndimage.mean(a, labels=lbl, index=[1, 2])
    assert_array_equal(mean, [7.0, 4.0])

    var = ndimage.variance(a, labels=lbl, index=[1, 2])
    assert_array_equal(var, [2.5, 1.0])

    std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
    assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))

    med = ndimage.median(a, labels=lbl, index=[1, 2])
    assert_array_equal(med, [7.0, 4.0])

    # NOTE: 'min'/'max' shadow the builtins locally; harmless in a test.
    min = ndimage.minimum(a, labels=lbl, index=[1, 2])
    assert_array_equal(min, [5, 3])

    max = ndimage.maximum(a, labels=lbl, index=[1, 2])
    assert_array_equal(max, [9, 5])
+
+
+class TestWatershedIft:
+
    def test_watershed_ift01(self):
        # 8-connected structure: the inner marker (1) floods the whole
        # square outline; the background marker (-1) takes the rest.
        data = np.array([[0, 0, 0, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 1, 0, 0, 0, 1, 0],
                         [0, 1, 0, 0, 0, 1, 0],
                         [0, 1, 0, 0, 0, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
        markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], np.int8)
        out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
                                                              [1, 1, 1],
                                                              [1, 1, 1]])
        expected = [[-1, -1, -1, -1, -1, -1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift02(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers)
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, 1, 1, 1, -1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, -1, 1, 1, 1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift03(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 2, 0, 3, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, -1]], np.int8)
+ out = ndimage.watershed_ift(data, markers)
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, 2, -1, 3, -1, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, -1, 2, -1, 3, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift04(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 2, 0, 3, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, -1]],
+ np.int8)
+ out = ndimage.watershed_ift(data, markers,
+ structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift05(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 3, 0, 2, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, -1]],
+ np.int8)
+ out = ndimage.watershed_ift(data, markers,
+ structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift06(self):
+ data = np.array([[0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers,
+ structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+ expected = [[-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift07(self):
+ shape = (7, 6)
+ data = np.zeros(shape, dtype=np.uint8)
+ data = data.transpose()
+ data[...] = np.array([[0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
+ out = np.zeros(shape, dtype=np.int16)
+ out = out.transpose()
+ ndimage.watershed_ift(data, markers,
+ structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]],
+ output=out)
+ expected = [[-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift08(self):
+ # Test cost larger than uint8. See gh-10069.
+ data = np.array([[256, 0],
+ [0, 0]], np.uint16)
+ markers = np.array([[1, 0],
+ [0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers)
+ expected = [[1, 1],
+ [1, 1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift09(self):
+ # Test large cost. See gh-19575
+ data = np.array([[np.iinfo(np.uint16).max, 0],
+ [0, 0]], np.uint16)
+ markers = np.array([[1, 0],
+ [0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers)
+ expected = [[1, 1],
+ [1, 1]]
+ assert_allclose(out, expected)
+
+
+@pytest.mark.parametrize("dt", [np.intc, np.uintc])
+def test_gh_19423(dt):
+ rng = np.random.default_rng(123)
+ max_val = 8
+ image = rng.integers(low=0, high=max_val, size=(10, 12)).astype(dtype=dt)
+ val_idx = ndimage.value_indices(image)
+ assert len(val_idx.keys()) == max_val
diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1ffcdf3f265ff0cc09fbcacbd6a547691cc332c
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8ff704c90030228786e54282b7a385179bc5536
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc differ
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h
new file mode 100644
index 0000000000000000000000000000000000000000..525790b8c86b4d10f03b6f84a73a31c6a253bef1
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h
@@ -0,0 +1,29 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace torch {
+namespace distributed {
+namespace autograd {
+
+// Used to request other workers to clean up their autograd context.
+class TORCH_API CleanupAutogradContextReq : public rpc::RpcCommandBase {
+ public:
+ explicit CleanupAutogradContextReq(int64_t context_id);
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr toMessageImpl() && override;
+ static std::unique_ptr fromMessage(
+ const rpc::Message& message);
+
+ // Retrieve the context id we are cleaning up with this message.
+ int64_t getContextId();
+
+ private:
+ int64_t context_id_;
+};
+
+} // namespace autograd
+} // namespace distributed
+} // namespace torch
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d0b6111cc88cd5a1df33d334851f8d17e166941
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h
@@ -0,0 +1,98 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace torch {
+namespace distributed {
+namespace autograd {
+
+// Represents an RPC that includes autograd information. This class basically
+// wraps another `RpcCommandBase` object which represents the actual RPC and has
+// additional autograd information associated with that RPC.
+class TORCH_API RpcWithAutograd final : public rpc::RpcCommandBase {
+ public:
+ // Used when we are sending an RPC over the wire.
+ RpcWithAutograd(
+ rpc::worker_id_t fromWorkerId,
+ rpc::MessageType messageType,
+ const AutogradMetadata& autogradMetadata,
+ c10::intrusive_ptr wrappedMessage,
+ rpc::DeviceMap deviceMap = {});
+
+ // Used when receiving an RPC over the wire.
+ RpcWithAutograd(
+ rpc::worker_id_t fromWorkerId,
+ rpc::MessageType messageType,
+ const AutogradMetadata& autogradMetadata,
+ std::unique_ptr wrappedRpc,
+ rpc::MessageType wrappedMessageType,
+ std::vector tensors,
+ rpc::DeviceMap deviceMap = {});
+
+ c10::intrusive_ptr toMessageImpl() && override;
+
+ static std::unique_ptr fromMessage(
+ const rpc::Message& message);
+
+ // Retrieves tensors as part of this RPC, which need to be considered for
+ // autograd computations.
+ std::vector& tensors();
+
+ const AutogradMetadata& autogradMetadata() const;
+
+ RpcCommandBase& wrappedRpc();
+
+ void setWrappedRpc(std::unique_ptr wrappedRpc);
+
+ std::unique_ptr moveWrappedRpc() &&;
+
+ // Message type of the wrapped RPC.
+ rpc::MessageType wrappedMessageType() const;
+
+ // Retrieve the worker id from which the RPC originated.
+ rpc::worker_id_t fromWorkerId() const;
+
+ // Retrieve the device map.
+ const rpc::DeviceMap& deviceMap();
+
+ private:
+ // WorkerId from which this RPC originated. This is necessary for knowing
+ // which worker we need to contact during the backward pass.
+ rpc::worker_id_t fromWorkerId_;
+
+ // Message type for this call.
+ rpc::MessageType messageType_;
+
+ AutogradMetadata autogradMetadata_;
+
+ // Since wrappedMessage_ is destructively constructed from wrappedRpc_,
+ // they are valid exclusively. They are used for different purpose.
+ // wrappedRpc_ is used while constructing receive rpcWithAutograd;
+ // wrappedMessage_ is used while constructing send rpcWithAutograd;
+
+ // When receive rpcWithAutograd is constructed fromMessage, it is valid;
+ // When send rpcWithAutograd is constructed before toMessage, it is nullptr;
+ std::unique_ptr wrappedRpc_;
+
+ // Serialized message representing wrappedRpc_. Used mostly as a cache to
+ // avoid serializing the request twice.
+ // When receive rpcWithAutograd is constructed fromMessage, it is nullptr;
+ // When send rpcWithAutograd is constructed before toMessage, it is valid;
+ c10::intrusive_ptr wrappedMessage_;
+
+ // message type of the wrappedMessage, this is stored separately since
+ // wrappedMessage_ is not always guaranteed to be populated.
+ rpc::MessageType wrappedMessageType_;
+
+ // Tensors part of the wrappedRpc that need to be considered for autograd.
+ std::vector tensors_;
+
+ // Device mapping for tensors that are sent across an RPC to another node.
+ rpc::DeviceMap deviceMap_;
+};
+
+} // namespace autograd
+} // namespace distributed
+} // namespace torch
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dc4413cfa50980af4df98bd88c9fd57e86a2a75
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h
@@ -0,0 +1,39 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace torch {
+namespace distributed {
+namespace autograd {
+
+// Internal system RPC to invoke distributed backward pass on remote nodes when
+// 'rref.backward()' is invoked.
+class TORCH_API RRefBackwardReq : public rpc::RpcCommandBase {
+ public:
+ RRefBackwardReq(
+ const rpc::RRefId& rrefId,
+ int64_t autogradContextId,
+ bool retainGraph = false);
+
+ const rpc::RRefId& getRRefId() const;
+
+ int64_t getAutogradContextId() const;
+
+ bool retainGraph() const;
+
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr toMessageImpl() && override;
+ static std::unique_ptr fromMessage(
+ const rpc::Message& message);
+
+ private:
+ const rpc::RRefId rrefId_;
+ const int64_t autogradContextId_;
+ const bool retainGraph_;
+};
+
+} // namespace autograd
+} // namespace distributed
+} // namespace torch
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..df000667d26385c128a8e1cf3d625027196570c2
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp
@@ -0,0 +1,383 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+constexpr auto kBackendDefaultTimeout =
+ std::chrono::milliseconds(30 * 60 * 1000);
+
+namespace c10d {
+
+class TORCH_API Backend : public torch::CustomClassHolder {
+ public:
+ // Backend Options is a base struct that defines the basic options
+ // when constructing a Backend. Each Backend subclass should
+ // extend this struct and define its options if it wants to provide more
+ // config options (beyond basic ones defined here) to end user.
+ struct TORCH_API Options : torch::CustomClassHolder {
+ explicit Options(
+ std::string backend,
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout)
+ : timeout(timeout), backend(std::move(backend)) {}
+ ~Options() override = default;
+
+ std::chrono::milliseconds timeout;
+
+ // backend name
+ const std::string backend;
+ };
+
+ explicit Backend(int rank, int size);
+ ~Backend() override = 0;
+
+ int getRank() const {
+ return rank_;
+ }
+
+ int getSize() const {
+ return size_;
+ }
+
+ // Returns an unique opaque ID of this backend that can be used to correlate
+ // with its collectives.
+ int64_t getID() const {
+ return reinterpret_cast(this);
+ }
+
+ virtual void startCoalescing() {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not implement startCoalescing"));
+ }
+
+ virtual c10::intrusive_ptr endCoalescing() {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not implement endCoalescing"));
+ }
+
+ // Subclasses must override this method to return the backend name
+ virtual const std::string getBackendName() const {
+ TORCH_INTERNAL_ASSERT(false, "getBackendName is not implemented.");
+ };
+
+ virtual c10::intrusive_ptr broadcast(
+ std::vector& /* tensors */,
+ const BroadcastOptions& /* opts */ = BroadcastOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support broadcast"));
+ }
+
+ virtual c10::intrusive_ptr allreduce(
+ std::vector& /* tensors */,
+ const AllreduceOptions& /* opts */ = AllreduceOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support allreduce"));
+ }
+
+ virtual c10::intrusive_ptr allreduce_sparse(
+ std::vector& /* tensors */,
+ const AllreduceOptions& /* opts */ = AllreduceOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support allreduce sparse"));
+ }
+
+ virtual c10::intrusive_ptr allreduce_coalesced(
+ std::vector& /* tensors */,
+ const AllreduceCoalescedOptions& /* opts */ =
+ AllreduceCoalescedOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support allreduce_coalesced"));
+ }
+
+ virtual c10::intrusive_ptr reduce(
+ std::vector& /* tensors */,
+ const ReduceOptions& /* opts */ = ReduceOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support reduce"));
+ }
+
+ virtual c10::intrusive_ptr allgather(
+ std::vector>& /* outputTensors */,
+ std::vector& /* inputTensors */,
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support allgather"));
+ }
+
+ // Gathers a single tensor inputBuffer into a single buffer outputBuffer that
+ // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
+ // For implementers of ProcessGroup API and advanced users only.
+ // Note: this function will be deprecated in near future.
+ virtual c10::intrusive_ptr _allgather_base(
+ at::Tensor& /* outputBuffer */,
+ at::Tensor& /* inputBuffer */,
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not support _allgather_base"));
+ }
+
+ // This function is deprecated and will be moved out of Backend to comms:
+ // * do not add dependencies on this function,
+ // * do not implement it in your Backend, implement _allgather_base
+ // instead.
+ virtual c10::intrusive_ptr allgather_coalesced(
+ std::vector>& /* outputTensorLists */,
+ std::vector& /* inputTensors */,
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support allgather_coalesced"));
+ }
+
+ // This function is a coalesced version of `allgather_into_tensor` (currently
+ // still named as `_allgather_base`). Each tensor in the vector corresponds to
+ // an input/output of one `allgather_into_tensor` operation.
+ virtual c10::intrusive_ptr allgather_into_tensor_coalesced(
+ std::vector& /* outputs */,
+ std::vector& /* inputs */,
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support allgather_into_tensor_coalesced"));
+ }
+
+ virtual c10::intrusive_ptr gather(
+ std::vector>& /* outputTensors */,
+ std::vector& /* inputTensors */,
+ const GatherOptions& /* opts */ = GatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support gather"));
+ }
+
+ virtual c10::intrusive_ptr scatter(
+ std::vector& /* outputTensors */,
+ std::vector>& /* inputTensors */,
+ const ScatterOptions& /* opts */ = ScatterOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support scatter"));
+ }
+
+ virtual c10::intrusive_ptr reduce_scatter(
+ std::vector& /* outputTensors */,
+ std::vector>& /* inputTensors */,
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not support reduce_scatter"));
+ }
+
+ virtual c10::intrusive_ptr _reduce_scatter_base(
+ at::Tensor& /* outputBuffer */,
+ at::Tensor& /* inputBuffer */,
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support _reduce_scatter_base"));
+ }
+
+ // This function is a coalesced version of `reduce_scatter_tensor` (currently
+ // still named as `_reduce_scatter_base`). Each tensor in the vector
+ // corresponds to an input/output of one `reduce_scatter_tensor` operation.
+ virtual c10::intrusive_ptr reduce_scatter_tensor_coalesced(
+ std::vector& /* outputs */,
+ std::vector& /* inputs */,
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support reduce_scatter_tensor_coalesced"));
+ }
+
+ virtual c10::intrusive_ptr alltoall_base(
+ at::Tensor& /* outputBuffer */,
+ at::Tensor& /* inputBuffer */,
+ std::vector& /* outputSplitSizes */,
+ std::vector& /* inputSplitSizes */,
+ const AllToAllOptions& /* opts */ = AllToAllOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not support alltoall_base"));
+ }
+
+ virtual c10::intrusive_ptr alltoall(
+ std::vector& /* outputTensors */,
+ std::vector& /* inputTensors */,
+ const AllToAllOptions& opts = AllToAllOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support alltoall"));
+ }
+
+ virtual void monitoredBarrier(
+ const BarrierOptions& /* unused */,
+ bool /* unused */ = false) {
+ auto backendName = getBackendName();
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ backendName,
+ " does not support monitoredBarrier, only GLOO supports monitored barrier."));
+ }
+
+ // Agrees on an initial sequence number for the whole group by having rank 0
+ // create it and broadcast it to other ranks using the store. Only implemented
+ // for GLOO and NCCL backends currently.
+ virtual void setSequenceNumberForGroup() {
+ auto backendName = getBackendName();
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ backendName,
+ " does not yet support sequence numbers."));
+ }
+
+ // Retrieves the current sequence number for the whole group, which should be
+ // in sync. If the returned number is not consistent across the group, it
+ // may indicate that there is some sort of collective desynchronization.
+ virtual uint64_t getSequenceNumberForGroup() {
+ auto backendName = getBackendName();
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ backendName,
+ " does not yet support sequence numbers."));
+ }
+
+ virtual c10::intrusive_ptr send(
+ std::vector& /* tensors */,
+ int /* dstRank */,
+ int /* tag */) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support send"));
+ }
+
+ virtual c10::intrusive_ptr recv(
+ std::vector& /* tensors */,
+ int /* srcRank */,
+ int /* tag */) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support recv"));
+ }
+
+ virtual c10::intrusive_ptr recvAnysource(
+ std::vector& /* tensors */,
+ int /* tag */) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not support recvAnysource"));
+ }
+
+ virtual c10::intrusive_ptr barrier(
+ const BarrierOptions& /* opts */ = BarrierOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support barrier"));
+ }
+
+ virtual void registerOnCompletionHook(
+ std::function)>&& hook) {
+ TORCH_CHECK(
+ false,
+ "Only ProcessGrouppNCCL supports onCompletion hook, but got ",
+ getBackendName(),
+ " backend.");
+ }
+
+ virtual void waitForPendingWorks() {
+ TORCH_CHECK(
+ false,
+ "Only ProcessGrouppNCCL supports waitForPendingWorks, but got ",
+ getBackendName(),
+ " backend.");
+ }
+
+ virtual void enableCollectivesTiming() {
+ TORCH_CHECK(
+ false,
+ "Backend ",
+ getBackendName(),
+ " is missing implementation of enableCollectivesTiming.");
+ }
+
+ bool hasHooks() const {
+ return onCompletionHook_ != nullptr;
+ }
+
+ // Do not call this directly, use ProcessGroup::setGroupName instead.
+ void setGroupName(const std::string& name) {
+ pg_name_ = name;
+ }
+
+ const std::string& getGroupName() const {
+ return pg_name_;
+ }
+
+ protected:
+ // Implementations of this interface need to call this to setup
+ // appropriate logging etc.
+ void init();
+
+ const int rank_;
+ const int size_;
+ // Debug level setting. It is parsed once when ProcessGroup is constructed and
+ // remains the same across use of this process group.
+ DebugLevel dist_debug_level_;
+ std::string pg_name_;
+
+ std::function)> onCompletionHook_;
+};
+
+} // namespace c10d
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..03cb8c42c193b177872b3983cde8124e85f6ee2e
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp
@@ -0,0 +1,12 @@
+#include
+
+namespace c10d_functional {
+
+void register_process_group(
+ const std::string& tag,
+ c10::intrusive_ptr pg);
+
+c10::intrusive_ptr resolve_process_group(
+ const std::string& tag);
+
+} // namespace c10d_functional
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3fa0ca69892a6d12686415b0087d6002b764f8a9
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp
@@ -0,0 +1,14 @@
+#pragma once
+
+#include
+
+namespace c10d {
+
+C10_EXPORT void register_process_group(
+ const std::string& group_name,
+ c10::intrusive_ptr group);
+
+C10_EXPORT c10::intrusive_ptr resolve_process_group(
+ const std::string& group_name);
+
+} // namespace c10d
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b691de302a389ece3dda5a539796c5d080f6073f
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp
@@ -0,0 +1,61 @@
+#pragma once
+
+#include
+
+#include
+#include
+#include
+
+#include
+
+namespace c10d {
+
+class TORCH_API HashStore : public Store {
+ public:
+ ~HashStore() override = default;
+
+ void set(const std::string& key, const std::vector& data) override;
+
+ std::vector compareSet(
+ const std::string& key,
+ const std::vector& expectedValue,
+ const std::vector& desiredValue) override;
+
+ std::vector get(const std::string& key) override;
+
+ void wait(const std::vector& keys) override {
+ wait(keys, Store::kDefaultTimeout);
+ }
+
+ void wait(
+ const std::vector& keys,
+ const std::chrono::milliseconds& timeout) override;
+
+ int64_t add(const std::string& key, int64_t value) override;
+
+ int64_t getNumKeys() override;
+
+ bool check(const std::vector& keys) override;
+
+ bool deleteKey(const std::string& key) override;
+
+ void append(const std::string& key, const std::vector& value)
+ override;
+
+ std::vector> multiGet(
+ const std::vector& keys) override;
+
+ void multiSet(
+ const std::vector& keys,
+ const std::vector>& values) override;
+
+ // Returns true if this store support append, multiGet and multiSet
+ bool hasExtendedApi() const override;
+
+ protected:
+ std::unordered_map> map_;
+ std::mutex m_;
+ std::condition_variable cv_;
+};
+
+} // namespace c10d
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..25a0b6cdfec5e32b86df56b0e0aa5ce78b3afff3
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp
@@ -0,0 +1,139 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace torch {
+
+class TORCH_API ParamCommsDebugInfo : public c10::DebugInfoBase {
+ public:
+ ParamCommsDebugInfo() = default;
+ ParamCommsDebugInfo(
+ int rank,
+ std::string&& colName,
+ int inNelems,
+ int outNelems,
+ at::ScalarType dType,
+ std::vector inSplitSizes,
+ std::vector outSplitSizes,
+ int worldSize);
+
+ ~ParamCommsDebugInfo() override = default;
+
+ int getRank() const {
+ return rank_;
+ }
+
+ int getWorldSize() const {
+ return worldSize_;
+ }
+
+ const std::string getColumnName() const {
+ return columnName_;
+ }
+
+ int getInMessageNelems() const {
+ return inMessageNelems_;
+ }
+
+ int getOutMessageNelems() const {
+ return outMessageNelems_;
+ }
+
+ at::ScalarType getDType() const {
+ return dType_;
+ }
+
+ const std::vector& getInputSplitSizes() const {
+ return inputSplitSizes_;
+ }
+
+ const std::vector& getOutputSplitSizes() const {
+ return outputSplitSizes_;
+ }
+
+ private:
+ int rank_{};
+ int worldSize_{};
+ std::string columnName_;
+ int inMessageNelems_{};
+ int outMessageNelems_{};
+ at::ScalarType dType_ = at::kByte;
+ std::vector inputSplitSizes_;
+ std::vector outputSplitSizes_;
+};
+
+#define RECORD_PARAM_COMMS( \
+ seq, \
+ pg_ptr, \
+ rank, \
+ colName, \
+ inNelems, \
+ outNelems, \
+ dType, \
+ inSplitSizes, \
+ outSplitSizes, \
+ worldSize) \
+ auto paramCommsInfo = std::make_shared( \
+ rank, \
+ colName, \
+ inNelems, \
+ outNelems, \
+ dType, \
+ inSplitSizes, \
+ outSplitSizes, \
+ worldSize); \
+ c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \
+ std::initializer_list paramList = { \
+ c10::IValue(seq), \
+ c10::IValue(pg_ptr), \
+ rank, \
+ colName, \
+ inSplitSizes, \
+ outSplitSizes, \
+ worldSize}; \
+ c10::ArrayRef paramInputs(paramList); \
+ RECORD_FUNCTION(at::kParamCommsCallName, paramInputs);
+
+#define RECORD_PARAM_COMMS_DATA( \
+ seq, \
+ pg_ptr, \
+ InputTensors, \
+ OutputTensors, \
+ rank, \
+ colName, \
+ inNelems, \
+ outNelems, \
+ dType, \
+ inSplitSizes, \
+ outSplitSizes, \
+ worldSize) \
+ auto paramCommsInfo = std::make_shared( \
+ rank, \
+ colName, \
+ inNelems, \
+ outNelems, \
+ dType, \
+ inSplitSizes, \
+ outSplitSizes, \
+ worldSize); \
+ c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \
+ std::initializer_list paramList = { \
+ c10::IValue(InputTensors), \
+ c10::IValue(seq), \
+ c10::IValue(pg_ptr), \
+ rank, \
+ colName, \
+ inSplitSizes, \
+ outSplitSizes, \
+ worldSize}; \
+ c10::ArrayRef paramInputs(paramList); \
+ RECORD_FUNCTION_WITH_INPUTS_OUTPUTS( \
+ at::kParamCommsCallName, \
+ paramInputs, \
+ std::vector(1, c10::IValue(OutputTensors)));
+} // namespace torch
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..74399554b8cd0d000b43c55a72dd37bd9fdc8d1f
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp
@@ -0,0 +1,64 @@
+#pragma once
+
+#include
+#include
+
+namespace c10d {
+
+class TORCH_API PrefixStore : public Store {
+ public:
+ explicit PrefixStore(std::string prefix, c10::intrusive_ptr store);
+
+ using Store::set;
+ void set(const std::string& key, const std::vector& value) override;
+
+ using Store::compareSet;
+ std::vector compareSet(
+ const std::string& key,
+ const std::vector& expectedValue,
+ const std::vector& desiredValue) override;
+
+ std::vector get(const std::string& key) override;
+
+ int64_t add(const std::string& key, int64_t value) override;
+
+ bool deleteKey(const std::string& key) override;
+
+ int64_t getNumKeys() override;
+
+ bool check(const std::vector& keys) override;
+
+ void wait(const std::vector& keys) override;
+
+ void wait(
+ const std::vector& keys,
+ const std::chrono::milliseconds& timeout) override;
+
+ const std::chrono::milliseconds& getTimeout() const noexcept override;
+
+ void setTimeout(const std::chrono::milliseconds& timeout) override;
+
+ void append(const std::string& key, const std::vector& value)
+ override;
+
+ std::vector> multiGet(
+ const std::vector& keys) override;
+
+ void multiSet(
+ const std::vector& keys,
+ const std::vector>& values) override;
+
+ // Returns true if this store support append, multiGet and multiSet
+ bool hasExtendedApi() const override;
+
+ c10::intrusive_ptr getUnderlyingStore();
+
+ protected:
+ std::string prefix_;
+ c10::intrusive_ptr store_;
+
+ std::string joinKey(const std::string& key);
+ std::vector joinKeys(const std::vector& keys);
+};
+
+} // namespace c10d
diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3567cb35722f74594bffa6fde003dadd00575bb0
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
@@ -0,0 +1,918 @@
+#pragma once
+
+#ifdef USE_C10D_NCCL
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+namespace c10d {
+// Environment variable which controls whether we perform a NCCL healt check
+// which ensures communicators are healthy at the beginning of init.
+static std::vector TORCH_ENABLE_NCCL_HEALTH_CHECK = {
+ "TORCH_ENABLE_NCCL_HEALTH_CHECK",
+ "ENABLE_NCCL_HEALTH_CHECK"};
+
+// Environment variable which controls whether or not wait() is blocking or
+// non-blocking.
+static std::vector TORCH_NCCL_BLOCKING_WAIT = {
+ "TORCH_NCCL_BLOCKING_WAIT",
+ "NCCL_BLOCKING_WAIT"};
+
+// Environment variable which controls whether or not we perform Async Error
+// Handling with NCCL.
+static std::vector TORCH_NCCL_ASYNC_ERROR_HANDLING = {
+ "TORCH_NCCL_ASYNC_ERROR_HANDLING",
+ "NCCL_ASYNC_ERROR_HANDLING"};
+
+// Environment Variable to control whether dumping debug info on watchdog
+// timeout is enabled. This variable must be set together with
+// TORCH_NCCL_ENABLE_MONITORING=1 and TORCH_NCCL_TRACE_BUFFER_SIZE > 0.
+static std::vector TORCH_NCCL_DUMP_ON_TIMEOUT = {
+ "TORCH_NCCL_DUMP_ON_TIMEOUT"};
+
+// Environment Variable to control whether Desync Debug is enabled.
+// This variable must be set together with TORCH_NCCL_ASYNC_ERROR_HANDLING.
+static std::vector TORCH_NCCL_DESYNC_DEBUG = {
+ "TORCH_NCCL_DESYNC_DEBUG",
+ "NCCL_DESYNC_DEBUG"};
+
+static std::vector TORCH_NCCL_ENABLE_TIMING = {
+ "TORCH_NCCL_ENABLE_TIMING",
+ "NCCL_ENABLE_TIMING"};
+
+static std::vector TORCH_NCCL_ENABLE_MONITORING = {
+ "TORCH_NCCL_ENABLE_MONITORING"};
+
+static std::vector TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC = {
+ "TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"};
+
+static std::vector TORCH_NCCL_TRACE_BUFFER_SIZE = {
+ "TORCH_NCCL_TRACE_BUFFER_SIZE"};
+
+constexpr const char* NCCL_BACKEND_NAME = "nccl";
+
+constexpr auto kProcessGroupNCCLDefaultTimeout =
+ std::chrono::milliseconds(10 * 60 * 1000);
+
+// NoHandling: do not handle asynchronous NCCL errors
+// TearDown: tear down process upon error, see `WorkNCCL::handleException`
+// CleanUpOnly: just clean up collectives and abort communicators without
+// tearing down process SkipCleanUp: (this is a temporary option and can be
+// removed in future) tear down process without cleaning up NCCL communicators.
+// This should be used as a last resort in case `ncclCommAbort` itself is
+// hanging
+enum ErrorHandlingMode {
+ NoHandling = 0,
+ TearDown = 1,
+ CleanUpOnly = 2,
+ SkipCleanUp = 3
+};
+
+#define SHOULD_CLEAN_UP(a) (a != NoHandling && a != SkipCleanUp)
+
+#define SHOULD_TEAR_DOWN(a) (a != NoHandling && a != CleanUpOnly)
+
+// If set, ProcessGroupNCCL doesn't use recordStream calls to ensure
+// caching allocator safety for tensors used on both user-facing and
+// internal comm streams.
+// Instead, it stashes live references to those tensors until after
+// user-facing streams are synced with comm streams.
+// See stashed_for_allocator_safety_ below.
+static std::vector TORCH_NCCL_AVOID_RECORD_STREAMS = {
+ "TORCH_NCCL_AVOID_RECORD_STREAMS"};
+
+// If set, ProcessGroupNCCL registers postAlloc and preFree hooks to cuda cache
+// allocator so that whenever a tensor is allocated or freed, ProcessGroupNCCL
+// can register/deregister the tensor on all available NCCL communicators.
+static std::vector TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK =
+ {"TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK",
+ "NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"};
+
+// ProcessGroupNCCL implements NCCL bindings for c10d.
+//
+// All functions of the class are expected to be called in the same order
+// across all processes in the process group. This is the only way that we
+// can guarantee to match up the same calls among all processes.
+//
+// All NCCL functions provided by this class are asynchronous functions. More
+// specifically, each NCCL call is scheduled on a separate CUDA stream that is
+// different from the current CUDA stream. This is for the purpose of
+// achieving potentially concurrency and better performance. As a result,
+// it is the callers' responsibility to make sure that the CUDA stream their
+// code works on needs to wait for the NCCL operation from
+// this class.
+//
+// This can be done by calling:
+//
+// either WorkNCCL::wait() or WorkNCCL::synchronize(), both achieves the same
+// functionality and are synonyms.
+//
+// Also note that WorkNCCL::finishedGPUExecution() is a helper function only
+// provided by ProcessGroupNCCL to check if the NCCL operation of WorkNCCL has
+// finished execution on the GPU (not just scheduled).
+//
+// Example on using the NCCL process group
+//
+// ProcessGroupNCCL pg(store, rank, size);
+// std::shared_ptr work = pg.allreduce(tensors);
+//
+// // At this point, NCCL kernel has already by queued successfully
+// // Now, let current stream wait for the NCCL to finish, this function is
+// // async operation as well
+//
+// work->wait()
+//
+// // Now continue on other work in the current stream.
+class TORCH_API ProcessGroupNCCL : public Backend {
+ public:
+ class WorkNCCL : public Work, public std::enable_shared_from_this {
+ public:
+ friend struct WorkInfo;
+
+ // Constructor takes a list of CUDA devices
+ WorkNCCL(
+ const std::vector& devices,
+ int rank,
+ OpType opType,
+ uint64_t seq,
+ const char* profilingTitle = nullptr,
+ const c10::optional>& inputs = c10::nullopt,
+ bool desyncDebug = false,
+ bool enableTiming = false);
+ // Copy constructor doing partial copy without outputs_. Cleanup thread
+ // monitors and removes finished works. However it will deadlock when
+ // destructs outputs_ tensors who are view tensors in autograd graph.
+ WorkNCCL(const WorkNCCL& w);
+
+ ~WorkNCCL() override;
+
+ // Checks if the NCCL kernel has started to execute.
+ bool isStarted();
+
+ // Checks if request has completed. In this specific case of NCCL, it checks
+ // if the NCCL operation has completed on the GPU in its own NCCL stream.
+ // Non-blocking operation.
+ bool isCompleted() override;
+
+ bool isSuccess() const override;
+
+ // Same as calling synchronize() for NCCL work.
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
+
+ void abort() override;
+
+ // Let current stream wait on the completing of the NCCL work
+ // Throws on exceptions. Blocking operation, which will wait for work
+ // completion.
+ void synchronize() override;
+
+ // Synchronize streams by blocking each on the NCCL stream
+ void synchronizeStreams();
+
+ // Helper function to handle exception (throw if needed).
+ void handleException(ErrorHandlingMode asyncErrorHandling);
+
+ // Helper function that checks if the NCCL kernels have finished
+ // execution on the GPUs
+ bool finishedGPUExecution();
+
+ // Get a Future object that will be marked as completed internally.
+ c10::intrusive_ptr getFuture() override;
+
+ float getDuration() const override;
+
+ uint64_t getSequencenumber() const override;
+
+ // Helper function that sets an exception_ptr on the WorkNCCL object.
+ void setException(std::exception_ptr exception_ptr);
+
+ // Helper function that returns True if the WorkNCCL object has timed out
+ // and False otherwise.
+ // In case of timeout, set exception on the WorkNCCL object.
+ bool checkTimeout(
+ c10::optional timeout = c10::nullopt);
+
+ std::vector result() override;
+
+ protected:
+ // The cached list of CUDA devices to operate on
+ std::vector devices_;
+
+ // The start CUDA events of NCCL operator tracking this work item on
+ // multiple CUDA devices. These start CUDA events are needed by desync
+ // debugging if enabled.
+ std::shared_ptr> ncclStartEvents_;
+
+ // The end CUDA events of NCCL operator tracking this work item on
+ // multiple CUDA devices.
+ std::shared_ptr> ncclEndEvents_;
+
+ // The NCCL communicators used for this work item.
+ std::vector> ncclComms_;
+
+ // Tensors used for barrier op
+ std::vector barrierTensors_;
+
+ // Clone of blockingWait_ from ProcessGroupNCCL.
+ bool blockingWait_ = false;
+
+ // Clone of avoidRecordStreams_ from ProcessGroupNCCL.
+ bool avoidRecordStreams_ = false;
+
+ // Clone of opTimeout_ from ProcessGroupNCCL.
+ std::chrono::milliseconds opTimeout_;
+
+ // Time point representing when the work started.
+ std::chrono::time_point workStartTime_;
+
+ // Record the collective sequential number.
+ uint64_t seq_;
+
+ // Indicates if the nccl start event has been updated to the store trace.
+ // This will be used by desync debug.
+ bool startTraceUpdated_{false};
+
+ // Record collective sizes for debug. We only record the size on the first
+ // device as multi-device per process is deprecated
+ size_t numelIn_ = -1;
+ size_t numelOut_ = -1;
+
+ // Wrapper method for the static checkForNCCLErrors which can be overridden
+ // for tests.
+ virtual std::exception_ptr checkForNCCLErrors(
+ const std::vector>& ncclComms) const;
+
+ friend std::ostream& operator<<(
+ std::ostream& output,
+ const WorkNCCL& workNCCL);
+
+ private:
+ // Helper function for synchronize
+ void synchronizeInternal(std::chrono::milliseconds timeout);
+
+ // Checks for NCCL errors and sets an appropriate exception_ptr.
+ void checkAndSetException();
+
+ // Just checks whether GPU execution has started, without modifying
+ // exception_ptr.
+ bool startedGPUExecutionInternal() const;
+
+ // Just checks whether GPU execution has completed, without modifying
+ // exception_ptr.
+ bool finishedGPUExecutionInternal() const;
+
+ // Reference to the store so that we can write aborted communicators
+ // to the store.
+ c10::intrusive_ptr store_;
+
+ // Store a reference to NCCL collective's outputs, used by result and to
+ // give a more descriptive message when representing the Work as a string.
+ std::shared_ptr> outputs_;
+
+ // TORCH_NCCL_AVOID_RECORD_STREAMS implementation helper.
+ // Stores references to participating non-output tensors (ie inputs,
+ // flattened intermediates).
+ // We'll clear this list in synchronizeStreams, just after user-facing
+ // stream(s) are synced with the nccl work stream(s).
+ // By keeping these refs (as well as outputs_) alive until after the
+ // collective's work rejoins the user-facing streams, we achieve
+ // caching allocator safety without any recordStream calls.
+ // For in-place collectives, some refs stashed here may alias outputs_,
+ // but that doesn't do any harm.
+ std::shared_ptr> stashed_for_allocator_safety_;
+
+ // The future returned by getFuture.
+ c10::intrusive_ptr future_;
+
+ bool timingEnabled_;
+ // unique id used to tell the trace buffer that this
+ // work has completed
+ c10::optional trace_id_;
+ friend class ProcessGroupNCCL;
+ };
+
+ class CoalescedWorkNCCL
+ : public Work,
+ public std::enable_shared_from_this {
+ public:
+ // Constructor takes a list of WorkNCCL works
+ CoalescedWorkNCCL(
+ std::vector works,
+ int rank,
+ OpType opType);
+
+ ~CoalescedWorkNCCL() override;
+
+ // Same as calling synchronize() for NCCL work.
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
+
+ protected:
+ // The cached list of CUDA devices to operate on
+ std::vector works_;
+
+ friend class ProcessGroupNCCL;
+ };
+
+ struct Options : Backend::Options {
+ // NOTE: timeout in ProcessGroupNCCL::Options denote the timeout for
+ // operations. This is only used when blockingWait_ is enabled.
+ explicit Options(bool is_high_priority_stream = false);
+
+ // return intrusive_ptr of the object
+ static c10::intrusive_ptr create(
+ bool is_high_priority_stream = false) {
+ return c10::make_intrusive(is_high_priority_stream);
+ }
+
+ // Schedule NCCL operations on high priority CUDA streams
+ bool is_high_priority_stream;
+
+#ifdef NCCL_HAS_COMM_NONBLOCKING
+ // Configure ranks
+ ncclConfig_t config = NCCL_CONFIG_INITIALIZER;
+#endif
+
+ // Optional "parent" backend and color to create communicators from
+ // via `ncclCommSplit`
+ std::shared_ptr split_from;
+ int64_t split_color{0};
+ };
+
+ // If you wish to create multiple process groups, each with a potentially
+ // different rank and size, you can do so by passing a new store instance
+ // to each one. If you have only a single store object, you can
+ // use the `c10d::PrefixStore` to derive scoped instances.
+ // This is also what the Python API in torch.distributed does.
+ //
+ // The process group instance keeps a reference to the store because
+ // it may be used long after the constructor runs. In fact, the constructor
+ // doesn't create any NCCL communicators. A single NCCL communicator can
+ // only be used on a specific set of devices, and are therefore created
+ // on-demand when a collective runs. If another collective is executed later,
+ // against a different set of devices, the process group creates another NCCL
+ // communicator. These NCCL communicators are cached and reused if possible.
+ //
+ ProcessGroupNCCL(
+ const c10::intrusive_ptr& store,
+ int rank,
+ int size,
+ c10::intrusive_ptr options = Options::create());
+
+ // This constructor includes the deprecated `groupName` argument.
+ // If you have existing code that uses the `groupName`, you can replace
+ // it by specifying a `c10d::PrefixStore(groupName, store)` for store.
+ C10_DEPRECATED ProcessGroupNCCL(
+ const c10::intrusive_ptr& store,
+ int rank,
+ int size,
+ const std::string& groupName,
+ c10::intrusive_ptr options = Options::create())
+ : ProcessGroupNCCL(store, rank, size, options) {}
+
+ ~ProcessGroupNCCL() override;
+
+ c10::intrusive_ptr getOptions() {
+ return options_;
+ }
+
+ const std::string getBackendName() const override {
+ return std::string(NCCL_BACKEND_NAME);
+ }
+
+ void startCoalescing() override;
+
+ c10::intrusive_ptr endCoalescing() override;
+
+ c10::intrusive_ptr broadcast(
+ std::vector& tensors,
+ const BroadcastOptions& opts = BroadcastOptions()) override;
+
+ c10::intrusive_ptr _broadcast_oop(
+ std::vector& outputTensors,
+ std::vector& inputTensors,
+ const BroadcastOptions& opts = BroadcastOptions());
+
+ c10::intrusive_ptr allreduce_sparse(
+ std::vector& tensors,
+ const AllreduceOptions& opts = AllreduceOptions()) override;
+
+ c10::intrusive_ptr allreduce(
+ std::vector& tensors,
+ const AllreduceOptions& opts = AllreduceOptions()) override;
+
+ c10::intrusive_ptr allreduce_coalesced(
+ std::vector& tensors,
+ const AllreduceCoalescedOptions& opts =
+ AllreduceCoalescedOptions()) override;
+
+ c10::intrusive_ptr reduce(
+ std::vector& tensors,
+ const ReduceOptions& opts = ReduceOptions()) override;
+
+ c10::intrusive_ptr _reduce_oop(
+ std::vector& outputTensors,
+ std::vector& inputTensors,
+ const ReduceOptions& opts = ReduceOptions());
+
+ c10::intrusive_ptr allgather(
+ std::vector>& outputTensors,
+ std::vector& inputTensors,
+ const AllgatherOptions& opts = AllgatherOptions()) override;
+
+ c10::intrusive_ptr _allgather_base(
+ at::Tensor& outputbuffer,
+ at::Tensor& inputbuffer,
+ const AllgatherOptions& opts = AllgatherOptions()) override;
+
+ c10::intrusive_ptr allgather_coalesced(
+ std::vector>& outputTensorLists,
+ std::vector& inputTensors,
+ const AllgatherOptions& opts = AllgatherOptions()) override;
+
+ c10::intrusive_ptr allgather_into_tensor_coalesced(
+ std::vector& outputs,
+ std::vector& inputs,
+ const AllgatherOptions& opts = AllgatherOptions()) override;
+
+ c10::intrusive_ptr reduce_scatter(
+ std::vector& outputTensors,
+ std::vector>& inputTensors,
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
+
+ c10::intrusive_ptr _reduce_scatter_base(
+ at::Tensor& outputTensor,
+ at::Tensor& inputTensor,
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
+
+ c10::intrusive_ptr reduce_scatter_tensor_coalesced(
+ std::vector& outputs,
+ std::vector& inputs,
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
+
+ c10::intrusive_ptr barrier(
+ const BarrierOptions& opts = BarrierOptions()) override;
+
+ c10::intrusive_ptr alltoall_base(
+ at::Tensor& outputTensor,
+ at::Tensor& inputTensor,
+ std::vector& outputSplitSizes,
+ std::vector& inputSplitSizes,
+ const AllToAllOptions& opts = AllToAllOptions()) override;
+
+ c10::intrusive_ptr alltoall(
+ std::vector& outputTensors,
+ std::vector& inputTensors,
+ const AllToAllOptions& opts = AllToAllOptions()) override;
+
+ c10::intrusive_ptr send(
+ std::vector& tensors,
+ int dstRank,
+ int tag) override;
+
+ c10::intrusive_ptr