Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +11 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__init__.py +90 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/_download_all.py +57 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/_fetchers.py +219 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/_registry.py +26 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/_utils.py +81 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py +128 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_cubature.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/__init__.py +173 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_delegators.py +297 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_filters.py +1965 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_fourier.py +306 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py +1003 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_measurements.py +1687 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_morphology.py +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ndimage_api.py +15 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py +210 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so +3 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py +143 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_rank_filter_1d.cpython-310-x86_64-linux-gnu.so +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_support_alternative_backends.py +72 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/filters.py +27 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/fourier.py +21 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/interpolation.py +22 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/measurements.py +24 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/morphology.py +27 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py +12 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py +102 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py +67 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py +0 -0
- evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py +1484 -0
.gitattributes
CHANGED
|
@@ -832,3 +832,14 @@ evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-
|
|
| 832 |
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 833 |
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 834 |
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 832 |
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 833 |
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 834 |
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 835 |
+
evalkit_eagle/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 836 |
+
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 837 |
+
infer_4_47_1/lib/libasan.so.6 filter=lfs diff=lfs merge=lfs -text
|
| 838 |
+
infer_4_47_1/lib/libform.a filter=lfs diff=lfs merge=lfs -text
|
| 839 |
+
infer_4_47_1/lib/liblsan.so filter=lfs diff=lfs merge=lfs -text
|
| 840 |
+
infer_4_47_1/lib/libatomic.so.1.2.0 filter=lfs diff=lfs merge=lfs -text
|
| 841 |
+
infer_4_47_1/lib/libz.so.1 filter=lfs diff=lfs merge=lfs -text
|
| 842 |
+
infer_4_47_1/lib/libz.so filter=lfs diff=lfs merge=lfs -text
|
| 843 |
+
infer_4_47_1/lib/libsqlite3.so.0 filter=lfs diff=lfs merge=lfs -text
|
| 844 |
+
infer_4_47_1/lib/libstdc++.so.6.0.29 filter=lfs diff=lfs merge=lfs -text
|
| 845 |
+
infer_4_47_1/lib/libncurses.a filter=lfs diff=lfs merge=lfs -text
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__init__.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
================================
|
| 3 |
+
Datasets (:mod:`scipy.datasets`)
|
| 4 |
+
================================
|
| 5 |
+
|
| 6 |
+
.. currentmodule:: scipy.datasets
|
| 7 |
+
|
| 8 |
+
Dataset Methods
|
| 9 |
+
===============
|
| 10 |
+
|
| 11 |
+
.. autosummary::
|
| 12 |
+
:toctree: generated/
|
| 13 |
+
|
| 14 |
+
ascent
|
| 15 |
+
face
|
| 16 |
+
electrocardiogram
|
| 17 |
+
|
| 18 |
+
Utility Methods
|
| 19 |
+
===============
|
| 20 |
+
|
| 21 |
+
.. autosummary::
|
| 22 |
+
:toctree: generated/
|
| 23 |
+
|
| 24 |
+
download_all -- Download all the dataset files to specified path.
|
| 25 |
+
clear_cache -- Clear cached dataset directory.
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
Usage of Datasets
|
| 29 |
+
=================
|
| 30 |
+
|
| 31 |
+
SciPy dataset methods can be simply called as follows: ``'<dataset-name>()'``
|
| 32 |
+
This downloads the dataset files over the network once, and saves the cache,
|
| 33 |
+
before returning a `numpy.ndarray` object representing the dataset.
|
| 34 |
+
|
| 35 |
+
Note that the return data structure and data type might be different for
|
| 36 |
+
different dataset methods. For a more detailed example on usage, please look
|
| 37 |
+
into the particular dataset method documentation above.
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
How dataset retrieval and storage works
|
| 41 |
+
=======================================
|
| 42 |
+
|
| 43 |
+
SciPy dataset files are stored within individual GitHub repositories under the
|
| 44 |
+
SciPy GitHub organization, following a naming convention as
|
| 45 |
+
``'dataset-<name>'``, for example `scipy.datasets.face` files live at
|
| 46 |
+
https://github.com/scipy/dataset-face. The `scipy.datasets` submodule utilizes
|
| 47 |
+
and depends on `Pooch <https://www.fatiando.org/pooch/latest/>`_, a Python
|
| 48 |
+
package built to simplify fetching data files. Pooch uses these repos to
|
| 49 |
+
retrieve the respective dataset files when calling the dataset function.
|
| 50 |
+
|
| 51 |
+
A registry of all the datasets, essentially a mapping of filenames with their
|
| 52 |
+
SHA256 hash and repo urls are maintained, which Pooch uses to handle and verify
|
| 53 |
+
the downloads on function call. After downloading the dataset once, the files
|
| 54 |
+
are saved in the system cache directory under ``'scipy-data'``.
|
| 55 |
+
|
| 56 |
+
Dataset cache locations may vary on different platforms.
|
| 57 |
+
|
| 58 |
+
For macOS::
|
| 59 |
+
|
| 60 |
+
'~/Library/Caches/scipy-data'
|
| 61 |
+
|
| 62 |
+
For Linux and other Unix-like platforms::
|
| 63 |
+
|
| 64 |
+
'~/.cache/scipy-data' # or the value of the XDG_CACHE_HOME env var, if defined
|
| 65 |
+
|
| 66 |
+
For Windows::
|
| 67 |
+
|
| 68 |
+
'C:\\Users\\<user>\\AppData\\Local\\<AppAuthor>\\scipy-data\\Cache'
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
In environments with constrained network connectivity for various security
|
| 72 |
+
reasons or on systems without continuous internet connections, one may manually
|
| 73 |
+
load the cache of the datasets by placing the contents of the dataset repo in
|
| 74 |
+
the above mentioned cache directory to avoid fetching dataset errors without
|
| 75 |
+
the internet connectivity.
|
| 76 |
+
|
| 77 |
+
"""
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
from ._fetchers import face, ascent, electrocardiogram
|
| 81 |
+
from ._download_all import download_all
|
| 82 |
+
from ._utils import clear_cache
|
| 83 |
+
|
| 84 |
+
__all__ = ['ascent', 'electrocardiogram', 'face',
|
| 85 |
+
'download_all', 'clear_cache']
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
from scipy._lib._testutils import PytestTester
|
| 89 |
+
test = PytestTester(__name__)
|
| 90 |
+
del PytestTester
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.98 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc
ADDED
|
Binary file (1.71 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc
ADDED
|
Binary file (6.28 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc
ADDED
|
Binary file (765 Bytes). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (2.36 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/_download_all.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Platform independent script to download all the
|
| 3 |
+
`scipy.datasets` module data files.
|
| 4 |
+
This doesn't require a full scipy build.
|
| 5 |
+
|
| 6 |
+
Run: python _download_all.py <download_dir>
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import argparse
|
| 10 |
+
try:
|
| 11 |
+
import pooch
|
| 12 |
+
except ImportError:
|
| 13 |
+
pooch = None
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
if __package__ is None or __package__ == '':
|
| 17 |
+
# Running as python script, use absolute import
|
| 18 |
+
import _registry # type: ignore
|
| 19 |
+
else:
|
| 20 |
+
# Running as python module, use relative import
|
| 21 |
+
from . import _registry
|
| 22 |
+
|
| 23 |
+
|
def download_all(path=None):
    """Fetch every dataset file used by the `scipy.datasets` module.

    Parameters
    ----------
    path : str, optional
        Destination directory for the downloaded files.  When omitted,
        the platform-specific cache directory chosen by pooch is used.

    Raises
    ------
    ImportError
        If the optional dependency pooch is not installed.
    """
    # Fail early with an actionable message when pooch is unavailable.
    if pooch is None:
        raise ImportError("Missing optional dependency 'pooch' required "
                          "for scipy.datasets module. Please use pip or "
                          "conda to install 'pooch'.")
    target_dir = pooch.os_cache('scipy-data') if path is None else path
    # Download each registered file, verifying it against its SHA256 hash.
    for fname in _registry.registry:
        pooch.retrieve(url=_registry.registry_urls[fname],
                       known_hash=_registry.registry[fname],
                       fname=fname, path=target_dir)
| 45 |
+
|
| 46 |
+
|
def main():
    """Command-line entry point: parse an optional target path and download."""
    arg_parser = argparse.ArgumentParser(
        description='Download SciPy data files.')
    arg_parser.add_argument(
        "path", nargs='?', type=str,
        default=pooch.os_cache('scipy-data'),
        help="Directory path to download all the data files.")
    download_all(arg_parser.parse_args().path)


if __name__ == "__main__":
    main()
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/_fetchers.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from numpy import array, frombuffer, load
from ._registry import registry, registry_urls

try:
    import pooch
except ImportError:
    # pooch is an optional dependency; the fetcher helpers raise a
    # helpful ImportError at call time when it is absent.
    pooch = None
    data_fetcher = None
else:
    # Shared downloader for all datasets.  Pooch picks a per-platform
    # cache directory (via appdirs) and verifies each file against the
    # SHA256 registry.  ``base_url`` is a required argument to
    # pooch.create even though every entry is overridden by the
    # per-file ``urls`` mapping below.
    data_fetcher = pooch.create(
        path=pooch.os_cache("scipy-data"),
        base_url="https://github.com/scipy/",
        registry=registry,
        urls=registry_urls,
    )
| 23 |
+
|
| 24 |
+
|
def fetch_data(dataset_name, data_fetcher=data_fetcher):
    """Return the local path of a dataset file, downloading it if needed.

    Parameters
    ----------
    dataset_name : str
        Registered file name, e.g. ``"ascent.dat"``.
    data_fetcher : pooch.Pooch, optional
        Fetcher to use; defaults to the module-level instance.

    Returns
    -------
    str
        Full path to the cached data file.

    Raises
    ------
    ImportError
        If the optional dependency pooch is not installed.
    """
    if data_fetcher is None:
        raise ImportError("Missing optional dependency 'pooch' required "
                          "for scipy.datasets module. Please use pip or "
                          "conda to install 'pooch'.")
    # fetch() downloads on first use and serves the local copy afterwards.
    return data_fetcher.fetch(dataset_name)
| 32 |
+
|
| 33 |
+
|
def ascent():
    """
    Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy
    use in demos.

    The image is derived from
    https://pixnio.com/people/accent-to-the-top

    Parameters
    ----------
    None

    Returns
    -------
    ascent : ndarray
        convenient image to use for testing and demonstration

    Examples
    --------
    >>> import scipy.datasets
    >>> ascent = scipy.datasets.ascent()
    >>> ascent.shape
    (512, 512)
    >>> ascent.max()
    np.uint8(255)

    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(ascent)
    >>> plt.show()

    """
    import pickle

    # First call downloads the file and returns its path; subsequent
    # calls are served from pooch's local cache without re-downloading.
    data_path = fetch_data("ascent.dat")
    # The payload is a pickled nested list; wrap it into an ndarray.
    with open(data_path, 'rb') as fh:
        return array(pickle.load(fh))
| 76 |
+
|
| 77 |
+
|
def electrocardiogram():
    """
    Load an electrocardiogram as an example for a 1-D signal.

    The returned signal is a 5 minute long electrocardiogram (ECG), a medical
    recording of the heart's electrical activity, sampled at 360 Hz.

    Returns
    -------
    ecg : ndarray
        The electrocardiogram in millivolt (mV) sampled at 360 Hz.

    Notes
    -----
    The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
    (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
    PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
    heartbeats as well as pathological changes.

    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208

    .. versionadded:: 1.1.0

    References
    ----------
    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
           (PMID: 11446209); :doi:`10.13026/C2F305`
    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
           PhysioToolkit, and PhysioNet: Components of a New Research Resource
           for Complex Physiologic Signals. Circulation 101(23):e215-e220;
           :doi:`10.1161/01.CIR.101.23.e215`

    Examples
    --------
    >>> from scipy.datasets import electrocardiogram
    >>> ecg = electrocardiogram()
    >>> ecg
    array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385], shape=(108000,))
    >>> ecg.shape, ecg.mean(), ecg.std()
    ((108000,), -0.16510875, 0.5992473991177294)

    As stated the signal features several areas with a different morphology.
    E.g., the first few seconds show the electrical activity of a heart in
    normal sinus rhythm as seen below.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fs = 360
    >>> time = np.arange(ecg.size) / fs
    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(9, 10.2)
    >>> plt.ylim(-1, 1.5)
    >>> plt.show()

    After second 16, however, the first premature ventricular contractions,
    also called extrasystoles, appear. These have a different morphology
    compared to typical heartbeats. The difference can easily be observed
    in the following plot.

    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(46.5, 50)
    >>> plt.ylim(-2, 1.5)
    >>> plt.show()

    At several points large artifacts disturb the recording, e.g.:

    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(207, 215)
    >>> plt.ylim(-2, 3.5)
    >>> plt.show()

    Finally, examining the power spectrum reveals that most of the biosignal is
    made up of lower frequencies. At 60 Hz the noise induced by the mains
    electricity can be clearly observed.

    >>> from scipy.signal import welch
    >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
    >>> plt.semilogy(f, Pxx)
    >>> plt.xlabel("Frequency in Hz")
    >>> plt.ylabel("Power spectrum of the ECG in mV**2")
    >>> plt.xlim(f[[0, -1]])
    >>> plt.show()
    """
    npz_path = fetch_data("ecg.dat")
    with load(npz_path) as npz:
        # Widen np.uint16 samples to Python-int-backed values before the
        # subtraction below can go negative.
        raw_counts = npz["ecg"].astype(int)
    # ADC counts -> millivolt: (counts - adc_zero) / adc_gain
    return (raw_counts - 1024) / 200.0
| 175 |
+
|
| 176 |
+
|
def face(gray=False):
    """
    Get a 1024 x 768, color image of a raccoon face.

    The image is derived from
    https://pixnio.com/fauna-animals/raccoons/raccoon-procyon-lotor

    Parameters
    ----------
    gray : bool, optional
        If True return 8-bit grey-scale image, otherwise return a color image

    Returns
    -------
    face : ndarray
        image of a raccoon face

    Examples
    --------
    >>> import scipy.datasets
    >>> face = scipy.datasets.face()
    >>> face.shape
    (768, 1024, 3)
    >>> face.max()
    np.uint8(255)

    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(face)
    >>> plt.show()

    """
    import bz2

    # The cached file holds bz2-compressed raw RGB bytes.
    compressed_path = fetch_data("face.dat")
    with open(compressed_path, 'rb') as fh:
        pixel_bytes = bz2.decompress(fh.read())
    img = frombuffer(pixel_bytes, dtype='uint8')
    img.shape = (768, 1024, 3)
    if gray is True:
        # Weighted luminance conversion of the RGB channels.
        img = (0.21 * img[:, :, 0] + 0.71 * img[:, :, 1] +
               0.07 * img[:, :, 2]).astype('uint8')
    return img
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/_registry.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
##########################################################################
# Dataset registry for the SciPy Datasets submodule.
#
# ``registry`` maps each data file to its SHA256 checksum (regenerate
# with: openssl sha256 <filename>); ``registry_urls`` maps the same
# file names to their download locations; ``method_files_map`` maps
# each public dataset function to the file(s) it consumes.
##########################################################################

registry = {
    "ascent.dat": "03ce124c1afc880f87b55f6b061110e2e1e939679184f5614e38dacc6c1957e2",
    "ecg.dat": "f20ad3365fb9b7f845d0e5c48b6fe67081377ee466c3a220b7f69f35c8958baf",
    "face.dat": "9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886",
}

registry_urls = {
    "ascent.dat": "https://raw.githubusercontent.com/scipy/dataset-ascent/main/ascent.dat",
    "ecg.dat": "https://raw.githubusercontent.com/scipy/dataset-ecg/main/ecg.dat",
    "face.dat": "https://raw.githubusercontent.com/scipy/dataset-face/main/face.dat",
}

# <method_name> : ["filename1", "filename2", ...]
method_files_map = {
    "ascent": ["ascent.dat"],
    "electrocardiogram": ["ecg.dat"],
    "face": ["face.dat"],
}
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/_utils.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import shutil
|
| 3 |
+
from ._registry import method_files_map
|
| 4 |
+
|
| 5 |
+
try:
|
| 6 |
+
import platformdirs
|
| 7 |
+
except ImportError:
|
| 8 |
+
platformdirs = None # type: ignore[assignment]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _clear_cache(datasets, cache_dir=None, method_map=None):
|
| 12 |
+
if method_map is None:
|
| 13 |
+
# Use SciPy Datasets method map
|
| 14 |
+
method_map = method_files_map
|
| 15 |
+
if cache_dir is None:
|
| 16 |
+
# Use default cache_dir path
|
| 17 |
+
if platformdirs is None:
|
| 18 |
+
# platformdirs is pooch dependency
|
| 19 |
+
raise ImportError("Missing optional dependency 'pooch' required "
|
| 20 |
+
"for scipy.datasets module. Please use pip or "
|
| 21 |
+
"conda to install 'pooch'.")
|
| 22 |
+
cache_dir = platformdirs.user_cache_dir("scipy-data")
|
| 23 |
+
|
| 24 |
+
if not os.path.exists(cache_dir):
|
| 25 |
+
print(f"Cache Directory {cache_dir} doesn't exist. Nothing to clear.")
|
| 26 |
+
return
|
| 27 |
+
|
| 28 |
+
if datasets is None:
|
| 29 |
+
print(f"Cleaning the cache directory {cache_dir}!")
|
| 30 |
+
shutil.rmtree(cache_dir)
|
| 31 |
+
else:
|
| 32 |
+
if not isinstance(datasets, (list, tuple)):
|
| 33 |
+
# single dataset method passed should be converted to list
|
| 34 |
+
datasets = [datasets, ]
|
| 35 |
+
for dataset in datasets:
|
| 36 |
+
assert callable(dataset)
|
| 37 |
+
dataset_name = dataset.__name__ # Name of the dataset method
|
| 38 |
+
if dataset_name not in method_map:
|
| 39 |
+
raise ValueError(f"Dataset method {dataset_name} doesn't "
|
| 40 |
+
"exist. Please check if the passed dataset "
|
| 41 |
+
"is a subset of the following dataset "
|
| 42 |
+
f"methods: {list(method_map.keys())}")
|
| 43 |
+
|
| 44 |
+
data_files = method_map[dataset_name]
|
| 45 |
+
data_filepaths = [os.path.join(cache_dir, file)
|
| 46 |
+
for file in data_files]
|
| 47 |
+
for data_filepath in data_filepaths:
|
| 48 |
+
if os.path.exists(data_filepath):
|
| 49 |
+
print("Cleaning the file "
|
| 50 |
+
f"{os.path.split(data_filepath)[1]} "
|
| 51 |
+
f"for dataset {dataset_name}")
|
| 52 |
+
os.remove(data_filepath)
|
| 53 |
+
else:
|
| 54 |
+
print(f"Path {data_filepath} doesn't exist. "
|
| 55 |
+
"Nothing to clear.")
|
| 56 |
+
|
| 57 |
+
|
def clear_cache(datasets=None):
    """
    Cleans the scipy datasets cache directory.

    If a scipy.datasets method or a list/tuple of the same is
    provided, then clear_cache removes all the data files
    associated to the passed dataset method callable(s).

    By default, it removes all the cached data files.

    Parameters
    ----------
    datasets : callable or list/tuple of callable or None

    Examples
    --------
    >>> from scipy import datasets
    >>> ascent_array = datasets.ascent()
    >>> ascent_array.shape
    (512, 512)
    >>> datasets.clear_cache([datasets.ascent])
    Cleaning the file ascent.dat for dataset ascent
    """
    # Thin public wrapper: the default cache directory and method map
    # are resolved inside the private helper.
    _clear_cache(datasets)
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py
ADDED
|
File without changes
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (179 Bytes). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc
ADDED
|
Binary file (3.95 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from scipy.datasets._registry import registry
|
| 2 |
+
from scipy.datasets._fetchers import data_fetcher
|
| 3 |
+
from scipy.datasets._utils import _clear_cache
|
| 4 |
+
from scipy.datasets import ascent, face, electrocardiogram, download_all
|
| 5 |
+
from numpy.testing import assert_equal, assert_almost_equal
|
| 6 |
+
import os
|
| 7 |
+
from threading import get_ident
|
| 8 |
+
import pytest
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
import pooch
|
| 12 |
+
except ImportError:
|
| 13 |
+
raise ImportError("Missing optional dependency 'pooch' required "
|
| 14 |
+
"for scipy.datasets module. Please use pip or "
|
| 15 |
+
"conda to install 'pooch'.")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
data_dir = data_fetcher.path # type: ignore
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _has_hash(path, expected_hash):
|
| 22 |
+
"""Check if the provided path has the expected hash."""
|
| 23 |
+
if not os.path.exists(path):
|
| 24 |
+
return False
|
| 25 |
+
return pooch.file_hash(path) == expected_hash
|
| 26 |
+
|
| 27 |
+
|
class TestDatasets:
    """Network-dependent checks of the public scipy.datasets API."""

    @pytest.fixture(scope='module', autouse=True)
    def test_download_all(self):
        # Module-scoped setup fixture: requires INTERNET CONNECTION.
        # Fetches every registered dataset once before the tests run.
        download_all()
        yield

    def _cached_file_ok(self, fname):
        # True when the cached copy of *fname* matches its registry hash.
        return _has_hash(os.path.join(data_dir, fname), registry[fname])

    @pytest.mark.fail_slow(10)
    def test_existence_all(self):
        assert len(os.listdir(data_dir)) >= len(registry)

    def test_ascent(self):
        assert_equal(ascent().shape, (512, 512))
        # hash check
        assert self._cached_file_ok("ascent.dat")

    def test_face(self):
        assert_equal(face().shape, (768, 1024, 3))
        # hash check
        assert self._cached_file_ok("face.dat")

    def test_electrocardiogram(self):
        # Test shape, dtype and stats of signal
        ecg = electrocardiogram()
        assert_equal(ecg.dtype, float)
        assert_equal(ecg.shape, (108000,))
        assert_almost_equal(ecg.mean(), -0.16510875)
        assert_almost_equal(ecg.std(), 0.5992473991177294)
        # hash check
        assert self._cached_file_ok("ecg.dat")
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def test_clear_cache(tmp_path):
    """Exercise _clear_cache against a dummy cache directory.

    Stand-in dataset methods and a method->files map replace the real
    registry, so no downloads are involved.
    """
    # Note: `tmp_path` is a pytest fixture, it handles cleanup
    # Use a per-thread subdirectory so parallel runs don't collide.
    thread_basepath = tmp_path / str(get_ident())
    thread_basepath.mkdir()

    dummy_basepath = thread_basepath / "dummy_cache_dir"
    dummy_basepath.mkdir()

    # Create four dummy dataset files for dummy dataset methods
    dummy_method_map = {}
    for i in range(4):
        dummy_method_map[f"data{i}"] = [f"data{i}.dat"]
        data_filepath = dummy_basepath / f"data{i}.dat"
        data_filepath.write_text("")

    # clear files associated to single dataset method data0
    # also test callable argument instead of list of callables
    def data0():
        pass
    _clear_cache(datasets=data0, cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data0.dat")

    # clear files associated to multiple dataset methods "data1" and "data2"
    def data1():
        pass

    def data2():
        pass
    _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data1.dat")
    assert not os.path.exists(dummy_basepath/"data2.dat")

    # clear multiple dataset files "data4_0.dat" and "data4_1.dat"
    # associated with dataset method "data4"
    def data4():
        pass
    # create files
    (dummy_basepath / "data4_0.dat").write_text("")
    (dummy_basepath / "data4_1.dat").write_text("")

    dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
    _clear_cache(datasets=[data4], cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data4_0.dat")
    assert not os.path.exists(dummy_basepath/"data4_1.dat")

    # wrong dataset method should raise ValueError since it
    # doesn't exist in the dummy_method_map
    def data5():
        pass
    with pytest.raises(ValueError):
        _clear_cache(datasets=[data5], cache_dir=dummy_basepath,
                     method_map=dummy_method_map)

    # remove all dataset cache
    _clear_cache(datasets=None, cache_dir=dummy_basepath)
    assert not os.path.exists(dummy_basepath)
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (4.76 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_cubature.cpython-310.pyc
ADDED
|
Binary file (20.5 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc
ADDED
|
Binary file (16.2 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc
ADDED
|
Binary file (49 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc
ADDED
|
Binary file (39.2 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc
ADDED
|
Binary file (41.1 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc
ADDED
|
Binary file (587 Bytes). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc
ADDED
|
Binary file (616 Bytes). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc
ADDED
|
Binary file (649 Bytes). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/__init__.py
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
=========================================================
|
| 3 |
+
Multidimensional image processing (:mod:`scipy.ndimage`)
|
| 4 |
+
=========================================================
|
| 5 |
+
|
| 6 |
+
.. currentmodule:: scipy.ndimage
|
| 7 |
+
|
| 8 |
+
This package contains various functions for multidimensional image
|
| 9 |
+
processing.
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
Filters
|
| 13 |
+
=======
|
| 14 |
+
|
| 15 |
+
.. autosummary::
|
| 16 |
+
:toctree: generated/
|
| 17 |
+
|
| 18 |
+
convolve - Multidimensional convolution
|
| 19 |
+
convolve1d - 1-D convolution along the given axis
|
| 20 |
+
correlate - Multidimensional correlation
|
| 21 |
+
correlate1d - 1-D correlation along the given axis
|
| 22 |
+
gaussian_filter
|
| 23 |
+
gaussian_filter1d
|
| 24 |
+
gaussian_gradient_magnitude
|
| 25 |
+
gaussian_laplace
|
| 26 |
+
generic_filter - Multidimensional filter using a given function
|
| 27 |
+
generic_filter1d - 1-D generic filter along the given axis
|
| 28 |
+
generic_gradient_magnitude
|
| 29 |
+
generic_laplace
|
| 30 |
+
laplace - N-D Laplace filter based on approximate second derivatives
|
| 31 |
+
maximum_filter
|
| 32 |
+
maximum_filter1d
|
| 33 |
+
median_filter - Calculates a multidimensional median filter
|
| 34 |
+
minimum_filter
|
| 35 |
+
minimum_filter1d
|
| 36 |
+
percentile_filter - Calculates a multidimensional percentile filter
|
| 37 |
+
prewitt
|
| 38 |
+
rank_filter - Calculates a multidimensional rank filter
|
| 39 |
+
sobel
|
| 40 |
+
uniform_filter - Multidimensional uniform filter
|
| 41 |
+
uniform_filter1d - 1-D uniform filter along the given axis
|
| 42 |
+
|
| 43 |
+
Fourier filters
|
| 44 |
+
===============
|
| 45 |
+
|
| 46 |
+
.. autosummary::
|
| 47 |
+
:toctree: generated/
|
| 48 |
+
|
| 49 |
+
fourier_ellipsoid
|
| 50 |
+
fourier_gaussian
|
| 51 |
+
fourier_shift
|
| 52 |
+
fourier_uniform
|
| 53 |
+
|
| 54 |
+
Interpolation
|
| 55 |
+
=============
|
| 56 |
+
|
| 57 |
+
.. autosummary::
|
| 58 |
+
:toctree: generated/
|
| 59 |
+
|
| 60 |
+
affine_transform - Apply an affine transformation
|
| 61 |
+
geometric_transform - Apply an arbitrary geometric transform
|
| 62 |
+
map_coordinates - Map input array to new coordinates by interpolation
|
| 63 |
+
rotate - Rotate an array
|
| 64 |
+
shift - Shift an array
|
| 65 |
+
spline_filter
|
| 66 |
+
spline_filter1d
|
| 67 |
+
zoom - Zoom an array
|
| 68 |
+
|
| 69 |
+
Measurements
|
| 70 |
+
============
|
| 71 |
+
|
| 72 |
+
.. autosummary::
|
| 73 |
+
:toctree: generated/
|
| 74 |
+
|
| 75 |
+
center_of_mass - The center of mass of the values of an array at labels
|
| 76 |
+
extrema - Min's and max's of an array at labels, with their positions
|
| 77 |
+
find_objects - Find objects in a labeled array
|
| 78 |
+
histogram - Histogram of the values of an array, optionally at labels
|
| 79 |
+
label - Label features in an array
|
| 80 |
+
labeled_comprehension
|
| 81 |
+
maximum
|
| 82 |
+
maximum_position
|
| 83 |
+
mean - Mean of the values of an array at labels
|
| 84 |
+
median
|
| 85 |
+
minimum
|
| 86 |
+
minimum_position
|
| 87 |
+
standard_deviation - Standard deviation of an N-D image array
|
| 88 |
+
sum_labels - Sum of the values of the array
|
| 89 |
+
value_indices - Find indices of each distinct value in given array
|
| 90 |
+
variance - Variance of the values of an N-D image array
|
| 91 |
+
watershed_ift
|
| 92 |
+
|
| 93 |
+
Morphology
|
| 94 |
+
==========
|
| 95 |
+
|
| 96 |
+
.. autosummary::
|
| 97 |
+
:toctree: generated/
|
| 98 |
+
|
| 99 |
+
binary_closing
|
| 100 |
+
binary_dilation
|
| 101 |
+
binary_erosion
|
| 102 |
+
binary_fill_holes
|
| 103 |
+
binary_hit_or_miss
|
| 104 |
+
binary_opening
|
| 105 |
+
binary_propagation
|
| 106 |
+
black_tophat
|
| 107 |
+
distance_transform_bf
|
| 108 |
+
distance_transform_cdt
|
| 109 |
+
distance_transform_edt
|
| 110 |
+
generate_binary_structure
|
| 111 |
+
grey_closing
|
| 112 |
+
grey_dilation
|
| 113 |
+
grey_erosion
|
| 114 |
+
grey_opening
|
| 115 |
+
iterate_structure
|
| 116 |
+
morphological_gradient
|
| 117 |
+
morphological_laplace
|
| 118 |
+
white_tophat
|
| 119 |
+
|
| 120 |
+
"""
|
| 121 |
+
|
| 122 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 123 |
+
#
|
| 124 |
+
# Redistribution and use in source and binary forms, with or without
|
| 125 |
+
# modification, are permitted provided that the following conditions
|
| 126 |
+
# are met:
|
| 127 |
+
#
|
| 128 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 129 |
+
# notice, this list of conditions and the following disclaimer.
|
| 130 |
+
#
|
| 131 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 132 |
+
# copyright notice, this list of conditions and the following
|
| 133 |
+
# disclaimer in the documentation and/or other materials provided
|
| 134 |
+
# with the distribution.
|
| 135 |
+
#
|
| 136 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 137 |
+
# products derived from this software without specific prior
|
| 138 |
+
# written permission.
|
| 139 |
+
#
|
| 140 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 141 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 142 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 143 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 144 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 145 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 146 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 147 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 148 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 149 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 150 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 151 |
+
|
| 152 |
+
# bring in the public functionality from private namespaces
|
| 153 |
+
|
| 154 |
+
# mypy: ignore-errors
|
| 155 |
+
|
| 156 |
+
from ._support_alternative_backends import *
|
| 157 |
+
|
| 158 |
+
# adjust __all__ and do not leak implementation details
|
| 159 |
+
from . import _support_alternative_backends
|
| 160 |
+
__all__ = _support_alternative_backends.__all__
|
| 161 |
+
del _support_alternative_backends, _ndimage_api, _delegators # noqa: F821
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
| 165 |
+
from . import filters
|
| 166 |
+
from . import fourier
|
| 167 |
+
from . import interpolation
|
| 168 |
+
from . import measurements
|
| 169 |
+
from . import morphology
|
| 170 |
+
|
| 171 |
+
from scipy._lib._testutils import PytestTester
|
| 172 |
+
test = PytestTester(__name__)
|
| 173 |
+
del PytestTester
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (17 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (91 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_delegators.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Delegators for alternative backends in scipy.ndimage.
|
| 2 |
+
|
| 3 |
+
The signature of `func_signature` must match the signature of ndimage.func.
|
| 4 |
+
The job of a `func_signature` is to know which arguments of `ndimage.func`
|
| 5 |
+
are arrays.
|
| 6 |
+
|
| 7 |
+
* signatures are generated by
|
| 8 |
+
|
| 9 |
+
--------------
|
| 10 |
+
import inspect
|
| 11 |
+
from scipy import ndimage
|
| 12 |
+
|
| 13 |
+
names = [x for x in dir(ndimage) if not x.startswith('_')]
|
| 14 |
+
objs = [getattr(ndimage, name) for name in names]
|
| 15 |
+
funcs = [obj for obj in objs if inspect.isroutine(obj)]
|
| 16 |
+
|
| 17 |
+
for func in funcs:
|
| 18 |
+
sig = inspect.signature(func)
|
| 19 |
+
print(f"def {func.__name__}_signature{sig}:\n\tpass\n\n")
|
| 20 |
+
---------------
|
| 21 |
+
|
| 22 |
+
* which arguments to delegate on: manually trawled the documentation for
|
| 23 |
+
array-like and array arguments
|
| 24 |
+
|
| 25 |
+
"""
|
| 26 |
+
import numpy as np
|
| 27 |
+
from scipy._lib._array_api import array_namespace
|
| 28 |
+
from scipy.ndimage._ni_support import _skip_if_dtype, _skip_if_int
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def affine_transform_signature(
    input, matrix, offset=0.0, output_shape=None, output=None, *args, **kwds
):
    # Delegate on the input array, the transform matrix and a pre-allocated
    # `output` array; a dtype passed as `output` is skipped.
    return array_namespace(input, matrix, _skip_if_dtype(output))


def binary_closing_signature(
    input, structure=None, iterations=1, output=None, *args, **kwds
):
    # Array arguments: input, structure and an optional output array.
    return array_namespace(input, structure, _skip_if_dtype(output))

# Same array arguments as binary_closing.
binary_opening_signature = binary_closing_signature


def binary_dilation_signature(
    input, structure=None, iterations=1, mask=None, output=None, *args, **kwds
):
    # `mask` also participates in namespace resolution.
    return array_namespace(input, structure, _skip_if_dtype(output), mask)

# Same array arguments as binary_dilation.
binary_erosion_signature = binary_dilation_signature


def binary_fill_holes_signature(
    input, structure=None, output=None, origin=0, *args, **kwargs
):
    return array_namespace(input, structure, _skip_if_dtype(output))


def label_signature(input, structure=None, output=None, origin=0):
    return array_namespace(input, structure, _skip_if_dtype(output))


def binary_hit_or_miss_signature(
    input, structure1=None, structure2=None, output=None, *args, **kwds
):
    # Both structures are array arguments.
    return array_namespace(input, structure1, structure2, _skip_if_dtype(output))


def binary_propagation_signature(
    input, structure=None, mask=None, output=None, *args, **kwds
):
    return array_namespace(input, structure, mask, _skip_if_dtype(output))
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def convolve_signature(input, weights, output=None, *args, **kwds):
    # Delegate on the input, the kernel weights and an optional output array.
    return array_namespace(input, weights, _skip_if_dtype(output))

# Same array arguments as convolve.
correlate_signature = convolve_signature


def convolve1d_signature(input, weights, axis=-1, output=None, *args, **kwds):
    return array_namespace(input, weights, _skip_if_dtype(output))

# Same array arguments as convolve1d.
correlate1d_signature = convolve1d_signature


def distance_transform_bf_signature(
    input, metric='euclidean', sampling=None, return_distances=True,
    return_indices=False, distances=None, indices=None
):
    # `distances` and `indices` are optional pre-allocated output arrays.
    return array_namespace(input, distances, indices)


def distance_transform_cdt_signature(
    input, metric='chessboard', return_distances=True, return_indices=False,
    distances=None, indices=None
):
    return array_namespace(input, distances, indices)


def distance_transform_edt_signature(
    input, sampling=None, return_distances=True, return_indices=False,
    distances=None, indices=None
):
    return array_namespace(input, distances, indices)


def find_objects_signature(input, max_label=0):
    # Only `input` is an array argument.
    return array_namespace(input)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def fourier_ellipsoid_signature(input, size, n=-1, axis=-1, output=None):
    # Only the input and an optional pre-allocated output are arrays.
    return array_namespace(input, _skip_if_dtype(output))

# Same array arguments as fourier_ellipsoid.
fourier_uniform_signature = fourier_ellipsoid_signature


def fourier_gaussian_signature(input, sigma, n=-1, axis=-1, output=None):
    return array_namespace(input, _skip_if_dtype(output))

def fourier_shift_signature(input, shift, n=-1, axis=-1, output=None):
    return array_namespace(input, _skip_if_dtype(output))


def gaussian_filter_signature(input, sigma, order=0, output=None, *args, **kwds):
    return array_namespace(input, _skip_if_dtype(output))


def gaussian_filter1d_signature(
    input, sigma, axis=-1, order=0, output=None, *args, **kwds
):
    return array_namespace(input, _skip_if_dtype(output))


def gaussian_gradient_magnitude_signature(input, sigma, output=None, *args, **kwds):
    return array_namespace(input, _skip_if_dtype(output))

# Same array arguments as gaussian_gradient_magnitude.
gaussian_laplace_signature = gaussian_gradient_magnitude_signature
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def generate_binary_structure_signature(rank, connectivity):
    # XXX: no input arrays; always return numpy
    return np


def generic_filter_signature(
    input, function, size=None, footprint=None, output=None, *args, **kwds
):
    # XXX: function LowLevelCallable w/backends
    return array_namespace(input, footprint, _skip_if_dtype(output))


def generic_filter1d_signature(
    input, function, filter_size, axis=-1, output=None, *args, **kwds
):
    return array_namespace(input, _skip_if_dtype(output))


def generic_gradient_magnitude_signature(
    input, derivative, output=None, *args, **kwds
):
    # XXX: function LowLevelCallable w/backends
    return array_namespace(input, _skip_if_dtype(output))


def generic_laplace_signature(input, derivative2, output=None, *args, **kwds):
    # XXX: function LowLevelCallable w/backends
    return array_namespace(input, _skip_if_dtype(output))


def geometric_transform_signature(
    input, mapping, output_shape=None, output=None, *args, **kwds
):
    # `mapping` is a callable, not an array argument.
    return array_namespace(input, _skip_if_dtype(output))
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def histogram_signature(input, min, max, bins, labels=None, index=None):
    # Delegate on the input and the optional `labels` array.
    return array_namespace(input, labels)


def iterate_structure_signature(structure, iterations, origin=None):
    return array_namespace(structure)


def labeled_comprehension_signature(input, labels, *args, **kwds):
    return array_namespace(input, labels)


def laplace_signature(input, output=None, *args, **kwds):
    return array_namespace(input, _skip_if_dtype(output))


def map_coordinates_signature(input, coordinates, output=None, *args, **kwds):
    return array_namespace(input, coordinates, _skip_if_dtype(output))


def maximum_filter1d_signature(input, size, axis=-1, output=None, *args, **kwds):
    return array_namespace(input, _skip_if_dtype(output))

minimum_filter1d_signature = maximum_filter1d_signature
uniform_filter1d_signature = maximum_filter1d_signature


def maximum_signature(input, labels=None, index=None):
    # A plain-int `index` is skipped; array-like indices participate.
    return array_namespace(input, labels, _skip_if_int(index))

# The labelled-measurement functions all share the (input, labels, index)
# argument shape, so they reuse the same delegator.
minimum_signature = maximum_signature
median_signature = maximum_signature
mean_signature = maximum_signature
variance_signature = maximum_signature
standard_deviation_signature = maximum_signature
sum_labels_signature = maximum_signature
sum_signature = maximum_signature  # ndimage.sum is sum_labels

maximum_position_signature = maximum_signature
minimum_position_signature = maximum_signature

extrema_signature = maximum_signature
center_of_mass_signature = extrema_signature
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def median_filter_signature(
    input, size=None, footprint=None, output=None, *args, **kwds
):
    # `footprint` is an array argument; a dtype `output` is skipped.
    return array_namespace(input, footprint, _skip_if_dtype(output))

minimum_filter_signature = median_filter_signature
maximum_filter_signature = median_filter_signature


def morphological_gradient_signature(
    input, size=None, footprint=None, structure=None, output=None, *args, **kwds
):
    return array_namespace(input, footprint, structure, _skip_if_dtype(output))

# The grey-morphology routines all share (input, footprint, structure,
# output) as their array arguments, so they reuse the same delegator.
morphological_laplace_signature = morphological_gradient_signature
white_tophat_signature = morphological_gradient_signature
black_tophat_signature = morphological_gradient_signature
grey_closing_signature = morphological_gradient_signature
grey_dilation_signature = morphological_gradient_signature
grey_erosion_signature = morphological_gradient_signature
grey_opening_signature = morphological_gradient_signature


def percentile_filter_signature(
    input, percentile, size=None, footprint=None, output=None, *args, **kwds
):
    return array_namespace(input, footprint, _skip_if_dtype(output))


def prewitt_signature(input, axis=-1, output=None, *args, **kwds):
    return array_namespace(input, _skip_if_dtype(output))

sobel_signature = prewitt_signature


def rank_filter_signature(
    input, rank, size=None, footprint=None, output=None, *args, **kwds
):
    return array_namespace(input, footprint, _skip_if_dtype(output))
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def rotate_signature(
    input, angle, axes=(1, 0), reshape=True, output=None, *args, **kwds
):
    # Only `input` and a pre-allocated `output` array are array arguments;
    # `angle`, `axes` and `reshape` are plain Python values and are not
    # delegated on. (Also fixes the stray space before the comma after
    # `output=None` in the parameter list — PEP 8 E203.)
    return array_namespace(input, _skip_if_dtype(output))
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def shift_signature(input, shift, output=None, *args, **kwds):
    # `shift` is not delegated on; only input/output arrays are.
    return array_namespace(input, _skip_if_dtype(output))


def spline_filter_signature(input, order=3, output=np.float64, *args, **kwds):
    # Default `output` is a dtype (np.float64) and is therefore skipped.
    return array_namespace(input, _skip_if_dtype(output))


def spline_filter1d_signature(
    input, order=3, axis=-1, output=np.float64, *args, **kwds
):
    return array_namespace(input, _skip_if_dtype(output))


def uniform_filter_signature(input, size=3, output=None, *args, **kwds):
    return array_namespace(input, _skip_if_dtype(output))


def value_indices_signature(arr, *args, **kwds):
    return array_namespace(arr)


def watershed_ift_signature(input, markers, structure=None, output=None):
    return array_namespace(input, markers, structure, _skip_if_dtype(output))


def zoom_signature(input, zoom, output=None, *args, **kwds):
    return array_namespace(input, _skip_if_dtype(output))
|
| 297 |
+
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_filters.py
ADDED
|
@@ -0,0 +1,1965 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions
|
| 5 |
+
# are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 11 |
+
# copyright notice, this list of conditions and the following
|
| 12 |
+
# disclaimer in the documentation and/or other materials provided
|
| 13 |
+
# with the distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 16 |
+
# products derived from this software without specific prior
|
| 17 |
+
# written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 20 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 21 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 23 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 25 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 27 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
from collections.abc import Iterable
|
| 32 |
+
import numbers
|
| 33 |
+
import warnings
|
| 34 |
+
import numpy as np
|
| 35 |
+
import operator
|
| 36 |
+
|
| 37 |
+
from scipy._lib._util import normalize_axis_index
|
| 38 |
+
from . import _ni_support
|
| 39 |
+
from . import _nd_image
|
| 40 |
+
from . import _ni_docstrings
|
| 41 |
+
from . import _rank_filter_1d
|
| 42 |
+
|
| 43 |
+
# Public API of this module: the filter functions exported via
# ``from scipy.ndimage import *``-style imports.
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
           'prewitt', 'sobel', 'generic_laplace', 'laplace',
           'gaussian_laplace', 'generic_gradient_magnitude',
           'gaussian_gradient_magnitude', 'correlate', 'convolve',
           'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
           'maximum_filter1d', 'minimum_filter', 'maximum_filter',
           'rank_filter', 'median_filter', 'percentile_filter',
           'generic_filter1d', 'generic_filter']
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def _invalid_origin(origin, lenw):
|
| 54 |
+
return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
    """Complex convolution via a linear combination of real convolutions.

    `func` is a real-valued correlation/convolution routine (e.g.
    ``correlate1d``) that is applied separately to the real and imaginary
    components.  `output` is a preallocated complex array that is filled
    in place and also returned.  For fully complex input and weights this
    implements (a+bi)*(c+di) = (ac - bd) + (ad + bc)i componentwise.
    """
    complex_input = input.dtype.kind == 'c'
    complex_weights = weights.dtype.kind == 'c'
    if complex_input and complex_weights:
        # real component of the output: ac - bd
        func(input.real, weights.real, output=output.real,
             cval=np.real(cval), **kwargs)
        output.real -= func(input.imag, weights.imag, output=None,
                            cval=np.imag(cval), **kwargs)
        # imaginary component of the output: ad + bc
        func(input.real, weights.imag, output=output.imag,
             cval=np.real(cval), **kwargs)
        output.imag += func(input.imag, weights.real, output=None,
                            cval=np.imag(cval), **kwargs)
    elif complex_input:
        # real weights: filter the real and imaginary parts independently
        func(input.real, weights, output=output.real, cval=np.real(cval),
             **kwargs)
        func(input.imag, weights, output=output.imag, cval=np.imag(cval),
             **kwargs)
    else:
        # real input, complex weights: a complex boundary fill value makes
        # no sense because the padded input must stay real
        if np.iscomplexobj(cval):
            raise ValueError("Cannot provide a complex-valued cval when the "
                             "input is real.")
        func(input, weights.real, output=output.real, cval=cval, **kwargs)
        func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
    return output
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _expand_origin(ndim_image, axes, origin):
    """Expand per-axis origins into one entry per image dimension.

    Axes that are not being filtered receive an origin of 0.
    """
    per_axis = _ni_support._normalize_sequence(origin, len(axes))
    if len(axes) >= ndim_image:
        return per_axis
    # unfiltered axes default to origin 0
    expanded = [0] * ndim_image
    for ax, orig in zip(axes, per_axis):
        expanded[ax] = orig
    return expanded
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _expand_footprint(ndim_image, axes, footprint,
|
| 99 |
+
footprint_name="footprint"):
|
| 100 |
+
num_axes = len(axes)
|
| 101 |
+
if num_axes < ndim_image:
|
| 102 |
+
if footprint.ndim != num_axes:
|
| 103 |
+
raise RuntimeError(f"{footprint_name}.ndim ({footprint.ndim}) "
|
| 104 |
+
f"must match len(axes) ({num_axes})")
|
| 105 |
+
|
| 106 |
+
footprint = np.expand_dims(
|
| 107 |
+
footprint,
|
| 108 |
+
tuple(ax for ax in range(ndim_image) if ax not in axes)
|
| 109 |
+
)
|
| 110 |
+
return footprint
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _expand_mode(ndim_image, axes, mode):
|
| 114 |
+
num_axes = len(axes)
|
| 115 |
+
if not isinstance(mode, str) and isinstance(mode, Iterable):
|
| 116 |
+
# set mode = 'constant' for any axes not being filtered
|
| 117 |
+
modes = _ni_support._normalize_sequence(mode, num_axes)
|
| 118 |
+
modes_temp = ['constant'] * ndim_image
|
| 119 |
+
for m, ax in zip(modes, axes):
|
| 120 |
+
modes_temp[ax] = m
|
| 121 |
+
mode = modes_temp
|
| 122 |
+
return mode
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@_ni_docstrings.docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
                cval=0.0, origin=0):
    """Calculate a 1-D correlation along the given axis.

    The lines of the array along the given axis are correlated with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : array
        1-D sequence of numbers.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray
        Correlation result. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy.ndimage import correlate1d
    >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
    array([ 8, 26,  8, 12,  7, 28, 36,  9])
    """
    input = np.asarray(input)
    weights = np.asarray(weights)
    has_complex_input = input.dtype.kind == 'c'
    has_complex_weights = weights.dtype.kind == 'c'
    if has_complex_input or has_complex_weights:
        # Handle complex data by recursing on real/imaginary components.
        if has_complex_weights:
            # correlation uses the conjugate of the weights
            weights = weights.conj()
            weights = weights.astype(np.complex128, copy=False)
        output = _ni_support._get_output(output, input, complex_output=True)
        return _complex_via_real_components(
            correlate1d, input, weights, output, cval,
            axis=axis, mode=mode, origin=origin)

    output = _ni_support._get_output(output, input)
    weights = np.asarray(weights, dtype=np.float64)
    if weights.ndim != 1 or weights.shape[0] < 1:
        raise RuntimeError('no filter weights given')
    if not weights.flags.contiguous:
        # the C implementation requires a contiguous weights buffer
        weights = weights.copy()
    axis = normalize_axis_index(axis, input.ndim)
    if _invalid_origin(origin, len(weights)):
        raise ValueError('Invalid origin; origin must satisfy '
                         '-(len(weights) // 2) <= origin <= '
                         '(len(weights)-1) // 2')
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate1d(input, weights, axis, output, mode, cval,
                          origin)
    return output
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
@_ni_docstrings.docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
               cval=0.0, origin=0):
    """Calculate a 1-D convolution along the given axis.

    The lines of the array along the given axis are convolved with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        1-D sequence of numbers.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    convolve1d : ndarray
        Convolved array with same shape as input

    Examples
    --------
    >>> from scipy.ndimage import convolve1d
    >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
    array([14, 24,  4, 13, 12, 36, 27,  0])
    """
    # Convolution is correlation with a reversed kernel and mirrored origin.
    reversed_weights = np.asarray(weights)[::-1]
    flipped_origin = -origin
    if reversed_weights.shape[0] % 2 == 0:
        # even-length kernels need one extra shift after reversal
        flipped_origin -= 1
    if reversed_weights.dtype.kind == 'c':
        # pre-conjugate so correlate1d's conjugation cancels out
        reversed_weights = reversed_weights.conj()
    return correlate1d(input, reversed_weights, axis, output, mode, cval,
                       flipped_origin)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def _gaussian_kernel1d(sigma, order, radius):
|
| 227 |
+
"""
|
| 228 |
+
Computes a 1-D Gaussian convolution kernel.
|
| 229 |
+
"""
|
| 230 |
+
if order < 0:
|
| 231 |
+
raise ValueError('order must be non-negative')
|
| 232 |
+
exponent_range = np.arange(order + 1)
|
| 233 |
+
sigma2 = sigma * sigma
|
| 234 |
+
x = np.arange(-radius, radius+1)
|
| 235 |
+
phi_x = np.exp(-0.5 / sigma2 * x ** 2)
|
| 236 |
+
phi_x = phi_x / phi_x.sum()
|
| 237 |
+
|
| 238 |
+
if order == 0:
|
| 239 |
+
return phi_x
|
| 240 |
+
else:
|
| 241 |
+
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
|
| 242 |
+
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
|
| 243 |
+
# p'(x) = -1 / sigma ** 2
|
| 244 |
+
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
|
| 245 |
+
# coefficients of q(x)
|
| 246 |
+
q = np.zeros(order + 1)
|
| 247 |
+
q[0] = 1
|
| 248 |
+
D = np.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
|
| 249 |
+
P = np.diag(np.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
|
| 250 |
+
Q_deriv = D + P
|
| 251 |
+
for _ in range(order):
|
| 252 |
+
q = Q_deriv.dot(q)
|
| 253 |
+
q = (x[:, None] ** exponent_range).dot(q)
|
| 254 |
+
return q * phi_x
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
                      mode="reflect", cval=0.0, truncate=4.0, *, radius=None):
    """1-D Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar
        standard deviation for Gaussian kernel
    %(axis)s
    order : int, optional
        An order of 0 corresponds to convolution with a Gaussian
        kernel. A positive order corresponds to convolution with
        that derivative of a Gaussian.
    %(output)s
    %(mode_reflect)s
    %(cval)s
    truncate : float, optional
        Truncate the filter at this many standard deviations.
        Default is 4.0.
    radius : None or int, optional
        Radius of the Gaussian kernel. If specified, the size of
        the kernel will be ``2*radius + 1``, and `truncate` is ignored.
        Default is None.

    Returns
    -------
    gaussian_filter1d : ndarray

    Notes
    -----
    The Gaussian kernel will have size ``2*radius + 1`` along each axis. If
    `radius` is None, a default ``radius = round(truncate * sigma)`` will be
    used.

    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter1d
    >>> import numpy as np
    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
    array([ 1.42704095,  2.06782203,  3.        ,  3.93217797,  4.57295905])
    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
    array([ 2.91948343,  2.95023502,  3.        ,  3.04976498,  3.08051657])
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> x = rng.standard_normal(101).cumsum()
    >>> y3 = gaussian_filter1d(x, 3)
    >>> y6 = gaussian_filter1d(x, 6)
    >>> plt.plot(x, 'k', label='original data')
    >>> plt.plot(y3, '--', label='filtered, sigma=3')
    >>> plt.plot(y6, ':', label='filtered, sigma=6')
    >>> plt.legend()
    >>> plt.grid()
    >>> plt.show()

    """
    sd = float(sigma)
    if radius is not None:
        kernel_radius = radius
    else:
        # default radius covers `truncate` standard deviations, rounded
        kernel_radius = int(truncate * sd + 0.5)
    if not isinstance(kernel_radius, numbers.Integral) or kernel_radius < 0:
        raise ValueError('Radius must be a nonnegative integer.')
    # correlate1d does not flip the kernel, so reverse it here to get a
    # true convolution
    weights = _gaussian_kernel1d(sigma, order, kernel_radius)[::-1]
    return correlate1d(input, weights, axis, output, mode, cval, 0)
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
                    mode="reflect", cval=0.0, truncate=4.0, *, radius=None,
                    axes=None):
    """Multidimensional Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        Standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    order : int or sequence of ints, optional
        The order of the filter along each axis is given as a sequence
        of integers, or as a single number. An order of 0 corresponds
        to convolution with a Gaussian kernel. A positive order
        corresponds to convolution with that derivative of a Gaussian.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    truncate : float, optional
        Truncate the filter at this many standard deviations.
        Default is 4.0.
    radius : None or int or sequence of ints, optional
        Radius of the Gaussian kernel. The radius are given for each axis
        as a sequence, or as a single number, in which case it is equal
        for all axes. If specified, the size of the kernel along each axis
        will be ``2*radius + 1``, and `truncate` is ignored.
        Default is None.
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `sigma`, `order`, `mode` and/or `radius`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    gaussian_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of
    1-D convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.

    The Gaussian kernel will have size ``2*radius + 1`` along each axis. If
    `radius` is None, the default ``radius = round(truncate * sigma)`` will be
    used.

    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter
    >>> import numpy as np
    >>> a = np.arange(50, step=2).reshape((5,5))
    >>> a
    array([[ 0,  2,  4,  6,  8],
           [10, 12, 14, 16, 18],
           [20, 22, 24, 26, 28],
           [30, 32, 34, 36, 38],
           [40, 42, 44, 46, 48]])
    >>> gaussian_filter(a, sigma=1)
    array([[ 4,  6,  8,  9, 11],
           [10, 12, 14, 15, 17],
           [20, 22, 24, 25, 27],
           [29, 31, 33, 34, 36],
           [35, 37, 39, 40, 42]])

    >>> from scipy import datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = gaussian_filter(ascent, sigma=5)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)

    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    orders = _ni_support._normalize_sequence(order, num_axes)
    sigmas = _ni_support._normalize_sequence(sigma, num_axes)
    modes = _ni_support._normalize_sequence(mode, num_axes)
    radiuses = _ni_support._normalize_sequence(radius, num_axes)
    # One 1-D pass per filtered axis; axes with (near-)zero sigma are skipped.
    passes = [
        (ax, sig, ord_, mod, rad)
        for ax, sig, ord_, mod, rad
        in zip(axes, sigmas, orders, modes, radiuses)
        if sig > 1e-15
    ]
    if not passes:
        # nothing to filter: just copy the data into the output array
        output[...] = input[...]
        return output
    source = input
    for ax, sig, ord_, mod, rad in passes:
        gaussian_filter1d(source, sig, ax, ord_, output,
                          mod, cval, truncate, radius=rad)
        # each later pass reads the result of the previous one
        source = output
    return output
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Prewitt filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    prewitt : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    sobel: Sobel filter

    Notes
    -----
    This function computes the one-dimensional Prewitt filter.
    Horizontal edges are emphasised with the horizontal transform (axis=0),
    vertical edges with the vertical transform (axis=1), and so on for higher
    dimensions. These can be combined to give the magnitude.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> ascent = datasets.ascent()
    >>> prewitt_h = ndimage.prewitt(ascent, axis=0)
    >>> prewitt_v = ndimage.prewitt(ascent, axis=1)
    >>> magnitude = np.sqrt(prewitt_h ** 2 + prewitt_v ** 2)
    >>> magnitude *= 255 / np.max(magnitude)  # Normalization
    >>> fig, axes = plt.subplots(2, 2, figsize = (8, 8))
    >>> plt.gray()
    >>> axes[0, 0].imshow(ascent)
    >>> axes[0, 1].imshow(prewitt_h)
    >>> axes[1, 0].imshow(prewitt_v)
    >>> axes[1, 1].imshow(magnitude)
    >>> titles = ["original", "horizontal", "vertical", "magnitude"]
    >>> for i, ax in enumerate(axes.ravel()):
    ...     ax.set_title(titles[i])
    ...     ax.axis("off")
    >>> plt.show()

    """
    input = np.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # central difference along the requested axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... followed by uniform smoothing along every remaining axis
    for other in range(input.ndim):
        if other == axis:
            continue
        correlate1d(output, [1, 1, 1], other, output, modes[other], cval, 0)
    return output
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Sobel filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    sobel : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function computes the axis-specific Sobel gradient.
    The horizontal edges can be emphasised with the horizontal transform (axis=0),
    the vertical edges with the vertical transform (axis=1) and so on for higher
    dimensions. These can be combined to give the magnitude.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> ascent = datasets.ascent().astype('int32')
    >>> sobel_h = ndimage.sobel(ascent, 0)  # horizontal gradient
    >>> sobel_v = ndimage.sobel(ascent, 1)  # vertical gradient
    >>> magnitude = np.sqrt(sobel_h**2 + sobel_v**2)
    >>> magnitude *= 255.0 / np.max(magnitude)  # normalization
    >>> fig, axs = plt.subplots(2, 2, figsize=(8, 8))
    >>> plt.gray()  # show the filtered result in grayscale
    >>> axs[0, 0].imshow(ascent)
    >>> axs[0, 1].imshow(sobel_h)
    >>> axs[1, 0].imshow(sobel_v)
    >>> axs[1, 1].imshow(magnitude)
    >>> titles = ["original", "horizontal", "vertical", "magnitude"]
    >>> for i, ax in enumerate(axs.ravel()):
    ...     ax.set_title(titles[i])
    ...     ax.axis("off")
    >>> plt.show()

    """
    input = np.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # central difference along the requested axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... followed by triangular [1, 2, 1] smoothing along the other axes
    for other in range(input.ndim):
        if other == axis:
            continue
        correlate1d(output, [1, 2, 1], other, output, modes[other], cval, 0)
    return output
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
                    cval=0.0,
                    extra_arguments=(),
                    extra_keywords=None,
                    *, axes=None):
    """
    N-D Laplace filter using a provided second derivative function.

    Parameters
    ----------
    %(input)s
    derivative2 : callable
        Callable with the following signature::

            derivative2(input, axis, output, mode, cval,
                        *extra_arguments, **extra_keywords)

        See `extra_arguments`, `extra_keywords` below.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    axes : tuple of int or None
        The axes over which to apply the filter. If a `mode` tuple is
        provided, its length must match the number of axes.

    Returns
    -------
    generic_laplace : ndarray
        Filtered array. Has the same shape as `input`.

    """
    if extra_keywords is None:
        extra_keywords = {}
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = _ni_support._check_axes(axes, input.ndim)
    if len(axes) > 0:
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # first axis: write the second derivative directly into `output`
        derivative2(input, axes[0], output, modes[0], cval,
                    *extra_arguments, **extra_keywords)
        for ii in range(1, len(axes)):
            # NOTE(review): `output.dtype` (not `output`) is passed as the
            # output argument here — presumably this makes `derivative2`
            # allocate a fresh array of that dtype so the running sum in
            # `output` is not overwritten; confirm against _get_output.
            tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
                              *extra_arguments, **extra_keywords)
            # accumulate the per-axis second derivatives into the Laplacian
            output += tmp
        else:
            output[...] = input[...]
    return output
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0, *, axes=None):
    """N-D Laplace filter based on approximate second derivatives.

    Parameters
    ----------
    %(input)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    axes : tuple of int or None
        The axes over which to apply the filter. If a `mode` tuple is
        provided, its length must match the number of axes.

    Returns
    -------
    laplace : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.laplace(ascent)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    def _second_derivative(arr, axis, out, mode, cval):
        # Discrete 1-D second derivative: correlation with [1, -2, 1].
        return correlate1d(arr, [1, -2, 1], axis, out, mode, cval, 0)

    return generic_laplace(input, _second_derivative, output, mode, cval,
                           axes=axes)
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
                     cval=0.0, *, axes=None, **kwargs):
    """Multidimensional Laplace filter using Gaussian second derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    axes : tuple of int or None
        The axes over which to apply the filter. If `sigma` or `mode` tuples
        are provided, their length must match the number of axes.
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_laplace : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> ascent = datasets.ascent()

    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side

    >>> result = ndimage.gaussian_laplace(ascent, sigma=1)
    >>> ax1.imshow(result)

    >>> result = ndimage.gaussian_laplace(ascent, sigma=3)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = np.asarray(input)

    def _axis_second_derivative(arr, axis, out, mode, cval, sigma, **kw):
        # Gaussian second derivative along a single axis.
        orders = [2 if ax == axis else 0 for ax in range(arr.ndim)]
        return gaussian_filter(arr, sigma, orders, out, mode, cval, **kw)

    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    sigma = _ni_support._normalize_sequence(sigma, num_axes)
    if num_axes < input.ndim:
        # Zero sigma on any axis that is not being filtered.
        sigma_by_axis = dict(zip(axes, sigma))
        sigma = [sigma_by_axis.get(ax, 0) for ax in range(input.ndim)]

    return generic_laplace(input, _axis_second_derivative, output, mode, cval,
                           extra_arguments=(sigma,),
                           extra_keywords=kwargs,
                           axes=axes)
|
| 708 |
+
|
| 709 |
+
|
| 710 |
+
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
                               mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None,
                               *, axes=None):
    """Gradient magnitude using a provided gradient function.

    Parameters
    ----------
    %(input)s
    derivative : callable
        Callable with the following signature::

            derivative(input, axis, output, mode, cval,
                       *extra_arguments, **extra_keywords)

        See `extra_arguments`, `extra_keywords` below.
        `derivative` can assume that `input` and `output` are ndarrays.
        Note that the output from `derivative` is modified inplace;
        be careful to copy important inputs before returning them.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    axes : tuple of int or None
        The axes over which to apply the filter. If a `mode` tuple is
        provided, its length must match the number of axes.

    Returns
    -------
    generic_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.

    """
    if extra_keywords is None:
        extra_keywords = {}
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = _ni_support._check_axes(axes, input.ndim)
    if len(axes) > 0:
        # One mode per filtered axis (a scalar mode is broadcast).
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # First derivative is written directly into `output`, then squared
        # in place; subsequent axes accumulate squared derivatives.
        derivative(input, axes[0], output, modes[0], cval,
                   *extra_arguments, **extra_keywords)
        np.multiply(output, output, output)
        for ii in range(1, len(axes)):
            # Passing `output.dtype` (a dtype, not an array) makes
            # `derivative` allocate and return a fresh array.
            tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
                             *extra_arguments, **extra_keywords)
            np.multiply(tmp, tmp, tmp)
            output += tmp
        # This allows the sqrt to work with a different default casting
        np.sqrt(output, output, casting='unsafe')
    else:
        # No axes to filter: the result degenerates to a copy.
        output[...] = input[...]
    return output
|
| 765 |
+
|
| 766 |
+
|
| 767 |
+
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
                                mode="reflect", cval=0.0, *, axes=None,
                                **kwargs):
    """Multidimensional gradient magnitude using Gaussian derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    axes : tuple of int or None
        The axes over which to apply the filter. If `sigma` or `mode` tuples
        are provided, their length must match the number of axes.
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = np.asarray(input)

    def _axis_first_derivative(arr, axis, out, mode, cval, sigma, **kw):
        # Gaussian first derivative along a single axis.
        orders = [1 if ax == axis else 0 for ax in range(arr.ndim)]
        return gaussian_filter(arr, sigma, orders, out, mode, cval, **kw)

    return generic_gradient_magnitude(input, _axis_first_derivative, output,
                                      mode, cval, extra_arguments=(sigma,),
                                      extra_keywords=kwargs, axes=axes)
|
| 818 |
+
|
| 819 |
+
|
| 820 |
+
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution, axes):
    # Shared implementation behind `correlate` and `convolve`;
    # `convolution` selects which operation the C kernel performs.
    input = np.asarray(input)
    weights = np.asarray(weights)
    complex_input = input.dtype.kind == 'c'
    complex_weights = weights.dtype.kind == 'c'
    if complex_input or complex_weights:
        if complex_weights and not convolution:
            # As for np.correlate, conjugate weights rather than input.
            weights = weights.conj()
        kwargs = dict(
            mode=mode, origin=origin, convolution=convolution, axes=axes
        )
        output = _ni_support._get_output(output, input, complex_output=True)

        # Recurse on the real and imaginary components separately.
        return _complex_via_real_components(_correlate_or_convolve, input,
                                            weights, output, cval, **kwargs)

    axes = _ni_support._check_axes(axes, input.ndim)
    weights = np.asarray(weights, dtype=np.float64)

    # expand weights and origins if num_axes < input.ndim
    weights = _expand_footprint(input.ndim, axes, weights, "weights")
    origins = _expand_origin(input.ndim, axes, origin)

    # Drop zero-length dimensions before validating rank.
    wshape = [ii for ii in weights.shape if ii > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError(f"weights.ndim ({len(wshape)}) must match "
                           f"len(axes) ({len(axes)})")
    if convolution:
        # Convolution == correlation with a reversed kernel; origins are
        # negated, with an extra shift for even-sized dimensions.
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        for ii in range(len(origins)):
            origins[ii] = -origins[ii]
            if not weights.shape[ii] & 1:
                origins[ii] -= 1
    for origin, lenw in zip(origins, wshape):
        if _invalid_origin(origin, lenw):
            raise ValueError('Invalid origin; origin must satisfy '
                             '-(weights.shape[k] // 2) <= origin[k] <= '
                             '(weights.shape[k]-1) // 2')

    # The C kernel requires contiguous weights.
    if not weights.flags.contiguous:
        weights = weights.copy()
    output = _ni_support._get_output(output, input)
    temp_needed = np.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    if not isinstance(mode, str) and isinstance(mode, Iterable):
        raise RuntimeError("A sequence of modes is not supported")
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate(input, weights, output, mode, cval, origins)
    if temp_needed:
        # Copy the result back into the caller-supplied array.
        temp[...] = output
        output = temp
    return output
|
| 877 |
+
|
| 878 |
+
|
| 879 |
+
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
              origin=0, *, axes=None):
    """
    Multidimensional correlation.

    The array is correlated with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `mode` or `origin` must match the length
        of `axes`. The ith entry in any of these tuples corresponds to the ith
        entry in `axes`.

    Returns
    -------
    result : ndarray
        The result of correlation of `input` with `weights`.

    See Also
    --------
    convolve : Convolve an image with a kernel.

    Examples
    --------
    Correlation is the process of moving a filter mask often referred to
    as kernel over the image and computing the sum of products at each location.

    >>> from scipy.ndimage import correlate
    >>> import numpy as np
    >>> input_img = np.arange(25).reshape(5,5)
    >>> print(input_img)
    [[ 0  1  2  3  4]
    [ 5  6  7  8  9]
    [10 11 12 13 14]
    [15 16 17 18 19]
    [20 21 22 23 24]]

    Define a kernel (weights) for correlation. In this example, it is for sum of
    center and up, down, left and right next elements.

    >>> weights = [[0, 1, 0],
    ...            [1, 1, 1],
    ...            [0, 1, 0]]

    We can calculate a correlation result:
    For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.

    >>> correlate(input_img, weights)
    array([[  6,  10,  15,  20,  24],
           [ 26,  30,  35,  40,  44],
           [ 51,  55,  60,  65,  69],
           [ 76,  80,  85,  90,  94],
           [ 96, 100, 105, 110, 114]])

    """
    # Delegate to the shared kernel with convolution disabled.
    return _correlate_or_convolve(
        input, weights, output, mode, cval, origin,
        convolution=False, axes=axes,
    )
|
| 947 |
+
|
| 948 |
+
|
| 949 |
+
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
             origin=0, *, axes=None):
    """
    Multidimensional convolution.

    The array is convolved with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : array_like
        Array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0
    origin : int or sequence, optional
        Controls the placement of the filter on the input array's pixels.
        A value of 0 (the default) centers the filter over the pixel, with
        positive values shifting the filter to the right, and negative ones
        to the left. By passing a sequence of origins with length equal to
        the number of dimensions of the input array, different shifts can
        be specified along each axis.
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `mode` or `origin` must match the length
        of `axes`. The ith entry in any of these tuples corresponds to the ith
        entry in `axes`.

    Returns
    -------
    result : ndarray
        The result of convolution of `input` with `weights`.

    See Also
    --------
    correlate : Correlate an image with a kernel.

    Notes
    -----
    Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
    W is the `weights` kernel,
    j is the N-D spatial index over :math:`W`,
    I is the `input` and k is the coordinate of the center of
    W, specified by `origin` in the input parameters.

    Examples
    --------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e., where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.

    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> from scipy import ndimage
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
    array([[11, 10,  7,  4],
           [10,  3, 11, 11],
           [15, 12, 14,  7],
           [12,  3,  7,  0]])

    Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
    with 1.0's (and then extracting only the original region of the result).

    >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
    array([[13, 11,  8,  7],
           [11,  3, 11, 14],
           [16, 12, 14, 10],
           [15,  6, 10,  5]])

    With ``mode='reflect'`` (the default), outer values are reflected at the
    edge of `input` to fill in missing values.

    >>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
    >>> ndimage.convolve(b, k, mode='reflect')
    array([[5, 0, 0],
           [3, 0, 0],
           [1, 0, 0]])

    This includes diagonally at the corners.

    >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
    >>> ndimage.convolve(b, k)
    array([[4, 2, 0],
           [3, 2, 0],
           [1, 1, 0]])

    With ``mode='nearest'``, the single nearest value in to an edge in
    `input` is repeated as many times as needed to match the overlapping
    `weights`.

    >>> c = np.array([[2, 0, 1],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0]])
    >>> ndimage.convolve(c, k, mode='nearest')
    array([[7, 0, 3],
           [5, 0, 2],
           [3, 0, 1]])

    """
    # Delegate to the shared kernel with convolution enabled.
    return _correlate_or_convolve(
        input, weights, output, mode, cval, origin,
        convolution=True, axes=axes,
    )
|
| 1066 |
+
|
| 1067 |
+
|
| 1068 |
+
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D uniform filter along the given axis.

    The lines of the array along the given axis are filtered with a
    uniform filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length of uniform filter
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray
        Filtered array. Has same shape as `input`.

    Examples
    --------
    >>> from scipy.ndimage import uniform_filter1d
    >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([4, 3, 4, 1, 4, 6, 6, 3])
    """
    input = np.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    complex_output = input.dtype.kind == 'c'
    output = _ni_support._get_output(output, input,
                                     complex_output=complex_output)
    # The filter window (shifted by origin) must still cover index 0.
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    if not complex_output:
        _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
                                   origin)
    else:
        # Complex input: filter real and imaginary parts independently,
        # splitting cval into its real and imaginary components.
        _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode,
                                   np.real(cval), origin)
        _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode,
                                   np.imag(cval), origin)
    return output
|
| 1117 |
+
|
| 1118 |
+
|
| 1119 |
+
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
                   cval=0.0, origin=0, *, axes=None):
    """Multidimensional uniform filter.

    Parameters
    ----------
    %(input)s
    size : int or sequence of ints, optional
        The sizes of the uniform filter are given for each axis as a
        sequence, or as a single number, in which case the size is
        equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size`, `origin`, and/or `mode`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    uniform_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of
    1-D uniform filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.uniform_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = np.asarray(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=input.dtype.kind == 'c')
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    sizes = _ni_support._normalize_sequence(size, num_axes)
    origins = _ni_support._normalize_sequence(origin, num_axes)
    modes = _ni_support._normalize_sequence(mode, num_axes)
    # Bundle per-axis parameters; size-1 axes are a no-op and are dropped.
    axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
            for ii in range(num_axes) if sizes[ii] > 1]
    if len(axes) > 0:
        for axis, size, origin, mode in axes:
            uniform_filter1d(input, int(size), axis, output, mode,
                             cval, origin)
            # Subsequent passes filter the partially-filtered result.
            input = output
    else:
        # Nothing to filter: plain copy.
        output[...] = input[...]
    return output
|
| 1187 |
+
|
| 1188 |
+
|
| 1189 |
+
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D minimum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    minimum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D minimum
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray.
        Filtered image. Has the same shape as `input`.

    Notes
    -----
    This function implements the MINLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html


    Examples
    --------
    >>> from scipy.ndimage import minimum_filter1d
    >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([2, 0, 0, 0, 1, 1, 0, 0])
    """
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    # The shifted window center must remain inside the window.
    shifted_center = size // 2 + origin
    if not 0 <= shifted_center < size:
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # Final flag selects minimum (1) rather than maximum (0) filtering.
    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
                                  origin, 1)
    return output
|
| 1244 |
+
|
| 1245 |
+
|
| 1246 |
+
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D maximum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    maximum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        Length along which to calculate the 1-D maximum.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    maximum1d : ndarray, None
        Maximum-filtered array with same shape as input.
        None if `output` is not None

    Notes
    -----
    This function implements the MAXLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html

    Examples
    --------
    >>> from scipy.ndimage import maximum_filter1d
    >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([8, 8, 8, 4, 9, 9, 9, 9])
    """
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    # The filter window (shifted by origin) must still cover index 0.
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # Final flag selects maximum (0) rather than minimum (1) filtering.
    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
                                  origin, 0)
    return output
|
| 1301 |
+
|
| 1302 |
+
|
| 1303 |
+
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum, axes=None):
    # Shared implementation behind minimum_filter/maximum_filter (and the
    # grey-morphology paths that supply a structuring element).
    # `minimum` selects min (truthy) vs. max (falsy) filtering; a full
    # (or absent) footprint enables the fast separable 1-D code path.
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=3)
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable = True
        else:
            footprint = np.asarray(footprint, dtype=bool)
            if not footprint.any():
                raise ValueError("All-zero footprint is not supported.")
            if footprint.all():
                # A full footprint is equivalent to a size specification,
                # which allows the separable 1-D implementation.
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        structure = np.asarray(structure, dtype=np.float64)
        separable = False
        if footprint is None:
            footprint = np.ones(structure.shape, bool)
        else:
            footprint = np.asarray(footprint, dtype=bool)
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError("Complex type not supported")
    output = _ni_support._get_output(output, input)
    temp_needed = np.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    if separable:
        origins = _ni_support._normalize_sequence(origin, num_axes)
        sizes = _ni_support._normalize_sequence(size, num_axes)
        modes = _ni_support._normalize_sequence(mode, num_axes)
        # Bundle per-axis parameters; size-1 axes are a no-op and dropped.
        axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
                for ii in range(len(axes)) if sizes[ii] > 1]
        if minimum:
            filter_ = minimum_filter1d
        else:
            filter_ = maximum_filter1d
        if len(axes) > 0:
            for axis, size, origin, mode in axes:
                filter_(input, int(size), axis, output, mode, cval, origin)
                # Subsequent passes filter the partially-filtered result.
                input = output
        else:
            output[...] = input[...]
    else:
        # expand origins and footprint if num_axes < input.ndim
        footprint = _expand_footprint(input.ndim, axes, footprint)
        origins = _expand_origin(input.ndim, axes, origin)

        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError(f"footprint.ndim ({footprint.ndim}) must match "
                               f"len(axes) ({len(axes)})")
        for origin, lenf in zip(origins, fshape):
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
                raise ValueError("invalid origin")
        if not footprint.flags.contiguous:
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != num_axes:
                raise RuntimeError("structure array has incorrect shape")
            # BUG FIX: when only a subset of axes is filtered, `structure`
            # (which has num_axes dimensions, guaranteed by the check above)
            # must be expanded to the full rank of `input`.  The previous
            # comparison against `structure.ndim` was always false after the
            # shape check, leaving the expansion as dead code, and it also
            # iterated `range(structure.ndim)` instead of `range(input.ndim)`.
            if num_axes != input.ndim:
                structure = np.expand_dims(
                    structure,
                    tuple(ax for ax in range(input.ndim) if ax not in axes)
                )
            if not structure.flags.contiguous:
                structure = structure.copy()
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported for non-separable "
                "footprints")
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    if temp_needed:
        # Copy the result back into the caller-supplied array.
        temp[...] = output
        output = temp
    return output
|
| 1392 |
+
|
| 1393 |
+
|
| 1394 |
+
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0, *, axes=None):
    """Calculate a multidimensional minimum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size`, `origin`, and/or `mode`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    minimum_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    A sequence of modes (one per axis) is only supported when the footprint is
    separable. Otherwise, a single mode string must be provided.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.minimum_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # Delegate to the shared min/max implementation.  The fourth argument
    # (structure) is None for a plain flat filter; the trailing 1 selects the
    # *minimum* operation (maximum_filter passes 0).
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, 1, axes)
|
| 1440 |
+
|
| 1441 |
+
|
| 1442 |
+
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0, *, axes=None):
    """Calculate a multidimensional maximum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size`, `origin`, and/or `mode`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    maximum_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    A sequence of modes (one per axis) is only supported when the footprint is
    separable. Otherwise, a single mode string must be provided.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.maximum_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # Delegate to the shared min/max implementation.  The fourth argument
    # (structure) is None for a plain flat filter; the trailing 0 selects the
    # *maximum* operation (minimum_filter passes 1).
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, 0, axes)
|
| 1488 |
+
|
| 1489 |
+
|
| 1490 |
+
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
                 mode="reflect", cval=0.0, origin=0, operation='rank',
                 axes=None):
    """Shared implementation behind rank_filter, median_filter and
    percentile_filter.

    `operation` selects how `rank` is interpreted: 'rank' uses it directly,
    'median' ignores it and uses the middle element, 'percentile' treats it
    as a percentage of the footprint size.  The final rank is then either
    dispatched to the specialized min/max filters (rank 0 or max), a 1-D C
    fast path, or the generic n-D C rank filter.
    """
    if (size is not None) and (footprint is not None):
        # footprint wins when both are given; warn rather than raise to stay
        # backward compatible.
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=3)
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, num_axes)
        # a size-only filter is a full (all-True) rectangular footprint
        footprint = np.ones(sizes, dtype=bool)
    else:
        footprint = np.asarray(footprint, dtype=bool)
    # expand origins, footprint and modes if num_axes < input.ndim
    footprint = _expand_footprint(input.ndim, axes, footprint)
    origins = _expand_origin(input.ndim, axes, origin)
    mode = _expand_mode(input.ndim, axes, mode)

    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError(f"footprint.ndim ({footprint.ndim}) must match "
                           f"len(axes) ({len(axes)})")
    # each per-axis origin must keep the filter window inside the footprint
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        # the C code requires a contiguous footprint
        footprint = footprint.copy()
    # number of True elements in the footprint, i.e. the window size
    filter_size = np.where(footprint, 1, 0).sum()
    if operation == 'median':
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        if percentile < 0.0:
            # negative percentile counts back from 100
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError('invalid percentile')
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        # negative rank counts back from the largest element
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    if rank == 0:
        # rank 0 is exactly a minimum filter; axes=None because footprint and
        # origins are already expanded to input.ndim above
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origins, axes=None)
    elif rank == filter_size - 1:
        # the top rank is exactly a maximum filter
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origins, axes=None)
    else:
        output = _ni_support._get_output(output, input)
        temp_needed = np.may_share_memory(input, output)
        if temp_needed:
            # input and output arrays cannot share memory
            temp = output
            output = _ni_support._get_output(output.dtype, input)
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported by non-separable rank "
                "filters")
        mode = _ni_support._extend_mode_to_code(mode, is_filter=True)
        if input.ndim == 1:
            # 1-D fast path: the C routine only handles a few dtypes, so
            # promote everything else to a supported working dtype first.
            if input.dtype in (np.int64, np.float64, np.float32):
                x = input
                x_out = output
            elif input.dtype == np.float16:
                x = input.astype('float32')
                x_out = np.empty(x.shape, dtype='float32')
            elif np.result_type(input, np.int64) == np.int64:
                x = input.astype('int64')
                x_out = np.empty(x.shape, dtype='int64')
            elif input.dtype.kind in 'biu':
                # cast any other boolean, integer or unsigned type to int64
                x = input.astype('int64')
                x_out = np.empty(x.shape, dtype='int64')
            else:
                raise RuntimeError('Unsupported array type')
            cval = x.dtype.type(cval)
            # NOTE: `origin` here is the last value bound by the validation
            # loop above; for 1-D input that is origins[0].
            _rank_filter_1d.rank_filter(x, rank, footprint.size, x_out, mode, cval,
                                        origin)
            if input.dtype not in (np.int64, np.float64, np.float32):
                # copy back from the promoted working buffer
                np.copyto(output, x_out, casting='unsafe')
        else:
            _nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins)
        if temp_needed:
            temp[...] = output
            output = temp
        return output
|
| 1585 |
+
|
| 1586 |
+
|
| 1587 |
+
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
                mode="reflect", cval=0.0, origin=0, *, axes=None):
    """Calculate a multidimensional rank filter.

    Parameters
    ----------
    %(input)s
    rank : int
        The rank parameter may be less than zero, i.e., rank = -1
        indicates the largest element.
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size`, `origin`, and/or `mode`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    rank_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # Reject non-integer ranks (e.g. floats) up front with a clear TypeError.
    rank = operator.index(rank)
    return _rank_filter(input, rank, size, footprint, output, mode, cval,
                        origin, 'rank', axes=axes)
|
| 1632 |
+
|
| 1633 |
+
|
| 1634 |
+
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
                  mode="reflect", cval=0.0, origin=0, *, axes=None):
    """
    Calculate a multidimensional median filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size`, `origin`, and/or `mode`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    median_filter : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    scipy.signal.medfilt2d

    Notes
    -----
    For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
    the specialised function `scipy.signal.medfilt2d` may be faster. It is
    however limited to constant mode with ``cval=0``.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.median_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # The rank argument (0 here) is ignored for operation='median';
    # _rank_filter recomputes it as filter_size // 2.
    return _rank_filter(input, 0, size, footprint, output, mode, cval,
                        origin, 'median', axes=axes)
|
| 1686 |
+
|
| 1687 |
+
|
| 1688 |
+
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
                      output=None, mode="reflect", cval=0.0, origin=0, *,
                      axes=None):
    """Calculate a multidimensional percentile filter.

    Parameters
    ----------
    %(input)s
    percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size`, `origin`, and/or `mode`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    percentile_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # `percentile` is passed in the rank slot; operation='percentile' makes
    # _rank_filter convert it to an element rank within the footprint.
    return _rank_filter(input, percentile, size, footprint, output, mode,
                        cval, origin, 'percentile', axes=axes)
|
| 1733 |
+
|
| 1734 |
+
|
| 1735 |
+
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
                     output=None, mode="reflect", cval=0.0, origin=0,
                     extra_arguments=(), extra_keywords=None):
    """Calculate a 1-D filter along the given axis.

    `generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The arguments of the line are the
    input line, and the output line. The input and output lines are 1-D
    double arrays. The input line is extended appropriately according
    to the filter size and origin. The output line must be modified
    in-place with the result.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply along given axis.
    filter_size : scalar
        Length of the filter.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s
    %(extra_arguments)s
    %(extra_keywords)s

    Returns
    -------
    generic_filter1d : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function also accepts low-level callback functions with one of
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int function(double *input_line, npy_intp input_length,
                    double *output_line, npy_intp output_length,
                    void *user_data)
       int function(double *input_line, intptr_t input_length,
                    double *output_line, intptr_t output_length,
                    void *user_data)

    The calling function iterates over the lines of the input and output
    arrays, calling the callback function at each line. The current line
    is extended according to the border conditions set by the calling
    function, and the result is copied into the array that is passed
    through ``input_line``. The length of the input line (after extension)
    is passed through ``input_length``. The callback function should apply
    the filter and store the result in the array passed through
    ``output_line``. The length of the output line is passed through
    ``output_length``. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    """
    if extra_keywords is None:
        # avoid a mutable default argument; the C code expects a dict
        extra_keywords = {}
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, input)
    if filter_size < 1:
        raise RuntimeError('invalid filter size')
    axis = normalize_axis_index(axis, input.ndim)
    # origin must keep the filter window centre inside the window
    if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
                                           filter_size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter1d(input, function, filter_size, axis, output,
                               mode, cval, origin, extra_arguments,
                               extra_keywords)
    return output
|
| 1821 |
+
|
| 1822 |
+
|
| 1823 |
+
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
                   output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords=None, *, axes=None):
    """Calculate a multidimensional filter using the given function.

    At each element the provided function is called. The input values
    within the filter footprint at that element are passed to the function
    as a 1-D array of double values.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply at each element.
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    %(extra_arguments)s
    %(extra_keywords)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size` or `origin` must match the length
        of `axes`. The ith entry in any of these tuples corresponds to the ith
        entry in `axes`.

    Returns
    -------
    generic_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function also accepts low-level callback functions with one of
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int callback(double *buffer, npy_intp filter_size,
                    double *return_value, void *user_data)
       int callback(double *buffer, intptr_t filter_size,
                    double *return_value, void *user_data)

    The calling function iterates over the elements of the input and
    output arrays, calling the callback function at each element. The
    elements within the footprint of the filter at the current element are
    passed through the ``buffer`` parameter, and the number of elements
    within the footprint through ``filter_size``. The calculated value is
    returned in ``return_value``. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    Examples
    --------
    Import the necessary modules and load the example image used for
    filtering.

    >>> import numpy as np
    >>> from scipy import datasets
    >>> from scipy.ndimage import zoom, generic_filter
    >>> import matplotlib.pyplot as plt
    >>> ascent = zoom(datasets.ascent(), 0.5)

    Compute a maximum filter with kernel size 5 by passing a simple NumPy
    aggregation function as argument to `function`.

    >>> maximum_filter_result = generic_filter(ascent, np.amax, [5, 5])

    While a maximum filter could also directly be obtained using
    `maximum_filter`, `generic_filter` allows generic Python function or
    `scipy.LowLevelCallable` to be used as a filter. Here, we compute the
    range between maximum and minimum value as an example for a kernel size
    of 5.

    >>> def custom_filter(image):
    ...     return np.amax(image) - np.amin(image)
    >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5])

    Plot the original and filtered images.

    >>> fig, axes = plt.subplots(3, 1, figsize=(3, 9))
    >>> plt.gray()  # show the filtered result in grayscale
    >>> top, middle, bottom = axes
    >>> for ax in axes:
    ...     ax.set_axis_off()  # remove coordinate system
    >>> top.imshow(ascent)
    >>> top.set_title("Original image")
    >>> middle.imshow(maximum_filter_result)
    >>> middle.set_title("Maximum filter, Kernel: 5x5")
    >>> bottom.imshow(custom_filter_result)
    >>> bottom.set_title("Custom filter, Kernel: 5x5")
    >>> fig.tight_layout()

    """
    if (size is not None) and (footprint is not None):
        # footprint wins when both are given; warn rather than raise to stay
        # backward compatible.
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    if extra_keywords is None:
        # avoid a mutable default argument; the C code expects a dict
        extra_keywords = {}
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, num_axes)
        # a size-only filter is a full (all-True) rectangular footprint
        footprint = np.ones(sizes, dtype=bool)
    else:
        footprint = np.asarray(footprint, dtype=bool)

    # expand origins, footprint if num_axes < input.ndim
    footprint = _expand_footprint(input.ndim, axes, footprint)
    origins = _expand_origin(input.ndim, axes, origin)

    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError(f"footprint.ndim ({footprint.ndim}) "
                           f"must match len(axes) ({num_axes})")
    # each per-axis origin must keep the filter window inside the footprint
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        # the C code requires a contiguous footprint
        footprint = footprint.copy()
    output = _ni_support._get_output(output, input)

    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter(input, function, footprint, output, mode,
                             cval, origins, extra_arguments, extra_keywords)
    return output
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_fourier.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions
|
| 5 |
+
# are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 11 |
+
# copyright notice, this list of conditions and the following
|
| 12 |
+
# disclaimer in the documentation and/or other materials provided
|
| 13 |
+
# with the distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 16 |
+
# products derived from this software without specific prior
|
| 17 |
+
# written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 20 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 21 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 23 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 25 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 27 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
import numpy as np
|
| 32 |
+
from scipy._lib._util import normalize_axis_index
|
| 33 |
+
from . import _ni_support
|
| 34 |
+
from . import _nd_image
|
| 35 |
+
|
| 36 |
+
__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
|
| 37 |
+
'fourier_shift']
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _get_output_fourier(output, input):
|
| 41 |
+
if output is None:
|
| 42 |
+
if input.dtype.type in [np.complex64, np.complex128, np.float32]:
|
| 43 |
+
output = np.zeros(input.shape, dtype=input.dtype)
|
| 44 |
+
else:
|
| 45 |
+
output = np.zeros(input.shape, dtype=np.float64)
|
| 46 |
+
elif type(output) is type:
|
| 47 |
+
if output not in [np.complex64, np.complex128,
|
| 48 |
+
np.float32, np.float64]:
|
| 49 |
+
raise RuntimeError("output type not supported")
|
| 50 |
+
output = np.zeros(input.shape, dtype=output)
|
| 51 |
+
elif output.shape != input.shape:
|
| 52 |
+
raise RuntimeError("output shape not correct")
|
| 53 |
+
return output
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _get_output_fourier_complex(output, input):
|
| 57 |
+
if output is None:
|
| 58 |
+
if input.dtype.type in [np.complex64, np.complex128]:
|
| 59 |
+
output = np.zeros(input.shape, dtype=input.dtype)
|
| 60 |
+
else:
|
| 61 |
+
output = np.zeros(input.shape, dtype=np.complex128)
|
| 62 |
+
elif type(output) is type:
|
| 63 |
+
if output not in [np.complex64, np.complex128]:
|
| 64 |
+
raise RuntimeError("output type not supported")
|
| 65 |
+
output = np.zeros(input.shape, dtype=output)
|
| 66 |
+
elif output.shape != input.shape:
|
| 67 |
+
raise RuntimeError("output shape not correct")
|
| 68 |
+
return output
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
    """
    Multidimensional Gaussian Fourier filter.

    The input array is multiplied with the Fourier transform of a
    Gaussian kernel.

    Parameters
    ----------
    input : array_like
        The input array.
    sigma : float or sequence
        The sigma of the Gaussian kernel. A single float applies to
        every axis; a sequence supplies one value per axis.
    n : int, optional
        A negative value (the default) means the input is the result of
        a complex fft. A value greater than or equal to zero means the
        input is the result of a real fft, and `n` gives the length of
        the array before transformation along the real transform
        direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.

    Returns
    -------
    fourier_gaussian : ndarray
        The filtered input.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = datasets.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_gaussian(input_, sigma=4)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = np.asarray(input)
    output = _get_output_fourier(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # one sigma per axis, as a contiguous float64 buffer for the C code
    sigmas = np.ascontiguousarray(
        _ni_support._normalize_sequence(sigma, input.ndim), dtype=np.float64)
    # filter id 0 selects the Gaussian kernel in the C implementation
    _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
    return output
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
    """
    Multidimensional uniform Fourier filter.

    The input array is multiplied with the Fourier transform of a box
    of the given size.

    Parameters
    ----------
    input : array_like
        The input array.
    size : float or sequence
        The size of the box used for filtering. A single float applies
        to every axis; a sequence supplies one value per axis.
    n : int, optional
        A negative value (the default) means the input is the result of
        a complex fft. A value greater than or equal to zero means the
        input is the result of a real fft, and `n` gives the length of
        the array before transformation along the real transform
        direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.

    Returns
    -------
    fourier_uniform : ndarray
        The filtered input.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = datasets.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_uniform(input_, size=20)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = np.asarray(input)
    output = _get_output_fourier(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # one box size per axis, as a contiguous float64 buffer for the C code
    sizes = np.ascontiguousarray(
        _ni_support._normalize_sequence(size, input.ndim), dtype=np.float64)
    # filter id 1 selects the uniform (box) kernel in the C implementation
    _nd_image.fourier_filter(input, sizes, n, axis, output, 1)
    return output
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
    """
    Multidimensional ellipsoid Fourier filter.

    The input array is multiplied with the Fourier transform of an
    ellipsoid of the given sizes.

    Parameters
    ----------
    input : array_like
        The input array.
    size : float or sequence
        The size of the box used for filtering. A single float applies
        to every axis; a sequence supplies one value per axis.
    n : int, optional
        A negative value (the default) means the input is the result of
        a complex fft. A value greater than or equal to zero means the
        input is the result of a real fft, and `n` gives the length of
        the array before transformation along the real transform
        direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.

    Returns
    -------
    fourier_ellipsoid : ndarray
        The filtered input.

    Notes
    -----
    This function is implemented for arrays of rank 1, 2, or 3.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = datasets.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_ellipsoid(input_, size=20)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = np.asarray(input)
    # the C kernel only implements ranks 1-3
    if input.ndim > 3:
        raise NotImplementedError("Only 1d, 2d and 3d inputs are supported")
    output = _get_output_fourier(output, input)
    if output.size == 0:
        # The C code has a bug that can result in a segfault with arrays
        # that have size 0 (gh-17270), so check here.
        return output
    axis = normalize_axis_index(axis, input.ndim)
    # one size per axis, as a contiguous float64 buffer for the C code
    sizes = np.ascontiguousarray(
        _ni_support._normalize_sequence(size, input.ndim), dtype=np.float64)
    # filter id 2 selects the ellipsoid kernel in the C implementation
    _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
    return output
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
    """
    Multidimensional Fourier shift filter.

    The input array is multiplied with the Fourier transform of a
    shift operation.

    Parameters
    ----------
    input : array_like
        The input array.
    shift : float or sequence
        The shift to apply. A single float applies to every axis; a
        sequence supplies one value per axis.
    n : int, optional
        A negative value (the default) means the input is the result of
        a complex fft. A value greater than or equal to zero means the
        input is the result of a real fft, and `n` gives the length of
        the array before transformation along the real transform
        direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of shifting the input is placed in this array.

    Returns
    -------
    fourier_shift : ndarray
        The shifted input.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> import numpy.fft
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = datasets.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_shift(input_, shift=200)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = np.asarray(input)
    # a shift multiplies by a complex phase, so the result must be complex
    output = _get_output_fourier_complex(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # one shift per axis, as a contiguous float64 buffer for the C code
    shifts = np.ascontiguousarray(
        _ni_support._normalize_sequence(shift, input.ndim), dtype=np.float64)
    _nd_image.fourier_shift(input, shifts, n, axis, output)
    return output
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py
ADDED
|
@@ -0,0 +1,1003 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions
|
| 5 |
+
# are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 11 |
+
# copyright notice, this list of conditions and the following
|
| 12 |
+
# disclaimer in the documentation and/or other materials provided
|
| 13 |
+
# with the distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 16 |
+
# products derived from this software without specific prior
|
| 17 |
+
# written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 20 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 21 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 23 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 25 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 27 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
import itertools
|
| 32 |
+
import warnings
|
| 33 |
+
|
| 34 |
+
import numpy as np
|
| 35 |
+
from scipy._lib._util import normalize_axis_index
|
| 36 |
+
|
| 37 |
+
from scipy import special
|
| 38 |
+
from . import _ni_support
|
| 39 |
+
from . import _nd_image
|
| 40 |
+
from ._ni_docstrings import docfiller
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
|
| 44 |
+
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=np.float64,
                    mode='mirror'):
    """
    Calculate a 1-D spline filter along the given axis.

    The lines of the array along the given axis are filtered by a
    spline filter. The order of the spline must be >= 2 and <= 5.

    Parameters
    ----------
    %(input)s
    order : int, optional
        The order of the spline, default is 3.
    axis : int, optional
        The axis along which the spline filter is applied. Default is the last
        axis.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array. Default is ``numpy.float64``.
    %(mode_interp_mirror)s

    Returns
    -------
    spline_filter1d : ndarray
        The filtered input.

    See Also
    --------
    spline_filter : Multidimensional spline filter.

    Notes
    -----
    All of the interpolation functions in `ndimage` do spline interpolation of
    the input image. If using B-splines of `order > 1`, the input image
    values have to be converted to B-spline coefficients first, which is
    done by applying this 1-D filter sequentially along all
    axes of the input. All functions that require B-spline coefficients
    will automatically filter their inputs, a behavior controllable with
    the `prefilter` keyword argument. For functions that accept a `mode`
    parameter, the result will only be correct if it matches the `mode`
    used when filtering.

    For complex-valued `input`, this function processes the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    We can filter an image using 1-D spline along the given axis:

    >>> from scipy.ndimage import spline_filter1d
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> orig_img = np.eye(20)  # create an image
    >>> orig_img[10, :] = 1.0
    >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
    >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
    >>> f, ax = plt.subplots(1, 3, sharex=True)
    >>> for ind, data in enumerate([[orig_img, "original image"],
    ...             [sp_filter_axis_0, "spline filter (axis=0)"],
    ...             [sp_filter_axis_1, "spline filter (axis=1)"]]):
    ...     ax[ind].imshow(data[0], cmap='gray_r')
    ...     ax[ind].set_title(data[1])
    >>> plt.tight_layout()
    >>> plt.show()

    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    complex_output = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=complex_output)
    if complex_output:
        # filter the real and imaginary channels independently, writing
        # straight into the corresponding views of the output array
        spline_filter1d(input.real, order, axis, output.real, mode)
        spline_filter1d(input.imag, order, axis, output.imag, mode)
    elif order in (0, 1):
        # orders 0 and 1 need no prefiltering; just copy the data over
        output[...] = np.array(input)
    else:
        mode = _ni_support._extend_mode_to_code(mode)
        axis = normalize_axis_index(axis, input.ndim)
        _nd_image.spline_filter1d(input, order, axis, output, mode)
    return output
|
| 134 |
+
|
| 135 |
+
@docfiller
def spline_filter(input, order=3, output=np.float64, mode='mirror'):
    """
    Multidimensional spline filter.

    Parameters
    ----------
    %(input)s
    order : int, optional
        The order of the spline, default is 3.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array. Default is ``numpy.float64``.
    %(mode_interp_mirror)s

    Returns
    -------
    spline_filter : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    spline_filter1d : Calculate a 1-D spline filter along the given axis.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of
    1-D spline filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.

    For complex-valued `input`, this function processes the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    We can filter an image using multidimensional splines:

    >>> from scipy.ndimage import spline_filter
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> orig_img = np.eye(20)  # create an image
    >>> orig_img[10, :] = 1.0
    >>> sp_filter = spline_filter(orig_img, order=3)
    >>> f, ax = plt.subplots(1, 2, sharex=True)
    >>> for ind, data in enumerate([[orig_img, "original image"],
    ...                             [sp_filter, "spline filter"]]):
    ...     ax[ind].imshow(data[0], cmap='gray_r')
    ...     ax[ind].set_title(data[1])
    >>> plt.tight_layout()
    >>> plt.show()

    """
    if order < 2 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    complex_output = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=complex_output)
    if complex_output:
        # filter real and imaginary channels separately into the views
        spline_filter(input.real, order, output.real, mode)
        spline_filter(input.imag, order, output.imag, mode)
    elif order not in (0, 1) and input.ndim > 0:
        # filter sequentially along each axis; after the first pass the
        # partially filtered output is fed back in as the input
        data = input
        for axis in range(data.ndim):
            spline_filter1d(data, order, axis, output=output, mode=mode)
            data = output
    else:
        output[...] = input[...]
    return output
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def _prepad_for_spline_filter(input, mode, cval):
|
| 212 |
+
if mode in ['nearest', 'grid-constant']:
|
| 213 |
+
npad = 12
|
| 214 |
+
if mode == 'grid-constant':
|
| 215 |
+
padded = np.pad(input, npad, mode='constant',
|
| 216 |
+
constant_values=cval)
|
| 217 |
+
elif mode == 'nearest':
|
| 218 |
+
padded = np.pad(input, npad, mode='edge')
|
| 219 |
+
else:
|
| 220 |
+
# other modes have exact boundary conditions implemented so
|
| 221 |
+
# no prepadding is needed
|
| 222 |
+
npad = 0
|
| 223 |
+
padded = input
|
| 224 |
+
return padded, npad
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
@docfiller
def geometric_transform(input, mapping, output_shape=None,
                        output=None, order=3,
                        mode='constant', cval=0.0, prefilter=True,
                        extra_arguments=(), extra_keywords=None):
    """
    Apply an arbitrary geometric transform.

    The given mapping function is used to find, for each point in the
    output, the corresponding coordinates in the input. The value of the
    input at those coordinates is determined by spline interpolation of
    the requested order.

    Parameters
    ----------
    %(input)s
    mapping : {callable, scipy.LowLevelCallable}
        A callable object that accepts a tuple of length equal to the output
        array rank, and returns the corresponding input coordinates as a tuple
        of length equal to the input array rank.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s
    extra_arguments : tuple, optional
        Extra arguments passed to `mapping`.
    extra_keywords : dict, optional
        Extra keywords passed to `mapping`.

    Returns
    -------
    output : ndarray
        The filtered input.

    See Also
    --------
    map_coordinates, affine_transform, spline_filter1d


    Notes
    -----
    This function also accepts low-level callback functions with one
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int mapping(npy_intp *output_coordinates, double *input_coordinates,
                   int output_rank, int input_rank, void *user_data)
       int mapping(intptr_t *output_coordinates, double *input_coordinates,
                   int output_rank, int input_rank, void *user_data)

    The calling function iterates over the elements of the output array,
    calling the callback function at each element. The coordinates of the
    current output element are passed through ``output_coordinates``. The
    callback function must return the coordinates at which the input must
    be interpolated in ``input_coordinates``. The rank of the input and
    output arrays are given by ``input_rank`` and ``output_rank``
    respectively. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the Python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    For complex-valued `input`, this function transforms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.ndimage import geometric_transform
    >>> a = np.arange(12.).reshape((4, 3))
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 0.5, output_coords[1] - 0.5)
    ...
    >>> geometric_transform(a, shift_func)
    array([[ 0.   ,  0.   ,  0.   ],
           [ 0.   ,  1.362,  2.738],
           [ 0.   ,  4.812,  6.187],
           [ 0.   ,  8.263,  9.637]])

    >>> b = [1, 2, 3, 4, 5]
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 3,)
    ...
    >>> geometric_transform(b, shift_func, mode='constant')
    array([0, 0, 0, 1, 2])
    >>> geometric_transform(b, shift_func, mode='nearest')
    array([1, 1, 1, 1, 2])
    >>> geometric_transform(b, shift_func, mode='reflect')
    array([3, 2, 1, 1, 2])
    >>> geometric_transform(b, shift_func, mode='wrap')
    array([2, 3, 4, 1, 2])

    """
    if extra_keywords is None:
        extra_keywords = {}
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if output_shape is None:
        # by default the transform does not change the array's shape
        output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    complex_output = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # transform the real and imaginary components independently by
        # recursing on the real-valued views of input and output
        kwargs = dict(order=order, mode=mode, prefilter=prefilter,
                      output_shape=output_shape,
                      extra_arguments=extra_arguments,
                      extra_keywords=extra_keywords)
        geometric_transform(input.real, mapping, output=output.real,
                            cval=np.real(cval), **kwargs)
        geometric_transform(input.imag, mapping, output=output.imag,
                            cval=np.imag(cval), **kwargs)
        return output

    if prefilter and order > 1:
        # convert image values to B-spline coefficients before
        # interpolating; some boundary modes require prepadding first
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64,
                                 mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    # npad tells the C code how much prepadding to compensate for when
    # mapping output coordinates back into the (padded) input
    _nd_image.geometric_transform(filtered, mapping, None, None, None, output,
                                  order, mode, cval, npad, extra_arguments,
                                  extra_keywords)
    return output
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
                    mode='constant', cval=0.0, prefilter=True):
    """
    Map the input array to new coordinates by interpolation.

    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input. The value of the input at
    those coordinates is determined by spline interpolation of the
    requested order.

    The shape of the output is derived from that of the coordinate
    array by dropping the first axis. The values of the array along
    the first axis are the coordinates in the input array at which the
    output value is found.

    Parameters
    ----------
    %(input)s
    coordinates : array_like
        The coordinates at which `input` is evaluated.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    map_coordinates : ndarray
        The result of transforming the input. The shape of the output is
        derived from that of `coordinates` by dropping the first axis.

    See Also
    --------
    spline_filter, geometric_transform, scipy.interpolate

    Notes
    -----
    For complex-valued `input`, this function maps the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.arange(12.).reshape((4, 3))
    >>> a
    array([[ 0., 1., 2.],
           [ 3., 4., 5.],
           [ 6., 7., 8.],
           [ 9., 10., 11.]])
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2., 7.])

    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].

    >>> inds = np.array([[0.5, 2], [0.5, 4]])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
    array([ 2. , -33.3])
    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
    array([ 2., 8.])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)

    """
    # Validate arguments up front; the error order matches the original
    # implementation so callers see identical exceptions.
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    coordinates = np.asarray(coordinates)
    if np.iscomplexobj(coordinates):
        raise TypeError('Complex type not supported')
    output_shape = coordinates.shape[1:]
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coordinates.shape[0] != input.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    complex_output = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # Interpolate the real and imaginary planes separately, writing
        # straight into the matching (writable) views of the preallocated
        # complex output.
        for plane, out_view, c in ((input.real, output.real, np.real(cval)),
                                   (input.imag, output.imag, np.imag(cval))):
            map_coordinates(plane, coordinates, output=out_view, order=order,
                            mode=mode, cval=c, prefilter=prefilter)
        return output
    if prefilter and order > 1:
        # Pre-pad so the spline filter's boundary handling matches `mode`.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        filtered, npad = input, 0
    mode_code = _ni_support._extend_mode_to_code(mode)
    _nd_image.geometric_transform(filtered, None, coordinates, None, None,
                                  output, order, mode_code, cval, npad,
                                  None, None)
    return output
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
                     output=None, order=3,
                     mode='constant', cval=0.0, prefilter=True):
    """
    Apply an affine transformation.

    Given an output image pixel index vector ``o``, the pixel value
    is determined from the input image at position
    ``np.dot(matrix, o) + offset``.

    This does 'pull' (or 'backward') resampling, transforming the output space
    to the input to locate data. Affine transformations are often described in
    the 'push' (or 'forward') direction, transforming input to output. If you
    have a matrix for the 'push' transformation, use its inverse
    (:func:`numpy.linalg.inv`) in this function.

    Parameters
    ----------
    %(input)s
    matrix : ndarray
        The inverse coordinate transformation matrix, mapping output
        coordinates to input coordinates. If ``ndim`` is the number of
        dimensions of ``input``, the given matrix must have one of the
        following shapes:

            - ``(ndim, ndim)``: the linear transformation matrix for each
              output coordinate.
            - ``(ndim,)``: assume that the 2-D transformation matrix is
              diagonal, with the diagonal specified by the given value. A more
              efficient algorithm is then used that exploits the separability
              of the problem.
            - ``(ndim + 1, ndim + 1)``: assume that the transformation is
              specified using homogeneous coordinates [1]_. In this case, any
              value passed to ``offset`` is ignored.
            - ``(ndim, ndim + 1)``: as above, but the bottom row of a
              homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
              and may be omitted.

    offset : float or sequence, optional
        The offset into the array where the transform is applied. If a float,
        `offset` is the same for each axis. If a sequence, `offset` should
        contain one value for each axis.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    affine_transform : ndarray
        The transformed input.

    Notes
    -----
    The given matrix and offset are used to find for each point in the
    output the corresponding coordinates in the input by an affine
    transformation. The value of the input at those coordinates is
    determined by spline interpolation of the requested order. Points
    outside the boundaries of the input are filled according to the given
    mode.

    .. versionchanged:: 0.18.0
        Previously, the exact interpretation of the affine transformation
        depended on whether the matrix was supplied as a 1-D or a
        2-D array. If a 1-D array was supplied
        to the matrix parameter, the output pixel value at index ``o``
        was determined from the input image at position
        ``matrix * (o + offset)``.

    For complex-valued `input`, this function transforms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if output_shape is None:
        # A preallocated output array fixes the shape; otherwise default to
        # the input's shape.
        if isinstance(output, np.ndarray):
            output_shape = output.shape
        else:
            output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    complex_output = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # Transform the real and imaginary components independently via
        # recursive calls that write into the real/imag views of `output`.
        kwargs = dict(offset=offset, output_shape=output_shape, order=order,
                      mode=mode, prefilter=prefilter)
        affine_transform(input.real, matrix, output=output.real,
                         cval=np.real(cval), **kwargs)
        affine_transform(input.imag, matrix, output=output.imag,
                         cval=np.imag(cval), **kwargs)
        return output
    if prefilter and order > 1:
        # Pre-pad so the spline filter's boundary handling matches `mode`.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        filtered = input
    # Translate the boundary mode string into the integer code that the
    # _nd_image C extension expects.
    mode = _ni_support._extend_mode_to_code(mode)
    matrix = np.asarray(matrix, dtype=np.float64)
    if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
        raise RuntimeError('no proper affine matrix provided')
    if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
            (matrix.shape[0] in [input.ndim, input.ndim + 1])):
        # (ndim+1, ndim+1) or (ndim, ndim+1): homogeneous-coordinate matrix.
        if matrix.shape[0] == input.ndim + 1:
            # A full homogeneous matrix must have [0, ..., 0, 1] as its
            # bottom row to represent a valid affine transform.
            exptd = [0] * input.ndim + [1]
            if not np.all(matrix[input.ndim] == exptd):
                msg = (f'Expected homogeneous transformation matrix with '
                       f'shape {matrix.shape} for image shape {input.shape}, '
                       f'but bottom row was not equal to {exptd}')
                raise ValueError(msg)
        # assume input is homogeneous coordinate transformation matrix;
        # split it into the linear part and the translation (last column),
        # overriding any user-supplied `offset`.
        offset = matrix[:input.ndim, input.ndim]
        matrix = matrix[:input.ndim, :input.ndim]
    if matrix.shape[0] != input.ndim:
        raise RuntimeError('affine matrix has wrong number of rows')
    if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
        raise RuntimeError('affine matrix has wrong number of columns')
    # The C extension requires contiguous arrays.
    if not matrix.flags.contiguous:
        matrix = matrix.copy()
    offset = _ni_support._normalize_sequence(offset, input.ndim)
    offset = np.asarray(offset, dtype=np.float64)
    if offset.ndim != 1 or offset.shape[0] < 1:
        raise RuntimeError('no proper offset provided')
    if not offset.flags.contiguous:
        offset = offset.copy()
    if matrix.ndim == 1:
        # Diagonal (separable) transform: use the faster zoom/shift C path.
        warnings.warn(
            "The behavior of affine_transform with a 1-D "
            "array supplied for the matrix parameter has changed in "
            "SciPy 0.18.0.",
            stacklevel=2
        )
        # Note the element-wise division: the offset is rescaled by the
        # per-axis diagonal entries before being passed as a shift.
        _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
                             mode, cval, npad, False)
    else:
        # General 2-D matrix: full geometric transform in the C extension.
        _nd_image.geometric_transform(filtered, None, None, matrix, offset,
                                      output, order, mode, cval, npad, None,
                                      None)
    return output
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
          prefilter=True):
    """
    Shift an array.

    The array is shifted using spline interpolation of the requested order.
    Points outside the boundaries of the input are filled according to the
    given mode.

    Parameters
    ----------
    %(input)s
    shift : float or sequence
        The shift along the axes. If a float, `shift` is the same for each
        axis. If a sequence, `shift` should contain one value for each axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    shift : ndarray
        The shifted input.

    See Also
    --------
    affine_transform : Affine transformations

    Notes
    -----
    For complex-valued `input`, this function shifts the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    Import the necessary modules and an exemplary image.

    >>> from scipy.ndimage import shift
    >>> import matplotlib.pyplot as plt
    >>> from scipy import datasets
    >>> image = datasets.ascent()

    Shift the image vertically by 20 pixels.

    >>> image_shifted_vertically = shift(image, (20, 0))

    Shift the image vertically by -200 pixels and horizontally by 100 pixels.

    >>> image_shifted_both_directions = shift(image, (-200, 100))

    Plot the original and the shifted images.

    >>> fig, axes = plt.subplots(3, 1, figsize=(4, 12))
    >>> plt.gray()  # show the filtered result in grayscale
    >>> top, middle, bottom = axes
    >>> for ax in axes:
    ...     ax.set_axis_off()  # remove coordinate system
    >>> top.imshow(image)
    >>> top.set_title("Original image")
    >>> middle.imshow(image_shifted_vertically)
    >>> middle.set_title("Vertically shifted image")
    >>> bottom.imshow(image_shifted_both_directions)
    >>> bottom.set_title("Image shifted in both directions")
    >>> fig.tight_layout()
    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    complex_output = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=complex_output)
    if complex_output:
        # import under different name to avoid confusion with shift parameter
        from scipy.ndimage._interpolation import shift as _shift

        # Shift the real and imaginary planes independently, writing into
        # the matching views of the preallocated complex output.
        common = dict(order=order, mode=mode, prefilter=prefilter)
        _shift(input.real, shift, output=output.real, cval=np.real(cval),
               **common)
        _shift(input.imag, shift, output=output.imag, cval=np.imag(cval),
               **common)
        return output
    if prefilter and order > 1:
        # Pre-pad so spline-filter boundary handling matches `mode`.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        filtered, npad = input, 0
    mode_code = _ni_support._extend_mode_to_code(mode)
    # Negate the per-axis shifts: the C routine maps output coordinates
    # back into the input ('pull' resampling).
    shift_vec = np.asarray(
        [-s for s in _ni_support._normalize_sequence(shift, input.ndim)],
        dtype=np.float64)
    if not shift_vec.flags.contiguous:
        shift_vec = shift_vec.copy()
    _nd_image.zoom_shift(filtered, None, shift_vec, output, order, mode_code,
                         cval, npad, False)
    return output
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
         prefilter=True, *, grid_mode=False):
    """
    Zoom an array.

    The array is zoomed using spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    zoom : float or sequence
        The zoom factor along the axes. If a float, `zoom` is the same for each
        axis. If a sequence, `zoom` should contain one value for each axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s
    grid_mode : bool, optional
        If False, the distance from the pixel centers is zoomed. Otherwise, the
        distance including the full pixel extent is used. For example, a 1d
        signal of length 5 is considered to have length 4 when `grid_mode` is
        False, but length 5 when `grid_mode` is True. See the following
        visual illustration:

        .. code-block:: text

                | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
                     |<-------------------------------------->|
                                        vs.
                |<----------------------------------------------->|

        The starting point of the arrow in the diagram above corresponds to
        coordinate location 0 in each mode.

    Returns
    -------
    zoom : ndarray
        The zoomed input.

    Notes
    -----
    For complex-valued `input`, this function zooms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt

    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.zoom(ascent, 3.0)
    >>> ax1.imshow(ascent, vmin=0, vmax=255)
    >>> ax2.imshow(result, vmin=0, vmax=255)
    >>> plt.show()

    >>> print(ascent.shape)
    (512, 512)

    >>> print(result.shape)
    (1536, 1536)
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    zoom = _ni_support._normalize_sequence(zoom, input.ndim)
    # Output extent is the rounded product of each axis length and its
    # zoom factor.
    output_shape = tuple(
        [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
    complex_output = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # import under different name to avoid confusion with zoom parameter
        from scipy.ndimage._interpolation import zoom as _zoom

        # Zoom the real and imaginary planes independently into the
        # corresponding views of the complex output.
        kwargs = dict(order=order, mode=mode, prefilter=prefilter)
        _zoom(input.real, zoom, output=output.real, cval=np.real(cval), **kwargs)
        _zoom(input.imag, zoom, output=output.imag, cval=np.imag(cval), **kwargs)
        return output
    if prefilter and order > 1:
        # Pre-pad so the spline filter's boundary handling matches `mode`.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        filtered = input
    if grid_mode:
        # warn about modes that may have surprising behavior
        suggest_mode = None
        if mode == 'constant':
            suggest_mode = 'grid-constant'
        elif mode == 'wrap':
            suggest_mode = 'grid-wrap'
        if suggest_mode is not None:
            warnings.warn(
                (f"It is recommended to use mode = {suggest_mode} instead of {mode} "
                 f"when grid_mode is True."),
                stacklevel=2
            )
    # Translate the boundary mode string into the C extension's integer code.
    mode = _ni_support._extend_mode_to_code(mode)

    # Recompute the effective per-axis zoom ratio from the actual (rounded)
    # output shape. Without grid_mode, spacing is measured between pixel
    # centers, so both extents are reduced by one.
    zoom_div = np.array(output_shape)
    zoom_nominator = np.array(input.shape)
    if not grid_mode:
        zoom_div -= 1
        zoom_nominator -= 1

    # Zooming to infinite values is unpredictable, so just choose
    # zoom factor 1 instead
    zoom = np.divide(zoom_nominator, zoom_div,
                     out=np.ones_like(input.shape, dtype=np.float64),
                     where=zoom_div != 0)
    # The C extension requires a contiguous array.
    zoom = np.ascontiguousarray(zoom)
    _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad,
                         grid_mode)
    return output
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
           mode='constant', cval=0.0, prefilter=True):
    """
    Rotate an array.

    The array is rotated in the plane defined by the two axes given by the
    `axes` parameter using spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    angle : float
        The rotation angle in degrees.
    axes : tuple of 2 ints, optional
        The two axes that define the plane of rotation. Default is the first
        two axes.
    reshape : bool, optional
        If `reshape` is true, the output shape is adapted so that the input
        array is contained completely in the output. Default is True.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    rotate : ndarray
        The rotated input.

    Notes
    -----
    For complex-valued `input`, this function rotates the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure(figsize=(10, 3))
    >>> ax1, ax2, ax3 = fig.subplots(1, 3)
    >>> img = datasets.ascent()
    >>> img_45 = ndimage.rotate(img, 45, reshape=False)
    >>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
    >>> ax1.imshow(img, cmap='gray')
    >>> ax1.set_axis_off()
    >>> ax2.imshow(img_45, cmap='gray')
    >>> ax2.set_axis_off()
    >>> ax3.imshow(full_img_45, cmap='gray')
    >>> ax3.set_axis_off()
    >>> fig.set_layout_engine('tight')
    >>> plt.show()
    >>> print(img.shape)
    (512, 512)
    >>> print(img_45.shape)
    (512, 512)
    >>> print(full_img_45.shape)
    (724, 724)

    """
    input_arr = np.asarray(input)
    ndim = input_arr.ndim

    if ndim < 2:
        raise ValueError('input array should be at least 2D')

    axes = list(axes)

    if len(axes) != 2:
        raise ValueError('axes should contain exactly two values')

    if not all([float(ax).is_integer() for ax in axes]):
        raise ValueError('axes should contain only integer values')

    # Normalize negative axis indices, then validate the rotation plane.
    if axes[0] < 0:
        axes[0] += ndim
    if axes[1] < 0:
        axes[1] += ndim
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
        raise ValueError('invalid rotation plane specified')

    axes.sort()

    # cosdg/sindg evaluate cos/sin for an angle given in degrees.
    c, s = special.cosdg(angle), special.sindg(angle)

    # 2x2 rotation applied within the selected plane (pull transform).
    rot_matrix = np.array([[c, s],
                           [-s, c]])

    img_shape = np.asarray(input_arr.shape)
    in_plane_shape = img_shape[axes]
    if reshape:
        # Compute transformed input bounds
        iy, ix = in_plane_shape
        out_bounds = rot_matrix @ [[0, 0, iy, iy],
                                   [0, ix, 0, ix]]
        # Compute the shape of the transformed input plane
        # (+ 0.5 then int-cast rounds the non-negative extents).
        out_plane_shape = (np.ptp(out_bounds, axis=1) + 0.5).astype(int)
    else:
        out_plane_shape = img_shape[axes]

    # Offset chosen so the rotation pivots about the plane centers.
    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
    in_center = (in_plane_shape - 1) / 2
    offset = in_center - out_center

    # img_shape is an ndarray, so the fancy-index assignment below updates
    # only the two in-plane axis lengths.
    output_shape = img_shape
    output_shape[axes] = out_plane_shape
    output_shape = tuple(output_shape)

    complex_output = np.iscomplexobj(input_arr)
    output = _ni_support._get_output(output, input_arr, shape=output_shape,
                                     complex_output=complex_output)

    if ndim <= 2:
        affine_transform(input_arr, rot_matrix, offset, output_shape, output,
                         order, mode, cval, prefilter)
    else:
        # If ndim > 2, the rotation is applied over all the planes
        # parallel to axes
        planes_coord = itertools.product(
            *[[slice(None)] if ax in axes else range(img_shape[ax])
              for ax in range(ndim)])

        out_plane_shape = tuple(out_plane_shape)

        for coordinates in planes_coord:
            # ia/oa are 2-D views of one plane of the input/output; the
            # affine transform writes into the output view in place.
            ia = input_arr[coordinates]
            oa = output[coordinates]
            affine_transform(ia, rot_matrix, offset, out_plane_shape,
                             oa, order, mode, cval, prefilter)

    return output
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_measurements.py
ADDED
|
@@ -0,0 +1,1687 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions
|
| 5 |
+
# are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 11 |
+
# copyright notice, this list of conditions and the following
|
| 12 |
+
# disclaimer in the documentation and/or other materials provided
|
| 13 |
+
# with the distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 16 |
+
# products derived from this software without specific prior
|
| 17 |
+
# written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 20 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 21 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 23 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 25 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 27 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
import numpy as np
|
| 32 |
+
from . import _ni_support
|
| 33 |
+
from . import _ni_label
|
| 34 |
+
from . import _nd_image
|
| 35 |
+
from . import _morphology
|
| 36 |
+
|
| 37 |
+
__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
|
| 38 |
+
'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
|
| 39 |
+
'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
|
| 40 |
+
'histogram', 'watershed_ift', 'sum_labels', 'value_indices']
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def label(input, structure=None, output=None):
    """
    Label features in an array.

    Parameters
    ----------
    input : array_like
        An array-like object to be labeled. Any non-zero values in `input` are
        counted as features and zero values are considered the background.
    structure : array_like, optional
        A structuring element that defines feature connections.
        `structure` must be centrosymmetric
        (see Notes).
        If no structuring element is provided,
        one is automatically generated with a squared connectivity equal to
        one. That is, for a 2-D `input` array, the default structuring element
        is::

            [[0,1,0],
             [1,1,1],
             [0,1,0]]

    output : (None, data-type, array_like), optional
        If `output` is a data type, it specifies the type of the resulting
        labeled feature array.
        If `output` is an array-like object, then `output` will be updated
        with the labeled features from this function. This function can
        operate in-place, by passing output=input.
        Note that the output must be able to store the largest label, or this
        function will raise an Exception.

    Returns
    -------
    label : ndarray or int
        An integer ndarray where each unique feature in `input` has a unique
        label in the returned array.
    num_features : int
        How many objects were found.

        If `output` is None, this function returns a tuple of
        (`labeled_array`, `num_features`).

        If `output` is a ndarray, then it will be updated with values in
        `labeled_array` and only `num_features` will be returned by this
        function.

    See Also
    --------
    find_objects : generate a list of slices for the labeled features (or
                   objects); useful for finding features' position or
                   dimensions

    Notes
    -----
    A centrosymmetric matrix is a matrix that is symmetric about the center.
    See [1]_ for more information.

    The `structure` matrix must be centrosymmetric to ensure
    two-way connections.
    For instance, if the `structure` matrix is not centrosymmetric
    and is defined as::

        [[0,1,0],
         [1,1,0],
         [0,0,0]]

    and the `input` is::

        [[1,2],
         [0,3]]

    then the structure matrix would indicate the
    entry 2 in the input is connected to 1,
    but 1 is not connected to 2.

    References
    ----------
    .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric)
           matrices, their basic properties, eigenvalues, and
           eigenvectors." The American Mathematical Monthly 92.10
           (1985): 711-717.

    Examples
    --------
    Create an image with some features, then label it using the default
    (cross-shaped) structuring element:

    >>> from scipy.ndimage import label, generate_binary_structure
    >>> import numpy as np
    >>> a = np.array([[0,0,1,1,0,0],
    ...               [0,0,0,1,0,0],
    ...               [1,1,0,0,1,0],
    ...               [0,0,0,1,0,0]])
    >>> labeled_array, num_features = label(a)

    Each of the 4 features are labeled with a different integer:

    >>> num_features
    4
    >>> labeled_array
    array([[0, 0, 1, 1, 0, 0],
           [0, 0, 0, 1, 0, 0],
           [2, 2, 0, 0, 3, 0],
           [0, 0, 0, 4, 0, 0]], dtype=int32)

    Generate a structuring element that will consider features connected even
    if they touch diagonally:

    >>> s = generate_binary_structure(2,2)

    or,

    >>> s = [[1,1,1],
    ...      [1,1,1],
    ...      [1,1,1]]

    Label the image using the new structuring element:

    >>> labeled_array, num_features = label(a, structure=s)

    Show the 2 labeled features (note that features 1, 3, and 4 from above are
    now considered a single feature):

    >>> num_features
    2
    >>> labeled_array
    array([[0, 0, 1, 1, 0, 0],
           [0, 0, 0, 1, 0, 0],
           [2, 2, 0, 0, 1, 0],
           [0, 0, 0, 1, 0, 0]], dtype=int32)

    """
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if structure is None:
        # Default: squared connectivity one (cross-shaped element).
        structure = _morphology.generate_binary_structure(input.ndim, 1)
    structure = np.asarray(structure, dtype=bool)
    if structure.ndim != input.ndim:
        raise RuntimeError('structure and input must have equal rank')
    # The labeling kernel only supports 3-wide structuring elements.
    for ii in structure.shape:
        if ii != 3:
            raise ValueError('structure dimensions must be equal to 3')

    # Use 32 bits if it's large enough for this image.
    # _ni_label.label() needs two entries for background and
    # foreground tracking
    need_64bits = input.size >= (2**31 - 2)

    if isinstance(output, np.ndarray):
        # Caller supplied an array: labels are written into it in-place and
        # only the feature count is returned.
        if output.shape != input.shape:
            raise ValueError("output shape not correct")
        caller_provided_output = True
    else:
        caller_provided_output = False
        if output is None:
            output = np.empty(input.shape, np.intp if need_64bits else np.int32)
        else:
            # `output` was given as a dtype; allocate a fresh array of it.
            output = np.empty(input.shape, output)

    # handle scalars, 0-D arrays
    if input.ndim == 0 or input.size == 0:
        if input.ndim == 0:
            # scalar: a single nonzero value is one feature labeled 1
            maxlabel = 1 if (input != 0) else 0
            output[...] = maxlabel
        else:
            # empty array: no features
            maxlabel = 0
        if caller_provided_output:
            return maxlabel
        else:
            return output, maxlabel

    try:
        max_label = _ni_label._label(input, structure, output)
    except _ni_label.NeedMoreBits as e:
        # Make another attempt with enough bits, then try to cast to the
        # new type.
        tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
        max_label = _ni_label._label(input, structure, tmp_output)
        output[...] = tmp_output[...]
        if not np.all(output == tmp_output):
            # refuse to return bad results
            raise RuntimeError(
                "insufficient bit-depth in requested output type"
            ) from e

    if caller_provided_output:
        # result was written in-place
        return max_label
    else:
        return output, max_label
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def find_objects(input, max_label=0):
    """
    Find objects in a labeled array.

    Parameters
    ----------
    input : ndarray of ints
        Array containing objects defined by different labels. Labels with
        value 0 are ignored.
    max_label : int, optional
        Maximum label to be searched for in `input`. If max_label is not
        given, the positions of all objects are returned.

    Returns
    -------
    object_slices : list of tuples
        A list of tuples, with each tuple containing N slices (with N the
        dimension of the input array). Slices correspond to the minimal
        parallelepiped that contains the object. If a number is missing,
        None is returned instead of a slice. The label ``l`` corresponds to
        the index ``l-1`` in the returned list.

    See Also
    --------
    label, center_of_mass

    Notes
    -----
    This function is very useful for isolating a volume of interest inside
    a 3-D array, that cannot be "seen through".

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.zeros((6,6), dtype=int)
    >>> a[2:4, 2:4] = 1
    >>> a[4, 4] = 1
    >>> a[:2, :3] = 2
    >>> a[0, 5] = 3
    >>> ndimage.find_objects(a)
    [(slice(2, 5, None), slice(2, 5, None)),
     (slice(0, 2, None), slice(0, 3, None)),
     (slice(0, 1, None), slice(5, 6, None))]
    >>> ndimage.find_objects(a, max_label=2)
    [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]
    >>> ndimage.find_objects(a == 1, max_label=2)
    [(slice(2, 5, None), slice(2, 5, None)), None]

    """
    labeled = np.asarray(input)
    if np.iscomplexobj(labeled):
        raise TypeError('Complex type not supported')

    # A non-positive max_label means "search for every label present".
    search_limit = labeled.max() if max_label < 1 else max_label

    return _nd_image.find_objects(labeled, search_limit)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def value_indices(arr, *, ignore_value=None):
    """
    Find indices of each distinct value in given array.

    Parameters
    ----------
    arr : ndarray of ints
        Array containing integer values.
    ignore_value : int, optional
        This value will be ignored in searching the `arr` array. If not
        given, all values found will be included in output. Default
        is None.

    Returns
    -------
    indices : dictionary
        A Python dictionary keyed by each distinct value in `arr`; each
        entry is an index tuple covering all occurrences of that value
        within the array.

        This dictionary can occupy significant memory, usually several times
        the size of the input array.

    See Also
    --------
    label, maximum, median, minimum_position, extrema, sum, mean, variance,
    standard_deviation, numpy.where, numpy.unique

    Notes
    -----
    Unlike repeated ``(arr == val)`` searches, this makes essentially a
    single pass over the array, recording the indices of every distinct
    value as it goes. This is useful when matching a categorical image
    (e.g. a segmentation) to an associated image of other data, so that
    per-class statistics can be computed.

    Note for IDL users: this provides functionality equivalent to IDL's
    REVERSE_INDICES option to the HISTOGRAM function.

    .. versionadded:: 1.10.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.zeros((6, 6), dtype=int)
    >>> a[2:4, 2:4] = 1
    >>> a[4, 4] = 1
    >>> a[:2, :3] = 2
    >>> a[0, 5] = 3
    >>> val_indices = ndimage.value_indices(a)
    >>> ndx1 = val_indices[1]
    >>> ndx1
    (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4]))
    >>> a[ndx1]
    array([1, 1, 1, 1, 1])
    >>> val_indices = ndimage.value_indices(a, ignore_value=0)

    """
    arr = np.asarray(arr)

    # The C routine takes a boolean "nothing to ignore" flag plus a
    # one-element array (of arr's dtype) holding the value to skip, so
    # ignore_value=None is encoded as flag=True with a dummy zero.
    nothing_ignored = ignore_value is None
    sentinel = np.zeros((1,), dtype=arr.dtype)
    if not nothing_ignored:
        sentinel[0] = sentinel.dtype.type(ignore_value)

    return _nd_image.value_indices(arr, nothing_ignored, sentinel)
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def labeled_comprehension(input, labels, index, func, out_dtype, default,
                          pass_positions=False):
    """
    Roughly equivalent to [func(input[labels == i]) for i in index].

    Sequentially applies an arbitrary function (that works on array_like input)
    to subsets of an N-D image array specified by `labels` and `index`.
    The option exists to provide the function with positional parameters as the
    second argument.

    Parameters
    ----------
    input : array_like
        Data from which to select `labels` to process.
    labels : array_like or None
        Labels to objects in `input`.
        If not None, array must be same shape as `input`.
        If None, `func` is applied to raveled `input`.
    index : int, sequence of ints or None
        Subset of `labels` to which to apply `func`.
        If a scalar, a single value is returned.
        If None, `func` is applied to all non-zero values of `labels`.
    func : callable
        Python function to apply to `labels` from `input`.
    out_dtype : dtype
        Dtype to use for `result`.
    default : int, float or None
        Default return value when a element of `index` does not exist
        in `labels`.
    pass_positions : bool, optional
        If True, pass linear indices to `func` as a second argument.
        Default is False.

    Returns
    -------
    result : ndarray
        Result of applying `func` to each of `labels` to `input` in `index`.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> lbl, nlbl = ndimage.label(a)
    >>> lbls = np.arange(1, nlbl+1)
    >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)
    array([ 2.75,  5.5 ,  6.  ])

    Falling back to `default`:

    >>> lbls = np.arange(1, nlbl+2)
    >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)
    array([ 2.75,  5.5 ,  6.  , -1.  ])

    Passing positions:

    >>> def fn(val, pos):
    ...     print("fn says: %s : %s" % (val, pos))
    ...     return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
    ...
    >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
    fn says: [1 2 5 3] : [0 1 4 5]
    fn says: [4 7] : [ 7 11]
    fn says: [9 3] : [12 13]
    array([ 11.,  11., -12.,   0.])

    """
    scalar_index = np.isscalar(index)
    input = np.asarray(input)

    positions = None
    if pass_positions:
        # Linear (raveled) position of every element, same shape as input.
        positions = np.arange(input.size).reshape(input.shape)

    if labels is None:
        if index is not None:
            raise ValueError("index without defined labels")
        if pass_positions:
            return func(input.ravel(), positions.ravel())
        return func(input.ravel())

    labels = np.asarray(labels)
    try:
        input, labels = np.broadcast_arrays(input, labels)
    except ValueError as e:
        raise ValueError("input and labels must have the same shape "
                         "(excepting dimensions with width 1)") from e

    if index is None:
        # No index: one call over the whole labeled (nonzero) region.
        foreground = labels > 0
        if pass_positions:
            return func(input[foreground], positions[foreground])
        return func(input[foreground])

    index = np.atleast_1d(index)
    # Round-trip cast check: index values must survive conversion to the
    # labels dtype, otherwise lookups would silently miss.
    if np.any(index.astype(labels.dtype).astype(index.dtype) != index):
        raise ValueError(f"Cannot convert index values from <{index.dtype}> to "
                         f"<{labels.dtype}> (labels' type) without loss of precision")
    index = index.astype(labels.dtype)

    # Optimization: drop every element whose label cannot occur in `index`.
    # Boolean indexing also ravels the arrays.
    keep = (labels >= index.min()) & (labels <= index.max())
    labels = labels[keep]
    input = input[keep]
    if pass_positions:
        positions = positions[keep]

    # Sort by label so each label occupies one contiguous run.
    order = labels.argsort()
    labels = labels[order]
    input = input[order]
    if pass_positions:
        positions = positions[order]

    index_order = index.argsort()
    sorted_index = index[index_order]

    columns = [input, positions] if pass_positions else [input]

    # searchsorted on the sorted labels gives the [start, stop) run for
    # every requested index value in one vectorized pass.
    starts = np.searchsorted(labels, sorted_index, side='left')
    stops = np.searchsorted(labels, sorted_index, side='right')

    gathered = np.empty(index.shape, out_dtype)
    gathered[:] = default
    for i, (start, stop) in enumerate(zip(starts, stops)):
        if start == stop:
            # Label absent from `labels`: keep `default`.
            continue
        gathered[i] = func(*[col[start:stop] for col in columns])

    # Undo the index sort so results line up with the caller's `index`.
    result = np.zeros(index.shape, out_dtype)
    result[index_order] = gathered
    if scalar_index:
        result = result[0]

    return result
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
def _safely_castable_to_int(dt):
|
| 584 |
+
"""Test whether the NumPy data type `dt` can be safely cast to an int."""
|
| 585 |
+
int_size = np.dtype(int).itemsize
|
| 586 |
+
safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or
|
| 587 |
+
(np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))
|
| 588 |
+
return safe
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def _stats(input, labels=None, index=None, centered=False):
|
| 592 |
+
"""Count, sum, and optionally compute (sum - centre)^2 of input by label
|
| 593 |
+
|
| 594 |
+
Parameters
|
| 595 |
+
----------
|
| 596 |
+
input : array_like, N-D
|
| 597 |
+
The input data to be analyzed.
|
| 598 |
+
labels : array_like (N-D), optional
|
| 599 |
+
The labels of the data in `input`. This array must be broadcast
|
| 600 |
+
compatible with `input`; typically, it is the same shape as `input`.
|
| 601 |
+
If `labels` is None, all nonzero values in `input` are treated as
|
| 602 |
+
the single labeled group.
|
| 603 |
+
index : label or sequence of labels, optional
|
| 604 |
+
These are the labels of the groups for which the stats are computed.
|
| 605 |
+
If `index` is None, the stats are computed for the single group where
|
| 606 |
+
`labels` is greater than 0.
|
| 607 |
+
centered : bool, optional
|
| 608 |
+
If True, the centered sum of squares for each labeled group is
|
| 609 |
+
also returned. Default is False.
|
| 610 |
+
|
| 611 |
+
Returns
|
| 612 |
+
-------
|
| 613 |
+
counts : int or ndarray of ints
|
| 614 |
+
The number of elements in each labeled group.
|
| 615 |
+
sums : scalar or ndarray of scalars
|
| 616 |
+
The sums of the values in each labeled group.
|
| 617 |
+
sums_c : scalar or ndarray of scalars, optional
|
| 618 |
+
The sums of mean-centered squares of the values in each labeled group.
|
| 619 |
+
This is only returned if `centered` is True.
|
| 620 |
+
|
| 621 |
+
"""
|
| 622 |
+
def single_group(vals):
|
| 623 |
+
if centered:
|
| 624 |
+
vals_c = vals - vals.mean()
|
| 625 |
+
return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()
|
| 626 |
+
else:
|
| 627 |
+
return vals.size, vals.sum()
|
| 628 |
+
|
| 629 |
+
input = np.asarray(input)
|
| 630 |
+
if labels is None:
|
| 631 |
+
return single_group(input)
|
| 632 |
+
|
| 633 |
+
# ensure input and labels match sizes
|
| 634 |
+
input, labels = np.broadcast_arrays(input, labels)
|
| 635 |
+
|
| 636 |
+
if index is None:
|
| 637 |
+
return single_group(input[labels > 0])
|
| 638 |
+
|
| 639 |
+
if np.isscalar(index):
|
| 640 |
+
return single_group(input[labels == index])
|
| 641 |
+
|
| 642 |
+
def _sum_centered(labels):
|
| 643 |
+
# `labels` is expected to be an ndarray with the same shape as `input`.
|
| 644 |
+
# It must contain the label indices (which are not necessarily the labels
|
| 645 |
+
# themselves).
|
| 646 |
+
means = sums / counts
|
| 647 |
+
centered_input = input - means[labels]
|
| 648 |
+
# bincount expects 1-D inputs, so we ravel the arguments.
|
| 649 |
+
bc = np.bincount(labels.ravel(),
|
| 650 |
+
weights=(centered_input *
|
| 651 |
+
centered_input.conjugate()).ravel())
|
| 652 |
+
return bc
|
| 653 |
+
|
| 654 |
+
# Remap labels to unique integers if necessary, or if the largest
|
| 655 |
+
# label is larger than the number of values.
|
| 656 |
+
|
| 657 |
+
if (not _safely_castable_to_int(labels.dtype) or
|
| 658 |
+
labels.min() < 0 or labels.max() > labels.size):
|
| 659 |
+
# Use np.unique to generate the label indices. `new_labels` will
|
| 660 |
+
# be 1-D, but it should be interpreted as the flattened N-D array of
|
| 661 |
+
# label indices.
|
| 662 |
+
unique_labels, new_labels = np.unique(labels, return_inverse=True)
|
| 663 |
+
new_labels = np.reshape(new_labels, (-1,)) # flatten, since it may be >1-D
|
| 664 |
+
counts = np.bincount(new_labels)
|
| 665 |
+
sums = np.bincount(new_labels, weights=input.ravel())
|
| 666 |
+
if centered:
|
| 667 |
+
# Compute the sum of the mean-centered squares.
|
| 668 |
+
# We must reshape new_labels to the N-D shape of `input` before
|
| 669 |
+
# passing it _sum_centered.
|
| 670 |
+
sums_c = _sum_centered(new_labels.reshape(labels.shape))
|
| 671 |
+
idxs = np.searchsorted(unique_labels, index)
|
| 672 |
+
# make all of idxs valid
|
| 673 |
+
idxs[idxs >= unique_labels.size] = 0
|
| 674 |
+
found = (unique_labels[idxs] == index)
|
| 675 |
+
else:
|
| 676 |
+
# labels are an integer type allowed by bincount, and there aren't too
|
| 677 |
+
# many, so call bincount directly.
|
| 678 |
+
counts = np.bincount(labels.ravel())
|
| 679 |
+
sums = np.bincount(labels.ravel(), weights=input.ravel())
|
| 680 |
+
if centered:
|
| 681 |
+
sums_c = _sum_centered(labels)
|
| 682 |
+
# make sure all index values are valid
|
| 683 |
+
idxs = np.asanyarray(index, np.int_).copy()
|
| 684 |
+
found = (idxs >= 0) & (idxs < counts.size)
|
| 685 |
+
idxs[~found] = 0
|
| 686 |
+
|
| 687 |
+
counts = counts[idxs]
|
| 688 |
+
counts[~found] = 0
|
| 689 |
+
sums = sums[idxs]
|
| 690 |
+
sums[~found] = 0
|
| 691 |
+
|
| 692 |
+
if not centered:
|
| 693 |
+
return (counts, sums)
|
| 694 |
+
else:
|
| 695 |
+
sums_c = sums_c[idxs]
|
| 696 |
+
sums_c[~found] = 0
|
| 697 |
+
return (counts, sums, sums_c)
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def sum(input, labels=None, index=None):
    """
    Calculate the sum of the values of the array.

    Notes
    -----
    This is an alias for `ndimage.sum_labels` kept for backwards compatibility
    reasons, for new code please prefer `sum_labels`. See the `sum_labels`
    docstring for more details.

    """
    # Pure delegation; this name is retained only so existing callers of
    # ``ndimage.sum`` keep working.
    return sum_labels(input, labels=labels, index=index)
|
| 712 |
+
|
| 713 |
+
|
| 714 |
+
def sum_labels(input, labels=None, index=None):
    """
    Calculate the sum of the values of the array.

    Parameters
    ----------
    input : array_like
        Values of `input` inside the regions defined by `labels`
        are summed together.
    labels : array_like of ints, optional
        Assign labels to the values of the array. Has to have the same shape as
        `input`.
    index : array_like, optional
        A single label number or a sequence of label numbers of
        the objects to be measured.

    Returns
    -------
    sum : ndarray or scalar
        An array of the sums of values of `input` inside the regions defined
        by `labels` with the same shape as `index`. If 'index' is None or scalar,
        a scalar is returned.

    See Also
    --------
    mean, median

    Examples
    --------
    >>> from scipy import ndimage
    >>> input = [0,1,2,3]
    >>> labels = [1,1,2,2]
    >>> ndimage.sum_labels(input, labels, index=[1,2])
    [1.0, 5.0]
    >>> ndimage.sum_labels(input, labels, index=1)
    1
    >>> ndimage.sum_labels(input, labels)
    6

    """
    # _stats returns (counts, sums); only the per-label sums are needed here.
    return _stats(input, labels, index)[1]
|
| 757 |
+
|
| 758 |
+
|
| 759 |
+
def mean(input, labels=None, index=None):
    """
    Calculate the mean of the values of an array at labels.

    Parameters
    ----------
    input : array_like
        Array on which to compute the mean of elements over distinct
        regions.
    labels : array_like, optional
        Array of labels of same shape, or broadcastable to the same shape as
        `input`. All elements sharing the same label form one region over
        which the mean of the elements is computed.
    index : int or sequence of ints, optional
        Labels of the objects over which the mean is to be computed.
        Default is None, in which case the mean for all values where label is
        greater than 0 is calculated.

    Returns
    -------
    out : list
        Sequence of same length as `index`, with the mean of the different
        regions labeled by the labels in `index`.

    See Also
    --------
    variance, standard_deviation, minimum, maximum, sum, label

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.arange(25).reshape((5,5))
    >>> labels = np.zeros_like(a)
    >>> labels[3:5,3:5] = 1
    >>> index = np.unique(labels)
    >>> ndimage.mean(a, labels=labels, index=index)
    [10.285714285714286, 21.0]

    """
    # Gather per-label element counts and sums in a single pass.
    counts, totals = _stats(input, labels, index)
    # Cast the counts to float64 so the division is true division even for
    # integer-typed inputs.
    return totals / np.asanyarray(counts).astype(np.float64)
|
| 810 |
+
|
| 811 |
+
|
| 812 |
+
def variance(input, labels=None, index=None):
    """
    Calculate the variance of the values of an N-D image array, optionally at
    specified sub-regions.

    Parameters
    ----------
    input : array_like
        Nd-image data to process.
    labels : array_like, optional
        Labels defining sub-regions in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        `labels` to include in output. If None (default), all values where
        `labels` is non-zero are used.

    Returns
    -------
    variance : float or ndarray
        Values of variance, for each sub-region if `labels` and `index` are
        specified.

    See Also
    --------
    label, standard_deviation, maximum, minimum, extrema

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.variance(a)
    7.609375

    """
    # Request the mean-centered sum of squares alongside counts and sums;
    # variance is then just the centered sum over the element count.
    counts, _, centered_sq_sums = _stats(input, labels, index, centered=True)
    return centered_sq_sums / np.asanyarray(counts).astype(float)
|
| 863 |
+
|
| 864 |
+
|
| 865 |
+
def standard_deviation(input, labels=None, index=None):
    """
    Calculate the standard deviation of the values of an N-D image array,
    optionally at specified sub-regions.

    Parameters
    ----------
    input : array_like
        N-D image data to process.
    labels : array_like, optional
        Labels to identify sub-regions in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        `labels` to include in output. If None (default), all values where
        `labels` is non-zero are used.

    Returns
    -------
    standard_deviation : float or ndarray
        Values of standard deviation, for each sub-region if `labels` and
        `index` are specified.

    See Also
    --------
    label, variance, maximum, minimum, extrema

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.standard_deviation(a)
    2.7585095613392387

    """
    # Standard deviation is simply the square root of the per-region variance.
    var = variance(input, labels, index)
    return np.sqrt(var)
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
def _select(input, labels=None, index=None, find_min=False, find_max=False,
            find_min_positions=False, find_max_positions=False,
            find_median=False):
    """Returns min, max, or both, plus their positions (if requested), and
    median."""
    # Shared workhorse for minimum/maximum/median/minimum_position/
    # maximum_position/extrema.  The return value is a list whose entries
    # appear in the fixed order: min, min positions, max, max positions,
    # median -- including only the quantities whose find_* flag is set.

    input = np.asanyarray(input)

    find_positions = find_min_positions or find_max_positions
    positions = None
    if find_positions:
        # Flat (raveled) index of every element, laid out in input's shape,
        # so position results can later be unraveled by the callers.
        positions = np.arange(input.size).reshape(input.shape)

    def single_group(vals, positions):
        # Compute the requested statistics for one flat group of values.
        # `positions[...][0]` picks the first occurrence when the extremum
        # appears multiple times.
        result = []
        if find_min:
            result += [vals.min()]
        if find_min_positions:
            result += [positions[vals == vals.min()][0]]
        if find_max:
            result += [vals.max()]
        if find_max_positions:
            result += [positions[vals == vals.max()][0]]
        if find_median:
            result += [np.median(vals)]
        return result

    if labels is None:
        return single_group(input, positions)

    # ensure input and labels match sizes
    input, labels = np.broadcast_arrays(input, labels)

    if index is None:
        # No index: treat all strictly-positive labels as one group.
        mask = (labels > 0)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    if np.isscalar(index):
        # Scalar index: select the single matching label group.
        mask = (labels == index)
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    index = np.asarray(index)

    # remap labels to unique integers if necessary, or if the largest
    # label is larger than the number of values.
    if (not _safely_castable_to_int(labels.dtype) or
            labels.min() < 0 or labels.max() > labels.size):
        # remap labels, and indexes
        unique_labels, labels = np.unique(labels, return_inverse=True)
        idxs = np.searchsorted(unique_labels, index)

        # make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = (unique_labels[idxs] == index)
    else:
        # labels are an integer type, and there aren't too many
        idxs = np.asanyarray(index, np.int_).copy()
        found = (idxs >= 0) & (idxs <= labels.max())

    # Route entries of `index` that match no label to a sentinel slot
    # (labels.max() + 1) that no real label occupies.
    idxs[~ found] = labels.max() + 1

    if find_median:
        # Sort primarily by label, secondarily by value, so each label's
        # values form a contiguous, value-sorted run.
        order = np.lexsort((input.ravel(), labels.ravel()))
    else:
        # Only extrema are needed: a value-only sort suffices.
        order = input.ravel().argsort()
    input = input.ravel()[order]
    labels = labels.ravel()[order]
    if find_positions:
        positions = positions.ravel()[order]

    result = []
    # The blocks below exploit fancy-assignment semantics: when the same
    # label index appears multiple times on the left-hand side, the LAST
    # write wins.  With `input` sorted ascending, assigning in reverse
    # order leaves each label's MINIMUM, and assigning in forward order
    # leaves each label's MAXIMUM.  Arrays are sized labels.max() + 2 so
    # the sentinel slot for not-found indices is addressable.
    if find_min:
        mins = np.zeros(labels.max() + 2, input.dtype)
        mins[labels[::-1]] = input[::-1]
        result += [mins[idxs]]
    if find_min_positions:
        minpos = np.zeros(labels.max() + 2, int)
        minpos[labels[::-1]] = positions[::-1]
        result += [minpos[idxs]]
    if find_max:
        maxs = np.zeros(labels.max() + 2, input.dtype)
        maxs[labels] = input
        result += [maxs[idxs]]
    if find_max_positions:
        maxpos = np.zeros(labels.max() + 2, int)
        maxpos[labels] = positions
        result += [maxpos[idxs]]
    if find_median:
        # Using the same last-write-wins trick, `lo` / `hi` end up holding,
        # for each label, the sorted-array indices of that label's smallest
        # and largest values (the run's endpoints).
        locs = np.arange(len(labels))
        lo = np.zeros(labels.max() + 2, np.int_)
        lo[labels[::-1]] = locs[::-1]
        hi = np.zeros(labels.max() + 2, np.int_)
        hi[labels] = locs
        lo = lo[idxs]
        hi = hi[idxs]
        # lo is an index to the lowest value in input for each label,
        # hi is an index to the largest value.
        # move them to be either the same ((hi - lo) % 2 == 0) or next
        # to each other ((hi - lo) % 2 == 1), then average.
        step = (hi - lo) // 2
        lo += step
        hi -= step
        if (np.issubdtype(input.dtype, np.integer)
                or np.issubdtype(input.dtype, np.bool_)):
            # avoid integer overflow or boolean addition (gh-12836)
            result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0]
        else:
            result += [(input[lo] + input[hi]) / 2.0]

    return result
|
| 1033 |
+
|
| 1034 |
+
|
| 1035 |
+
def minimum(input, labels=None, index=None):
    """
    Calculate the minimum of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        minimal values of `input` over the region is computed.
    labels : array_like, optional
        An array_like of integers marking different regions over which the
        minimum value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the minimum
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        minima. If index is None, the minimum over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    minimum : float or list of floats
        List of minima of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the minimal value of `input` if `labels` is None,
        and the minimal value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, maximum, median, minimum_position, extrema, sum, mean, variance,
    standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(a)
    >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))
    [1, 4, 3]
    >>> ndimage.minimum(a)
    0

    """
    # _select returns a list of the requested statistics; only the minimum
    # was requested, so it is the sole element.
    selected = _select(input, labels, index, find_min=True)
    return selected[0]
|
| 1096 |
+
|
| 1097 |
+
|
| 1098 |
+
def maximum(input, labels=None, index=None):
    """
    Calculate the maximum of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        maximal values of `input` over the region is computed.
    labels : array_like, optional
        An array of integers marking different regions over which the
        maximum value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the maximum
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        maxima. If index is None, the maximum over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    output : float or list of floats
        List of maxima of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the maximal value of `input` if `labels` is None,
        and the maximal value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.arange(16).reshape((4,4))
    >>> labels = np.zeros_like(a)
    >>> labels[:2,:2] = 1
    >>> labels[2:, 1:3] = 2
    >>> ndimage.maximum(a)
    15
    >>> ndimage.maximum(a, labels=labels, index=[1,2])
    [5, 14]

    """
    # Only the maximum statistic was requested from _select, so the result
    # list holds exactly one entry.
    selected = _select(input, labels, index, find_max=True)
    return selected[0]
|
| 1176 |
+
|
| 1177 |
+
|
| 1178 |
+
def median(input, labels=None, index=None):
    """
    Calculate the median of the values of an array over labeled regions.

    Parameters
    ----------
    input : array_like
        Array_like of values. For each region specified by `labels`, the
        median value of `input` over the region is computed.
    labels : array_like, optional
        An array_like of integers marking different regions over which the
        median value of `input` is to be computed. `labels` must have the
        same shape as `input`. If `labels` is not specified, the median
        over the whole array is returned.
    index : array_like, optional
        A list of region labels that are taken into account for computing the
        medians. If index is None, the median over all elements where `labels`
        is non-zero is returned.

    Returns
    -------
    median : float or list of floats
        List of medians of `input` over the regions determined by `labels` and
        whose index is in `index`. If `index` or `labels` are not specified, a
        float is returned: the median value of `input` if `labels` is None,
        and the median value of elements where `labels` is greater than zero
        if `index` is None.

    See Also
    --------
    label, minimum, maximum, extrema, sum, mean, variance, standard_deviation

    Notes
    -----
    The function returns a Python list and not a NumPy array, use
    `np.array` to convert the list to an array.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 1],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> labels, labels_nb = ndimage.label(a)
    >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))
    [2.5, 4.0, 6.0]
    >>> ndimage.median(a)
    1.0

    """
    # Only the median statistic was requested from _select, so the result
    # list holds exactly one entry.
    selected = _select(input, labels, index, find_median=True)
    return selected[0]
|
| 1238 |
+
|
| 1239 |
+
|
| 1240 |
+
def minimum_position(input, labels=None, index=None):
    """
    Find the positions of the minimums of the values of an array at labels.

    Parameters
    ----------
    input : array_like
        Array_like of values.
    labels : array_like, optional
        An array of integers marking different regions over which the
        position of the minimum value of `input` is to be computed.
        `labels` must have the same shape as `input`. If `labels` is not
        specified, the location of the first minimum over the whole
        array is returned.

        The `labels` argument only works when `index` is specified.
    index : array_like, optional
        A list of region labels that are taken into account for finding the
        location of the minima. If `index` is None, the ``first`` minimum
        over all elements where `labels` is non-zero is returned.

        The `index` argument only works when `labels` is specified.

    Returns
    -------
    output : list of tuples of ints
        Tuple of ints or list of tuples of ints that specify the location
        of minima of `input` over the regions determined by `labels` and
        whose index is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is
        returned specifying the location of the first minimal value of `input`.

    See Also
    --------
    label, minimum, median, maximum_position, extrema, sum, mean, variance,
    standard_deviation

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.array([[10, 20, 30],
    ...               [40, 80, 100],
    ...               [1, 100, 200]])
    >>> ndimage.minimum_position(a)
    (2, 0)

    """
    shape = np.array(np.asarray(input).shape)
    # Element counts of the trailing axes, i.e. row-major "strides" in units
    # of elements; see np.unravel_index for the underlying arithmetic.
    strides = np.cumprod([1] + list(shape[:0:-1]))[::-1]

    # _select yields flat (raveled) indices of the per-region minima.
    flat_pos = _select(input, labels, index, find_min_positions=True)[0]

    if np.isscalar(flat_pos):
        # Single region: unravel one flat index into an N-D coordinate tuple.
        return tuple((flat_pos // strides) % shape)

    # Multiple regions: unravel each flat index into its own coordinate tuple.
    coords = (flat_pos.reshape(-1, 1) // strides) % shape
    return [tuple(row) for row in coords]
|
| 1317 |
+
|
| 1318 |
+
|
| 1319 |
+
def maximum_position(input, labels=None, index=None):
    """
    Find the positions of the maximums of the values of an array at labels.

    For each region specified by `labels`, the position of the maximum
    value of `input` within the region is returned.

    Parameters
    ----------
    input : array_like
        Array_like of values.
    labels : array_like, optional
        An array of integers marking different regions over which the
        position of the maximum value of `input` is to be computed.
        `labels` must have the same shape as `input`. If `labels` is not
        specified, the location of the first maximum over the whole
        array is returned.

        The `labels` argument only works when `index` is specified.
    index : array_like, optional
        A list of region labels that are taken into account for finding the
        location of the maxima. If `index` is None, the first maximum
        over all elements where `labels` is non-zero is returned.

        The `index` argument only works when `labels` is specified.

    Returns
    -------
    output : list of tuples of ints
        List of tuples of ints that specify the location of maxima of
        `input` over the regions determined by `labels` and whose index
        is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is
        returned specifying the location of the ``first`` maximal value
        of `input`.

    See Also
    --------
    label, minimum, median, minimum_position, extrema, sum, mean, variance,
    standard_deviation

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> ndimage.maximum_position(a)
    (3, 0)

    """
    shape = np.array(np.asarray(input).shape)
    # Element counts of the trailing axes, i.e. row-major "strides" in units
    # of elements; see np.unravel_index for the underlying arithmetic.
    strides = np.cumprod([1] + list(shape[:0:-1]))[::-1]

    # _select yields flat (raveled) indices of the per-region maxima.
    flat_pos = _select(input, labels, index, find_max_positions=True)[0]

    if np.isscalar(flat_pos):
        # Single region: unravel one flat index into an N-D coordinate tuple.
        return tuple((flat_pos // strides) % shape)

    # Multiple regions: unravel each flat index into its own coordinate tuple.
    coords = (flat_pos.reshape(-1, 1) // strides) % shape
    return [tuple(row) for row in coords]
|
| 1402 |
+
|
| 1403 |
+
|
| 1404 |
+
def extrema(input, labels=None, index=None):
    """
    Calculate the minimums and maximums of the values of an array
    at labels, along with their positions.

    Parameters
    ----------
    input : ndarray
        N-D image data to process.
    labels : ndarray, optional
        Labels of features in input.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        Labels to include in output. If None (default), all values where
        non-zero `labels` are used.

    Returns
    -------
    minimums, maximums : int or ndarray
        Values of minimums and maximums in each feature.
    min_positions, max_positions : tuple or list of tuples
        Each tuple gives the N-D coordinates of the corresponding minimum
        or maximum.

    See Also
    --------
    maximum, minimum, maximum_position, minimum_position, center_of_mass

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> ndimage.extrema(a)
    (0, 9, (0, 2), (3, 0))

    """
    shape = np.array(np.asarray(input).shape)
    # Element counts of the trailing axes, i.e. row-major "strides" in units
    # of elements; see np.unravel_index for the underlying arithmetic.
    strides = np.cumprod([1] + list(shape[:0:-1]))[::-1]

    # _select emits the requested statistics in the fixed order:
    # min, min positions, max, max positions.
    mins, min_pos, maxs, max_pos = _select(input, labels, index,
                                           find_min=True,
                                           find_max=True,
                                           find_min_positions=True,
                                           find_max_positions=True)

    if np.isscalar(mins):
        # Single region: unravel each flat position into one coordinate tuple.
        return (mins, maxs,
                tuple((min_pos // strides) % shape),
                tuple((max_pos // strides) % shape))

    # Multiple regions: unravel every flat position into its coordinate tuple.
    min_pos = [tuple(v) for v in (min_pos.reshape(-1, 1) // strides) % shape]
    max_pos = [tuple(v) for v in (max_pos.reshape(-1, 1) // strides) % shape]

    return mins, maxs, min_pos, max_pos
|
| 1481 |
+
|
| 1482 |
+
|
| 1483 |
+
def center_of_mass(input, labels=None, index=None):
    """
    Calculate the center of mass of the values of an array at labels.

    Parameters
    ----------
    input : ndarray
        Data from which to calculate center-of-mass. The masses can either
        be positive or negative.
    labels : ndarray, optional
        Labels for objects in `input`, as generated by `ndimage.label`.
        Only used with `index`. Dimensions must be the same as `input`.
    index : int or sequence of ints, optional
        Labels for which to calculate centers-of-mass. If not specified,
        the combined center of mass of all labels greater than zero
        will be calculated. Only used with `labels`.

    Returns
    -------
    center_of_mass : tuple, or list of tuples
        Coordinates of centers-of-mass.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array(([0,0,0,0],
    ...               [0,1,1,0],
    ...               [0,1,1,0],
    ...               [0,1,1,0]))
    >>> from scipy import ndimage
    >>> ndimage.center_of_mass(a)
    (2.0, 1.5)

    Calculation of multiple objects in an image

    >>> b = np.array(([0,1,1,0],
    ...               [0,1,0,0],
    ...               [0,0,0,0],
    ...               [0,0,1,1],
    ...               [0,0,1,1]))
    >>> lbl = ndimage.label(b)[0]
    >>> ndimage.center_of_mass(b, lbl, [1,2])
    [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]

    Negative masses are also accepted, which can occur for example when
    bias is removed from measured data due to random noise.

    >>> c = np.array(([-1,0,0,0],
    ...               [0,-1,-1,0],
    ...               [0,1,-1,0],
    ...               [0,1,1,0]))
    >>> ndimage.center_of_mass(c)
    (-4.0, 1.0)

    If there are division by zero issues, the function does not raise an
    error but rather issues a RuntimeWarning before returning inf and/or NaN.

    >>> d = np.array([-1, 1])
    >>> ndimage.center_of_mass(d)
    (inf,)
    """
    input = np.asarray(input)

    # Total mass per requested label; the per-axis first moments are divided
    # by this (division by zero yields inf/NaN plus a RuntimeWarning, by
    # design -- see docstring).
    total_mass = sum_labels(input, labels, index)

    # Open coordinate grids: one broadcastable index array per axis.
    coord_grids = np.ogrid[[slice(0, extent) for extent in input.shape]]

    # First moment along each axis, normalized by the total mass.
    per_axis = []
    for axis in range(input.ndim):
        moment = sum_labels(input * coord_grids[axis].astype(float),
                            labels, index)
        per_axis.append(moment / total_mass)

    # Scalar results (single combined region) -> a single coordinate tuple.
    if np.isscalar(per_axis[0]):
        return tuple(per_axis)

    # Array results (one value per index) -> list of coordinate tuples.
    return [tuple(coords) for coords in np.array(per_axis).T]
|
| 1555 |
+
|
| 1556 |
+
|
| 1557 |
+
def histogram(input, min, max, bins, labels=None, index=None):
    """
    Calculate the histogram of the values of an array, optionally at labels.

    Histogram calculates the frequency of values in an array within bins
    determined by `min`, `max`, and `bins`. The `labels` and `index`
    keywords can limit the scope of the histogram to specified sub-regions
    within the array.

    Parameters
    ----------
    input : array_like
        Data for which to calculate histogram.
    min, max : int
        Minimum and maximum values of range of histogram bins.
    bins : int
        Number of bins.
    labels : array_like, optional
        Labels for objects in `input`.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        Label or labels for which to calculate histogram. If None, all values
        where label is greater than zero are used

    Returns
    -------
    hist : ndarray
        Histogram counts.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[ 0.    ,  0.2146,  0.5962,  0.    ],
    ...               [ 0.    ,  0.7778,  0.    ,  0.    ],
    ...               [ 0.    ,  0.    ,  0.    ,  0.    ],
    ...               [ 0.    ,  0.    ,  0.7181,  0.2787],
    ...               [ 0.    ,  0.    ,  0.6573,  0.3094]])
    >>> from scipy import ndimage
    >>> ndimage.histogram(a, 0, 1, 10)
    array([13,  0,  2,  1,  0,  1,  1,  2,  0,  0])

    With labels and no indices, non-zero elements are counted:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.histogram(a, 0, 1, 10, lbl)
    array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])

    Indices can be used to count only certain objects:

    >>> ndimage.histogram(a, 0, 1, 10, lbl, 2)
    array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])

    """
    # NOTE: `min` and `max` shadow the builtins, but they are part of the
    # public signature and cannot be renamed without breaking callers.
    edges = np.linspace(min, max, bins + 1)

    def _count_in_bins(values):
        # Per-region counting function applied by labeled_comprehension.
        return np.histogram(values, edges)[0]

    return labeled_comprehension(input, labels, index, _count_in_bins,
                                 object, None, pass_positions=False)
|
| 1617 |
+
|
| 1618 |
+
|
| 1619 |
+
def watershed_ift(input, markers, structure=None, output=None):
    """
    Apply watershed from markers using image foresting transform algorithm.

    Parameters
    ----------
    input : array_like
        Input.
    markers : array_like
        Markers are points within each watershed that form the beginning
        of the process. Negative markers are considered background markers
        which are processed after the other markers.
    structure : structure element, optional
        A structuring element defining the connectivity of the object can be
        provided. If None, an element is generated with a squared
        connectivity equal to one.
    output : ndarray, optional
        An output array can optionally be provided. The same shape as input.

    Returns
    -------
    watershed_ift : ndarray
        Output.  Same shape as `input`.

    References
    ----------
    .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image
           foresting transform: theory, algorithms, and applications",
           Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.

    """
    # The C implementation only handles unsigned 8/16-bit images.
    input = np.asarray(input)
    if input.dtype.type not in (np.uint8, np.uint16):
        raise TypeError('only 8 and 16 unsigned inputs are supported')

    # Default connectivity: squared connectivity one (cross-shaped element).
    if structure is None:
        structure = _morphology.generate_binary_structure(input.ndim, 1)
    structure = np.asarray(structure, dtype=bool)
    if structure.ndim != input.ndim:
        raise RuntimeError('structure and input must have equal rank')
    if any(extent != 3 for extent in structure.shape):
        raise RuntimeError('structure dimensions must be equal to 3')

    # The C code requires a contiguous structuring element.
    if not structure.flags.contiguous:
        structure = structure.copy()

    markers = np.asarray(markers)
    if input.shape != markers.shape:
        raise RuntimeError('input and markers must have equal shape')

    integral_types = [np.int8,
                      np.int16,
                      np.int32,
                      np.int64,
                      np.intc,
                      np.intp]

    if markers.dtype.type not in integral_types:
        raise RuntimeError('marker should be of integer type')

    # A caller-supplied output array must be integer-typed; otherwise the
    # output is allocated with the same dtype as the markers.
    if isinstance(output, np.ndarray):
        if output.dtype.type not in integral_types:
            raise RuntimeError('output should be of integer type')
    else:
        output = markers.dtype

    output = _ni_support._get_output(output, input)
    _nd_image.watershed_ift(input, markers, structure, output)
    return output
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_morphology.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ndimage_api.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This is the 'bare' ndimage API.
|
| 2 |
+
|
| 3 |
+
This --- private! --- module only collects implementations of public ndimage API
|
| 4 |
+
for _support_alternative_backends.
|
| 5 |
+
The latter --- also private! --- module adds delegation to CuPy etc and
|
| 6 |
+
re-exports decorated names to __init__.py
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from ._filters import * # noqa: F403
|
| 10 |
+
from ._fourier import * # noqa: F403
|
| 11 |
+
from ._interpolation import * # noqa: F403
|
| 12 |
+
from ._measurements import * # noqa: F403
|
| 13 |
+
from ._morphology import * # noqa: F403
|
| 14 |
+
|
| 15 |
+
__all__ = [s for s in dir() if not s.startswith('_')]
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py
ADDED
|
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Docstring components common to several ndimage functions."""
|
| 2 |
+
from typing import Final
|
| 3 |
+
|
| 4 |
+
from scipy._lib import doccer
|
| 5 |
+
|
| 6 |
+
__all__ = ['docfiller']
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
_input_doc = (
|
| 10 |
+
"""input : array_like
|
| 11 |
+
The input array.""")
|
| 12 |
+
_axis_doc = (
|
| 13 |
+
"""axis : int, optional
|
| 14 |
+
The axis of `input` along which to calculate. Default is -1.""")
|
| 15 |
+
_output_doc = (
|
| 16 |
+
"""output : array or dtype, optional
|
| 17 |
+
The array in which to place the output, or the dtype of the
|
| 18 |
+
returned array. By default an array of the same dtype as input
|
| 19 |
+
will be created.""")
|
| 20 |
+
_size_foot_doc = (
|
| 21 |
+
"""size : scalar or tuple, optional
|
| 22 |
+
See footprint, below. Ignored if footprint is given.
|
| 23 |
+
footprint : array, optional
|
| 24 |
+
Either `size` or `footprint` must be defined. `size` gives
|
| 25 |
+
the shape that is taken from the input array, at every element
|
| 26 |
+
position, to define the input to the filter function.
|
| 27 |
+
`footprint` is a boolean array that specifies (implicitly) a
|
| 28 |
+
shape, but also which of the elements within this shape will get
|
| 29 |
+
passed to the filter function. Thus ``size=(n,m)`` is equivalent
|
| 30 |
+
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
|
| 31 |
+
of dimensions of the input array, so that, if the input array is
|
| 32 |
+
shape (10,10,10), and `size` is 2, then the actual size used is
|
| 33 |
+
(2,2,2). When `footprint` is given, `size` is ignored.""")
|
| 34 |
+
_mode_reflect_doc = (
|
| 35 |
+
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
|
| 36 |
+
The `mode` parameter determines how the input array is extended
|
| 37 |
+
beyond its boundaries. Default is 'reflect'. Behavior for each valid
|
| 38 |
+
value is as follows:
|
| 39 |
+
|
| 40 |
+
'reflect' (`d c b a | a b c d | d c b a`)
|
| 41 |
+
The input is extended by reflecting about the edge of the last
|
| 42 |
+
pixel. This mode is also sometimes referred to as half-sample
|
| 43 |
+
symmetric.
|
| 44 |
+
|
| 45 |
+
'constant' (`k k k k | a b c d | k k k k`)
|
| 46 |
+
The input is extended by filling all values beyond the edge with
|
| 47 |
+
the same constant value, defined by the `cval` parameter.
|
| 48 |
+
|
| 49 |
+
'nearest' (`a a a a | a b c d | d d d d`)
|
| 50 |
+
The input is extended by replicating the last pixel.
|
| 51 |
+
|
| 52 |
+
'mirror' (`d c b | a b c d | c b a`)
|
| 53 |
+
The input is extended by reflecting about the center of the last
|
| 54 |
+
pixel. This mode is also sometimes referred to as whole-sample
|
| 55 |
+
symmetric.
|
| 56 |
+
|
| 57 |
+
'wrap' (`a b c d | a b c d | a b c d`)
|
| 58 |
+
The input is extended by wrapping around to the opposite edge.
|
| 59 |
+
|
| 60 |
+
For consistency with the interpolation functions, the following mode
|
| 61 |
+
names can also be used:
|
| 62 |
+
|
| 63 |
+
'grid-mirror'
|
| 64 |
+
This is a synonym for 'reflect'.
|
| 65 |
+
|
| 66 |
+
'grid-constant'
|
| 67 |
+
This is a synonym for 'constant'.
|
| 68 |
+
|
| 69 |
+
'grid-wrap'
|
| 70 |
+
This is a synonym for 'wrap'.""")
|
| 71 |
+
|
| 72 |
+
_mode_interp_constant_doc = (
|
| 73 |
+
"""mode : {'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', \
|
| 74 |
+
'mirror', 'grid-wrap', 'wrap'}, optional
|
| 75 |
+
The `mode` parameter determines how the input array is extended
|
| 76 |
+
beyond its boundaries. Default is 'constant'. Behavior for each valid
|
| 77 |
+
value is as follows (see additional plots and details on
|
| 78 |
+
:ref:`boundary modes <ndimage-interpolation-modes>`):
|
| 79 |
+
|
| 80 |
+
'reflect' (`d c b a | a b c d | d c b a`)
|
| 81 |
+
The input is extended by reflecting about the edge of the last
|
| 82 |
+
pixel. This mode is also sometimes referred to as half-sample
|
| 83 |
+
symmetric.
|
| 84 |
+
|
| 85 |
+
'grid-mirror'
|
| 86 |
+
This is a synonym for 'reflect'.
|
| 87 |
+
|
| 88 |
+
'constant' (`k k k k | a b c d | k k k k`)
|
| 89 |
+
The input is extended by filling all values beyond the edge with
|
| 90 |
+
the same constant value, defined by the `cval` parameter. No
|
| 91 |
+
interpolation is performed beyond the edges of the input.
|
| 92 |
+
|
| 93 |
+
'grid-constant' (`k k k k | a b c d | k k k k`)
|
| 94 |
+
The input is extended by filling all values beyond the edge with
|
| 95 |
+
the same constant value, defined by the `cval` parameter. Interpolation
|
| 96 |
+
occurs for samples outside the input's extent as well.
|
| 97 |
+
|
| 98 |
+
'nearest' (`a a a a | a b c d | d d d d`)
|
| 99 |
+
The input is extended by replicating the last pixel.
|
| 100 |
+
|
| 101 |
+
'mirror' (`d c b | a b c d | c b a`)
|
| 102 |
+
The input is extended by reflecting about the center of the last
|
| 103 |
+
pixel. This mode is also sometimes referred to as whole-sample
|
| 104 |
+
symmetric.
|
| 105 |
+
|
| 106 |
+
'grid-wrap' (`a b c d | a b c d | a b c d`)
|
| 107 |
+
The input is extended by wrapping around to the opposite edge.
|
| 108 |
+
|
| 109 |
+
'wrap' (`d b c d | a b c d | b c a b`)
|
| 110 |
+
The input is extended by wrapping around to the opposite edge, but in a
|
| 111 |
+
way such that the last point and initial point exactly overlap. In this
|
| 112 |
+
case it is not well defined which sample will be chosen at the point of
|
| 113 |
+
overlap.""")
|
| 114 |
+
_mode_interp_mirror_doc = (
|
| 115 |
+
_mode_interp_constant_doc.replace("Default is 'constant'",
|
| 116 |
+
"Default is 'mirror'")
|
| 117 |
+
)
|
| 118 |
+
assert _mode_interp_mirror_doc != _mode_interp_constant_doc, \
|
| 119 |
+
'Default not replaced'
|
| 120 |
+
|
| 121 |
+
_mode_multiple_doc = (
|
| 122 |
+
"""mode : str or sequence, optional
|
| 123 |
+
The `mode` parameter determines how the input array is extended
|
| 124 |
+
when the filter overlaps a border. By passing a sequence of modes
|
| 125 |
+
with length equal to the number of dimensions of the input array,
|
| 126 |
+
different modes can be specified along each axis. Default value is
|
| 127 |
+
'reflect'. The valid values and their behavior is as follows:
|
| 128 |
+
|
| 129 |
+
'reflect' (`d c b a | a b c d | d c b a`)
|
| 130 |
+
The input is extended by reflecting about the edge of the last
|
| 131 |
+
pixel. This mode is also sometimes referred to as half-sample
|
| 132 |
+
symmetric.
|
| 133 |
+
|
| 134 |
+
'constant' (`k k k k | a b c d | k k k k`)
|
| 135 |
+
The input is extended by filling all values beyond the edge with
|
| 136 |
+
the same constant value, defined by the `cval` parameter.
|
| 137 |
+
|
| 138 |
+
'nearest' (`a a a a | a b c d | d d d d`)
|
| 139 |
+
The input is extended by replicating the last pixel.
|
| 140 |
+
|
| 141 |
+
'mirror' (`d c b | a b c d | c b a`)
|
| 142 |
+
The input is extended by reflecting about the center of the last
|
| 143 |
+
pixel. This mode is also sometimes referred to as whole-sample
|
| 144 |
+
symmetric.
|
| 145 |
+
|
| 146 |
+
'wrap' (`a b c d | a b c d | a b c d`)
|
| 147 |
+
The input is extended by wrapping around to the opposite edge.
|
| 148 |
+
|
| 149 |
+
For consistency with the interpolation functions, the following mode
|
| 150 |
+
names can also be used:
|
| 151 |
+
|
| 152 |
+
'grid-constant'
|
| 153 |
+
This is a synonym for 'constant'.
|
| 154 |
+
|
| 155 |
+
'grid-mirror'
|
| 156 |
+
This is a synonym for 'reflect'.
|
| 157 |
+
|
| 158 |
+
'grid-wrap'
|
| 159 |
+
This is a synonym for 'wrap'.""")
|
| 160 |
+
_cval_doc = (
|
| 161 |
+
"""cval : scalar, optional
|
| 162 |
+
Value to fill past edges of input if `mode` is 'constant'. Default
|
| 163 |
+
is 0.0.""")
|
| 164 |
+
_origin_doc = (
|
| 165 |
+
"""origin : int, optional
|
| 166 |
+
Controls the placement of the filter on the input array's pixels.
|
| 167 |
+
A value of 0 (the default) centers the filter over the pixel, with
|
| 168 |
+
positive values shifting the filter to the left, and negative ones
|
| 169 |
+
to the right.""")
|
| 170 |
+
_origin_multiple_doc = (
|
| 171 |
+
"""origin : int or sequence, optional
|
| 172 |
+
Controls the placement of the filter on the input array's pixels.
|
| 173 |
+
A value of 0 (the default) centers the filter over the pixel, with
|
| 174 |
+
positive values shifting the filter to the left, and negative ones
|
| 175 |
+
to the right. By passing a sequence of origins with length equal to
|
| 176 |
+
the number of dimensions of the input array, different shifts can
|
| 177 |
+
be specified along each axis.""")
|
| 178 |
+
_extra_arguments_doc = (
|
| 179 |
+
"""extra_arguments : sequence, optional
|
| 180 |
+
Sequence of extra positional arguments to pass to passed function.""")
|
| 181 |
+
_extra_keywords_doc = (
|
| 182 |
+
"""extra_keywords : dict, optional
|
| 183 |
+
dict of extra keyword arguments to pass to passed function.""")
|
| 184 |
+
_prefilter_doc = (
|
| 185 |
+
"""prefilter : bool, optional
|
| 186 |
+
Determines if the input array is prefiltered with `spline_filter`
|
| 187 |
+
before interpolation. The default is True, which will create a
|
| 188 |
+
temporary `float64` array of filtered values if ``order > 1``. If
|
| 189 |
+
setting this to False, the output will be slightly blurred if
|
| 190 |
+
``order > 1``, unless the input is prefiltered, i.e. it is the result
|
| 191 |
+
of calling `spline_filter` on the original input.""")
|
| 192 |
+
|
| 193 |
+
docdict = {
|
| 194 |
+
'input': _input_doc,
|
| 195 |
+
'axis': _axis_doc,
|
| 196 |
+
'output': _output_doc,
|
| 197 |
+
'size_foot': _size_foot_doc,
|
| 198 |
+
'mode_interp_constant': _mode_interp_constant_doc,
|
| 199 |
+
'mode_interp_mirror': _mode_interp_mirror_doc,
|
| 200 |
+
'mode_reflect': _mode_reflect_doc,
|
| 201 |
+
'mode_multiple': _mode_multiple_doc,
|
| 202 |
+
'cval': _cval_doc,
|
| 203 |
+
'origin': _origin_doc,
|
| 204 |
+
'origin_multiple': _origin_multiple_doc,
|
| 205 |
+
'extra_arguments': _extra_arguments_doc,
|
| 206 |
+
'extra_keywords': _extra_keywords_doc,
|
| 207 |
+
'prefilter': _prefilter_doc
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
docfiller: Final = doccer.filldoc(docdict)
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:546a88e983569411483e43f3e5a6faa86888e030e01ac2081991424a2aa95404
|
| 3 |
+
size 424104
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2003-2005 Peter J. Verveer
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions
|
| 5 |
+
# are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above
|
| 11 |
+
# copyright notice, this list of conditions and the following
|
| 12 |
+
# disclaimer in the documentation and/or other materials provided
|
| 13 |
+
# with the distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. The name of the author may not be used to endorse or promote
|
| 16 |
+
# products derived from this software without specific prior
|
| 17 |
+
# written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
| 20 |
+
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 21 |
+
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
| 23 |
+
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 25 |
+
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 27 |
+
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
from collections.abc import Iterable
|
| 32 |
+
import operator
|
| 33 |
+
import warnings
|
| 34 |
+
import numpy as np
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _extend_mode_to_code(mode, is_filter=False):
|
| 38 |
+
"""Convert an extension mode to the corresponding integer code.
|
| 39 |
+
"""
|
| 40 |
+
if mode == 'nearest':
|
| 41 |
+
return 0
|
| 42 |
+
elif mode == 'wrap':
|
| 43 |
+
return 1
|
| 44 |
+
elif mode in ['reflect', 'grid-mirror']:
|
| 45 |
+
return 2
|
| 46 |
+
elif mode == 'mirror':
|
| 47 |
+
return 3
|
| 48 |
+
elif mode == 'constant':
|
| 49 |
+
return 4
|
| 50 |
+
elif mode == 'grid-wrap' and is_filter:
|
| 51 |
+
return 1
|
| 52 |
+
elif mode == 'grid-wrap':
|
| 53 |
+
return 5
|
| 54 |
+
elif mode == 'grid-constant' and is_filter:
|
| 55 |
+
return 4
|
| 56 |
+
elif mode == 'grid-constant':
|
| 57 |
+
return 6
|
| 58 |
+
else:
|
| 59 |
+
raise RuntimeError('boundary mode not supported')
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _normalize_sequence(input, rank):
|
| 63 |
+
"""If input is a scalar, create a sequence of length equal to the
|
| 64 |
+
rank by duplicating the input. If input is a sequence,
|
| 65 |
+
check if its length is equal to the length of array.
|
| 66 |
+
"""
|
| 67 |
+
is_str = isinstance(input, str)
|
| 68 |
+
if not is_str and np.iterable(input):
|
| 69 |
+
normalized = list(input)
|
| 70 |
+
if len(normalized) != rank:
|
| 71 |
+
err = "sequence argument must have length equal to input rank"
|
| 72 |
+
raise RuntimeError(err)
|
| 73 |
+
else:
|
| 74 |
+
normalized = [input] * rank
|
| 75 |
+
return normalized
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def _get_output(output, input, shape=None, complex_output=False):
|
| 79 |
+
if shape is None:
|
| 80 |
+
shape = input.shape
|
| 81 |
+
if output is None:
|
| 82 |
+
if not complex_output:
|
| 83 |
+
output = np.zeros(shape, dtype=input.dtype.name)
|
| 84 |
+
else:
|
| 85 |
+
complex_type = np.promote_types(input.dtype, np.complex64)
|
| 86 |
+
output = np.zeros(shape, dtype=complex_type)
|
| 87 |
+
elif isinstance(output, (type, np.dtype)):
|
| 88 |
+
# Classes (like `np.float32`) and dtypes are interpreted as dtype
|
| 89 |
+
if complex_output and np.dtype(output).kind != 'c':
|
| 90 |
+
warnings.warn("promoting specified output dtype to complex", stacklevel=3)
|
| 91 |
+
output = np.promote_types(output, np.complex64)
|
| 92 |
+
output = np.zeros(shape, dtype=output)
|
| 93 |
+
elif isinstance(output, str):
|
| 94 |
+
output = np.dtype(output)
|
| 95 |
+
if complex_output and output.kind != 'c':
|
| 96 |
+
raise RuntimeError("output must have complex dtype")
|
| 97 |
+
elif not issubclass(output.type, np.number):
|
| 98 |
+
raise RuntimeError("output must have numeric dtype")
|
| 99 |
+
output = np.zeros(shape, dtype=output)
|
| 100 |
+
else:
|
| 101 |
+
# output was supplied as an array
|
| 102 |
+
output = np.asarray(output)
|
| 103 |
+
if output.shape != shape:
|
| 104 |
+
raise RuntimeError("output shape not correct")
|
| 105 |
+
elif complex_output and output.dtype.kind != 'c':
|
| 106 |
+
raise RuntimeError("output must have complex dtype")
|
| 107 |
+
return output
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _check_axes(axes, ndim):
|
| 111 |
+
if axes is None:
|
| 112 |
+
return tuple(range(ndim))
|
| 113 |
+
elif np.isscalar(axes):
|
| 114 |
+
axes = (operator.index(axes),)
|
| 115 |
+
elif isinstance(axes, Iterable):
|
| 116 |
+
for ax in axes:
|
| 117 |
+
axes = tuple(operator.index(ax) for ax in axes)
|
| 118 |
+
if ax < -ndim or ax > ndim - 1:
|
| 119 |
+
raise ValueError(f"specified axis: {ax} is out of range")
|
| 120 |
+
axes = tuple(ax % ndim if ax < 0 else ax for ax in axes)
|
| 121 |
+
else:
|
| 122 |
+
message = "axes must be an integer, iterable of integers, or None"
|
| 123 |
+
raise ValueError(message)
|
| 124 |
+
if len(tuple(set(axes))) != len(axes):
|
| 125 |
+
raise ValueError("axes must be unique")
|
| 126 |
+
return axes
|
| 127 |
+
|
| 128 |
+
def _skip_if_dtype(arg):
|
| 129 |
+
"""'array or dtype' polymorphism.
|
| 130 |
+
|
| 131 |
+
Return None for np.int8, dtype('float32') or 'f' etc
|
| 132 |
+
arg for np.empty(3) etc
|
| 133 |
+
"""
|
| 134 |
+
if isinstance(arg, str):
|
| 135 |
+
return None
|
| 136 |
+
if type(arg) is type:
|
| 137 |
+
return None if issubclass(arg, np.generic) else arg
|
| 138 |
+
else:
|
| 139 |
+
return None if isinstance(arg, np.dtype) else arg
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def _skip_if_int(arg):
|
| 143 |
+
return None if (arg is None or isinstance(arg, int)) else arg
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_rank_filter_1d.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (27.4 kB). View file
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/_support_alternative_backends.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
from scipy._lib._array_api import (
|
| 3 |
+
is_cupy, is_jax, scipy_namespace_for, SCIPY_ARRAY_API
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from ._ndimage_api import * # noqa: F403
|
| 8 |
+
from . import _ndimage_api
|
| 9 |
+
from . import _delegators
|
| 10 |
+
__all__ = _ndimage_api.__all__
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
MODULE_NAME = 'ndimage'
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def delegate_xp(delegator, module_name):
    """Decorator factory: dispatch an ndimage function to the scipy-compatible
    implementation that matches the array library of its arguments.

    Parameters
    ----------
    delegator : callable
        Per-function signature inspector (from ``_delegators``); given the
        call's arguments, returns the array namespace ("xp") they belong to.
    module_name : str
        Submodule name used to locate the namesake function in the
        alternative backend (here always 'ndimage').
    """
    def inner(func):
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            # Ask the delegator which array namespace the arguments live in.
            xp = delegator(*args, **kwds)

            # try delegating to a cupyx/jax namesake
            if is_cupy(xp):
                # https://github.com/cupy/cupy/issues/8336
                import importlib
                cupyx_module = importlib.import_module(f"cupyx.scipy.{module_name}")
                cupyx_func = getattr(cupyx_module, func.__name__)
                return cupyx_func(*args, **kwds)
            elif is_jax(xp) and func.__name__ == "map_coordinates":
                # JAX only provides map_coordinates; everything else falls
                # through to the NumPy implementation below.
                spx = scipy_namespace_for(xp)
                jax_module = getattr(spx, module_name)
                jax_func = getattr(jax_module, func.__name__)
                return jax_func(*args, **kwds)
            else:
                # the original function (does all np.asarray internally)
                # XXX: output arrays
                result = func(*args, **kwds)

                # Convert the NumPy result back to the caller's namespace.
                if isinstance(result, (np.ndarray, np.generic)):
                    # XXX: np.int32->np.array_0D
                    return xp.asarray(result)
                elif isinstance(result, int):
                    return result
                elif isinstance(result, dict):
                    # value_indices: result is {np.int64(1): (array(0), array(1))} etc
                    return {
                        k.item(): tuple(xp.asarray(vv) for vv in v)
                        for k,v in result.items()
                    }
                elif result is None:
                    # inplace operations
                    return result
                else:
                    # lists/tuples
                    return type(result)(
                        xp.asarray(x) if isinstance(x, np.ndarray) else x
                        for x in result
                    )
        return wrapper
    return inner
|
| 61 |
+
|
| 62 |
+
# ### decorate ###
# Wrap every public ndimage function with the backend-dispatch decorator
# (only when SCIPY_ARRAY_API is enabled; otherwise expose the bare NumPy
# implementation unchanged).
for func_name in _ndimage_api.__all__:
    bare_func = getattr(_ndimage_api, func_name)
    # Each function has a matching `<name>_signature` delegator that knows
    # which arguments carry the array namespace.
    delegator = getattr(_delegators, func_name + "_signature")

    f = (delegate_xp(delegator, MODULE_NAME)(bare_func)
         if SCIPY_ARRAY_API
         else bare_func)

    # add the decorated function to the namespace, to be imported in __init__.py
    vars()[func_name] = f
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/filters.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Names historically exposed by `scipy.ndimage.filters`; attribute access
# is routed through __getattr__ below so each use emits a deprecation
# warning before forwarding to the private `_filters` module.
__all__ = [  # noqa: F822
    'correlate1d', 'convolve1d', 'gaussian_filter1d',
    'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace',
    'laplace', 'gaussian_laplace', 'generic_gradient_magnitude',
    'gaussian_gradient_magnitude', 'correlate', 'convolve',
    'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
    'maximum_filter1d', 'minimum_filter', 'maximum_filter',
    'rank_filter', 'median_filter', 'percentile_filter',
    'generic_filter1d', 'generic_filter'
]


def __dir__():
    # Keep dir()/tab-completion limited to the deprecated public names.
    return __all__


def __getattr__(name):
    # Warn and forward attribute access to scipy.ndimage._filters.
    return _sub_module_deprecation(sub_package='ndimage', module='filters',
                                   private_modules=['_filters'], all=__all__,
                                   attribute=name)
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/fourier.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.ndimage` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'fourier_gaussian', 'fourier_uniform',
|
| 10 |
+
'fourier_ellipsoid', 'fourier_shift'
|
| 11 |
+
]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def __dir__():
|
| 15 |
+
return __all__
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def __getattr__(name):
|
| 19 |
+
return _sub_module_deprecation(sub_package='ndimage', module='fourier',
|
| 20 |
+
private_modules=['_fourier'], all=__all__,
|
| 21 |
+
attribute=name)
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/interpolation.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.ndimage` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'spline_filter1d', 'spline_filter',
|
| 10 |
+
'geometric_transform', 'map_coordinates',
|
| 11 |
+
'affine_transform', 'shift', 'zoom', 'rotate',
|
| 12 |
+
]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def __dir__():
|
| 16 |
+
return __all__
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def __getattr__(name):
|
| 20 |
+
return _sub_module_deprecation(sub_package='ndimage', module='interpolation',
|
| 21 |
+
private_modules=['_interpolation'], all=__all__,
|
| 22 |
+
attribute=name)
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/measurements.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.ndimage` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'label', 'find_objects', 'labeled_comprehension',
|
| 10 |
+
'sum', 'mean', 'variance', 'standard_deviation',
|
| 11 |
+
'minimum', 'maximum', 'median', 'minimum_position',
|
| 12 |
+
'maximum_position', 'extrema', 'center_of_mass',
|
| 13 |
+
'histogram', 'watershed_ift', 'sum_labels'
|
| 14 |
+
]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def __dir__():
|
| 18 |
+
return __all__
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def __getattr__(name):
|
| 22 |
+
return _sub_module_deprecation(sub_package='ndimage', module='measurements',
|
| 23 |
+
private_modules=['_measurements'], all=__all__,
|
| 24 |
+
attribute=name)
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/morphology.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.ndimage` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'iterate_structure', 'generate_binary_structure',
|
| 10 |
+
'binary_erosion', 'binary_dilation', 'binary_opening',
|
| 11 |
+
'binary_closing', 'binary_hit_or_miss', 'binary_propagation',
|
| 12 |
+
'binary_fill_holes', 'grey_erosion', 'grey_dilation',
|
| 13 |
+
'grey_opening', 'grey_closing', 'morphological_gradient',
|
| 14 |
+
'morphological_laplace', 'white_tophat', 'black_tophat',
|
| 15 |
+
'distance_transform_bf', 'distance_transform_cdt',
|
| 16 |
+
'distance_transform_edt'
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def __dir__():
|
| 21 |
+
return __all__
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def __getattr__(name):
|
| 25 |
+
return _sub_module_deprecation(sub_package='ndimage', module='morphology',
|
| 26 |
+
private_modules=['_morphology'], all=__all__,
|
| 27 |
+
attribute=name)
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
# list of numarray data types
|
| 4 |
+
integer_types: list[str] = [
|
| 5 |
+
"int8", "uint8", "int16", "uint16",
|
| 6 |
+
"int32", "uint32", "int64", "uint64"]
|
| 7 |
+
|
| 8 |
+
float_types: list[str] = ["float32", "float64"]
|
| 9 |
+
|
| 10 |
+
complex_types: list[str] = ["complex64", "complex128"]
|
| 11 |
+
|
| 12 |
+
types: list[str] = integer_types + float_types
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy._lib._array_api import xp_assert_close
|
| 3 |
+
|
| 4 |
+
from scipy import ndimage
|
| 5 |
+
from scipy.ndimage import _ctest
|
| 6 |
+
from scipy.ndimage import _cytest
|
| 7 |
+
from scipy._lib._ccallback import LowLevelCallable
|
| 8 |
+
|
| 9 |
+
FILTER1D_FUNCTIONS = [
|
| 10 |
+
lambda filter_size: _ctest.filter1d(filter_size),
|
| 11 |
+
lambda filter_size: _cytest.filter1d(filter_size, with_signature=False),
|
| 12 |
+
lambda filter_size: LowLevelCallable(
|
| 13 |
+
_cytest.filter1d(filter_size, with_signature=True)
|
| 14 |
+
),
|
| 15 |
+
lambda filter_size: LowLevelCallable.from_cython(
|
| 16 |
+
_cytest, "_filter1d",
|
| 17 |
+
_cytest.filter1d_capsule(filter_size),
|
| 18 |
+
),
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
FILTER2D_FUNCTIONS = [
|
| 22 |
+
lambda weights: _ctest.filter2d(weights),
|
| 23 |
+
lambda weights: _cytest.filter2d(weights, with_signature=False),
|
| 24 |
+
lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)),
|
| 25 |
+
lambda weights: LowLevelCallable.from_cython(_cytest,
|
| 26 |
+
"_filter2d",
|
| 27 |
+
_cytest.filter2d_capsule(weights),),
|
| 28 |
+
]
|
| 29 |
+
|
| 30 |
+
TRANSFORM_FUNCTIONS = [
|
| 31 |
+
lambda shift: _ctest.transform(shift),
|
| 32 |
+
lambda shift: _cytest.transform(shift, with_signature=False),
|
| 33 |
+
lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)),
|
| 34 |
+
lambda shift: LowLevelCallable.from_cython(_cytest,
|
| 35 |
+
"_transform",
|
| 36 |
+
_cytest.transform_capsule(shift),),
|
| 37 |
+
]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def test_generic_filter():
|
| 41 |
+
def filter2d(footprint_elements, weights):
|
| 42 |
+
return (weights*footprint_elements).sum()
|
| 43 |
+
|
| 44 |
+
def check(j):
|
| 45 |
+
func = FILTER2D_FUNCTIONS[j]
|
| 46 |
+
|
| 47 |
+
im = np.ones((20, 20))
|
| 48 |
+
im[:10,:10] = 0
|
| 49 |
+
footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
|
| 50 |
+
footprint_size = np.count_nonzero(footprint)
|
| 51 |
+
weights = np.ones(footprint_size)/footprint_size
|
| 52 |
+
|
| 53 |
+
res = ndimage.generic_filter(im, func(weights),
|
| 54 |
+
footprint=footprint)
|
| 55 |
+
std = ndimage.generic_filter(im, filter2d, footprint=footprint,
|
| 56 |
+
extra_arguments=(weights,))
|
| 57 |
+
xp_assert_close(res, std, err_msg=f"#{j} failed")
|
| 58 |
+
|
| 59 |
+
for j, func in enumerate(FILTER2D_FUNCTIONS):
|
| 60 |
+
check(j)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def test_generic_filter1d():
|
| 64 |
+
def filter1d(input_line, output_line, filter_size):
|
| 65 |
+
for i in range(output_line.size):
|
| 66 |
+
output_line[i] = 0
|
| 67 |
+
for j in range(filter_size):
|
| 68 |
+
output_line[i] += input_line[i+j]
|
| 69 |
+
output_line /= filter_size
|
| 70 |
+
|
| 71 |
+
def check(j):
|
| 72 |
+
func = FILTER1D_FUNCTIONS[j]
|
| 73 |
+
|
| 74 |
+
im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1))
|
| 75 |
+
filter_size = 3
|
| 76 |
+
|
| 77 |
+
res = ndimage.generic_filter1d(im, func(filter_size),
|
| 78 |
+
filter_size)
|
| 79 |
+
std = ndimage.generic_filter1d(im, filter1d, filter_size,
|
| 80 |
+
extra_arguments=(filter_size,))
|
| 81 |
+
xp_assert_close(res, std, err_msg=f"#{j} failed")
|
| 82 |
+
|
| 83 |
+
for j, func in enumerate(FILTER1D_FUNCTIONS):
|
| 84 |
+
check(j)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def test_geometric_transform():
|
| 88 |
+
def transform(output_coordinates, shift):
|
| 89 |
+
return output_coordinates[0] - shift, output_coordinates[1] - shift
|
| 90 |
+
|
| 91 |
+
def check(j):
|
| 92 |
+
func = TRANSFORM_FUNCTIONS[j]
|
| 93 |
+
|
| 94 |
+
im = np.arange(12).reshape(4, 3).astype(np.float64)
|
| 95 |
+
shift = 0.5
|
| 96 |
+
|
| 97 |
+
res = ndimage.geometric_transform(im, func(shift))
|
| 98 |
+
std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,))
|
| 99 |
+
xp_assert_close(res, std, err_msg=f"#{j} failed")
|
| 100 |
+
|
| 101 |
+
for j, func in enumerate(TRANSFORM_FUNCTIONS):
|
| 102 |
+
check(j)
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Testing data types for ndimage calls
|
| 2 |
+
"""
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from scipy._lib._array_api import assert_array_almost_equal
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from scipy import ndimage
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def test_map_coordinates_dts():
|
| 12 |
+
# check that ndimage accepts different data types for interpolation
|
| 13 |
+
data = np.array([[4, 1, 3, 2],
|
| 14 |
+
[7, 6, 8, 5],
|
| 15 |
+
[3, 5, 3, 6]])
|
| 16 |
+
shifted_data = np.array([[0, 0, 0, 0],
|
| 17 |
+
[0, 4, 1, 3],
|
| 18 |
+
[0, 7, 6, 8]])
|
| 19 |
+
idx = np.indices(data.shape)
|
| 20 |
+
dts = (np.uint8, np.uint16, np.uint32, np.uint64,
|
| 21 |
+
np.int8, np.int16, np.int32, np.int64,
|
| 22 |
+
np.intp, np.uintp, np.float32, np.float64)
|
| 23 |
+
for order in range(0, 6):
|
| 24 |
+
for data_dt in dts:
|
| 25 |
+
these_data = data.astype(data_dt)
|
| 26 |
+
for coord_dt in dts:
|
| 27 |
+
# affine mapping
|
| 28 |
+
mat = np.eye(2, dtype=coord_dt)
|
| 29 |
+
off = np.zeros((2,), dtype=coord_dt)
|
| 30 |
+
out = ndimage.affine_transform(these_data, mat, off)
|
| 31 |
+
assert_array_almost_equal(these_data, out)
|
| 32 |
+
# map coordinates
|
| 33 |
+
coords_m1 = idx.astype(coord_dt) - 1
|
| 34 |
+
coords_p10 = idx.astype(coord_dt) + 10
|
| 35 |
+
out = ndimage.map_coordinates(these_data, coords_m1, order=order)
|
| 36 |
+
assert_array_almost_equal(out, shifted_data)
|
| 37 |
+
# check constant fill works
|
| 38 |
+
out = ndimage.map_coordinates(these_data, coords_p10, order=order)
|
| 39 |
+
assert_array_almost_equal(out, np.zeros((3,4)))
|
| 40 |
+
# check shift and zoom
|
| 41 |
+
out = ndimage.shift(these_data, 1)
|
| 42 |
+
assert_array_almost_equal(out, shifted_data)
|
| 43 |
+
out = ndimage.zoom(these_data, 1)
|
| 44 |
+
assert_array_almost_equal(these_data, out)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@pytest.mark.xfail(True, reason="Broken on many platforms")
|
| 48 |
+
def test_uint64_max():
|
| 49 |
+
# Test interpolation respects uint64 max. Reported to fail at least on
|
| 50 |
+
# win32 (due to the 32 bit visual C compiler using signed int64 when
|
| 51 |
+
# converting between uint64 to double) and Debian on s390x.
|
| 52 |
+
# Interpolation is always done in double precision floating point, so
|
| 53 |
+
# we use the largest uint64 value for which int(float(big)) still fits
|
| 54 |
+
# in a uint64.
|
| 55 |
+
# This test was last enabled on macOS only, and there it started failing
|
| 56 |
+
# on arm64 as well (see gh-19117).
|
| 57 |
+
big = 2**64 - 1025
|
| 58 |
+
arr = np.array([big, big, big], dtype=np.uint64)
|
| 59 |
+
# Tests geometric transform (map_coordinates, affine_transform)
|
| 60 |
+
inds = np.indices(arr.shape) - 0.1
|
| 61 |
+
x = ndimage.map_coordinates(arr, inds)
|
| 62 |
+
assert x[1] == int(float(big))
|
| 63 |
+
assert x[2] == int(float(big))
|
| 64 |
+
# Tests zoom / shift
|
| 65 |
+
x = ndimage.shift(arr, 0.1)
|
| 66 |
+
assert x[1] == int(float(big))
|
| 67 |
+
assert x[2] == int(float(big))
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_eagle/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py
ADDED
|
@@ -0,0 +1,1484 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import suppress_warnings
|
| 5 |
+
from scipy._lib._array_api import (
|
| 6 |
+
xp_assert_equal, xp_assert_close,
|
| 7 |
+
assert_array_almost_equal,
|
| 8 |
+
)
|
| 9 |
+
from scipy._lib._array_api import is_cupy, is_jax, _asarray, array_namespace
|
| 10 |
+
|
| 11 |
+
import pytest
|
| 12 |
+
from pytest import raises as assert_raises
|
| 13 |
+
import scipy.ndimage as ndimage
|
| 14 |
+
|
| 15 |
+
from . import types
|
| 16 |
+
|
| 17 |
+
from scipy.conftest import array_api_compatible
|
| 18 |
+
skip_xp_backends = pytest.mark.skip_xp_backends
|
| 19 |
+
pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends"),
|
| 20 |
+
skip_xp_backends(cpu_only=True, exceptions=['cupy', 'jax.numpy'],)]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
eps = 1e-12
|
| 24 |
+
|
| 25 |
+
ndimage_to_numpy_mode = {
|
| 26 |
+
'mirror': 'reflect',
|
| 27 |
+
'reflect': 'symmetric',
|
| 28 |
+
'grid-mirror': 'symmetric',
|
| 29 |
+
'grid-wrap': 'wrap',
|
| 30 |
+
'nearest': 'edge',
|
| 31 |
+
'grid-constant': 'constant',
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class TestBoundaries:
|
| 36 |
+
|
| 37 |
+
@skip_xp_backends("cupy", reason="CuPy does not have geometric_transform")
|
| 38 |
+
@pytest.mark.parametrize(
|
| 39 |
+
'mode, expected_value',
|
| 40 |
+
[('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
|
| 41 |
+
('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
|
| 42 |
+
('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
|
| 43 |
+
('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
|
| 44 |
+
('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
|
| 45 |
+
('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
|
| 46 |
+
('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
|
| 47 |
+
)
|
| 48 |
+
def test_boundaries(self, mode, expected_value, xp):
|
| 49 |
+
def shift(x):
|
| 50 |
+
return (x[0] + 0.5,)
|
| 51 |
+
|
| 52 |
+
data = xp.asarray([1, 2, 3, 4.])
|
| 53 |
+
xp_assert_equal(
|
| 54 |
+
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
|
| 55 |
+
output_shape=(7,), order=1),
|
| 56 |
+
xp.asarray(expected_value))
|
| 57 |
+
|
| 58 |
+
@skip_xp_backends("cupy", reason="CuPy does not have geometric_transform")
|
| 59 |
+
@pytest.mark.parametrize(
|
| 60 |
+
'mode, expected_value',
|
| 61 |
+
[('nearest', [1, 1, 2, 3]),
|
| 62 |
+
('wrap', [3, 1, 2, 3]),
|
| 63 |
+
('grid-wrap', [4, 1, 2, 3]),
|
| 64 |
+
('mirror', [2, 1, 2, 3]),
|
| 65 |
+
('reflect', [1, 1, 2, 3]),
|
| 66 |
+
('constant', [-1, 1, 2, 3]),
|
| 67 |
+
('grid-constant', [-1, 1, 2, 3])]
|
| 68 |
+
)
|
| 69 |
+
def test_boundaries2(self, mode, expected_value, xp):
|
| 70 |
+
def shift(x):
|
| 71 |
+
return (x[0] - 0.9,)
|
| 72 |
+
|
| 73 |
+
data = xp.asarray([1, 2, 3, 4])
|
| 74 |
+
xp_assert_equal(
|
| 75 |
+
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
|
| 76 |
+
output_shape=(4,)),
|
| 77 |
+
xp.asarray(expected_value))
|
| 78 |
+
|
| 79 |
+
@pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
|
| 80 |
+
'grid-wrap', 'grid-constant',
|
| 81 |
+
'nearest'])
|
| 82 |
+
@pytest.mark.parametrize('order', range(6))
|
| 83 |
+
def test_boundary_spline_accuracy(self, mode, order, xp):
|
| 84 |
+
"""Tests based on examples from gh-2640"""
|
| 85 |
+
if (is_jax(xp) and
|
| 86 |
+
(mode not in ['mirror', 'reflect', 'constant', 'wrap', 'nearest']
|
| 87 |
+
or order > 1)
|
| 88 |
+
):
|
| 89 |
+
pytest.xfail("Jax does not support grid- modes or order > 1")
|
| 90 |
+
|
| 91 |
+
np_data = np.arange(-6, 7, dtype=np.float64)
|
| 92 |
+
data = xp.asarray(np_data)
|
| 93 |
+
x = xp.asarray(np.linspace(-8, 15, num=1000))
|
| 94 |
+
newaxis = array_namespace(x).newaxis
|
| 95 |
+
y = ndimage.map_coordinates(data, x[newaxis, ...], order=order, mode=mode)
|
| 96 |
+
|
| 97 |
+
# compute expected value using explicit padding via np.pad
|
| 98 |
+
npad = 32
|
| 99 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
| 100 |
+
padded = xp.asarray(np.pad(np_data, npad, mode=pad_mode))
|
| 101 |
+
coords = xp.asarray(npad + x)[newaxis, ...]
|
| 102 |
+
expected = ndimage.map_coordinates(padded, coords, order=order, mode=mode)
|
| 103 |
+
|
| 104 |
+
atol = 1e-5 if mode == 'grid-constant' else 1e-12
|
| 105 |
+
xp_assert_close(y, expected, rtol=1e-7, atol=atol)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
| 109 |
+
@pytest.mark.parametrize('dtype', types)
|
| 110 |
+
class TestSpline:
|
| 111 |
+
|
| 112 |
+
def test_spline01(self, dtype, order, xp):
|
| 113 |
+
dtype = getattr(xp, dtype)
|
| 114 |
+
data = xp.ones([], dtype=dtype)
|
| 115 |
+
out = ndimage.spline_filter(data, order=order)
|
| 116 |
+
assert out == xp.asarray(1, dtype=out.dtype)
|
| 117 |
+
|
| 118 |
+
def test_spline02(self, dtype, order, xp):
|
| 119 |
+
dtype = getattr(xp, dtype)
|
| 120 |
+
data = xp.asarray([1], dtype=dtype)
|
| 121 |
+
out = ndimage.spline_filter(data, order=order)
|
| 122 |
+
assert_array_almost_equal(out, xp.asarray([1]))
|
| 123 |
+
|
| 124 |
+
@skip_xp_backends(np_only=True, reason='output=dtype is numpy-specific')
|
| 125 |
+
def test_spline03(self, dtype, order, xp):
|
| 126 |
+
dtype = getattr(xp, dtype)
|
| 127 |
+
data = xp.ones([], dtype=dtype)
|
| 128 |
+
out = ndimage.spline_filter(data, order, output=dtype)
|
| 129 |
+
assert out == xp.asarray(1, dtype=out.dtype)
|
| 130 |
+
|
| 131 |
+
def test_spline04(self, dtype, order, xp):
|
| 132 |
+
dtype = getattr(xp, dtype)
|
| 133 |
+
data = xp.ones([4], dtype=dtype)
|
| 134 |
+
out = ndimage.spline_filter(data, order)
|
| 135 |
+
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 1]))
|
| 136 |
+
|
| 137 |
+
def test_spline05(self, dtype, order, xp):
|
| 138 |
+
dtype = getattr(xp, dtype)
|
| 139 |
+
data = xp.ones([4, 4], dtype=dtype)
|
| 140 |
+
out = ndimage.spline_filter(data, order=order)
|
| 141 |
+
expected = xp.asarray([[1, 1, 1, 1],
|
| 142 |
+
[1, 1, 1, 1],
|
| 143 |
+
[1, 1, 1, 1],
|
| 144 |
+
[1, 1, 1, 1]])
|
| 145 |
+
assert_array_almost_equal(out, expected)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
@skip_xp_backends("cupy", reason="CuPy does not have geometric_transform")
|
| 149 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 150 |
+
class TestGeometricTransform:
|
| 151 |
+
|
| 152 |
+
def test_geometric_transform01(self, order, xp):
|
| 153 |
+
data = xp.asarray([1])
|
| 154 |
+
|
| 155 |
+
def mapping(x):
|
| 156 |
+
return x
|
| 157 |
+
|
| 158 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 159 |
+
order=order)
|
| 160 |
+
assert_array_almost_equal(out, xp.asarray([1], dtype=out.dtype))
|
| 161 |
+
|
| 162 |
+
def test_geometric_transform02(self, order, xp):
|
| 163 |
+
data = xp.ones([4])
|
| 164 |
+
|
| 165 |
+
def mapping(x):
|
| 166 |
+
return x
|
| 167 |
+
|
| 168 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 169 |
+
order=order)
|
| 170 |
+
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 1], dtype=out.dtype))
|
| 171 |
+
|
| 172 |
+
def test_geometric_transform03(self, order, xp):
|
| 173 |
+
data = xp.ones([4])
|
| 174 |
+
|
| 175 |
+
def mapping(x):
|
| 176 |
+
return (x[0] - 1,)
|
| 177 |
+
|
| 178 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 179 |
+
order=order)
|
| 180 |
+
assert_array_almost_equal(out, xp.asarray([0, 1, 1, 1], dtype=out.dtype))
|
| 181 |
+
|
| 182 |
+
def test_geometric_transform04(self, order, xp):
|
| 183 |
+
data = xp.asarray([4, 1, 3, 2])
|
| 184 |
+
|
| 185 |
+
def mapping(x):
|
| 186 |
+
return (x[0] - 1,)
|
| 187 |
+
|
| 188 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 189 |
+
order=order)
|
| 190 |
+
assert_array_almost_equal(out, xp.asarray([0, 4, 1, 3], dtype=out.dtype))
|
| 191 |
+
|
| 192 |
+
@pytest.mark.parametrize('dtype', ["float64", "complex128"])
|
| 193 |
+
def test_geometric_transform05(self, order, dtype, xp):
|
| 194 |
+
dtype = getattr(xp, dtype)
|
| 195 |
+
data = xp.asarray([[1, 1, 1, 1],
|
| 196 |
+
[1, 1, 1, 1],
|
| 197 |
+
[1, 1, 1, 1]], dtype=dtype)
|
| 198 |
+
expected = xp.asarray([[0, 1, 1, 1],
|
| 199 |
+
[0, 1, 1, 1],
|
| 200 |
+
[0, 1, 1, 1]], dtype=dtype)
|
| 201 |
+
|
| 202 |
+
isdtype = array_namespace(data).isdtype
|
| 203 |
+
if isdtype(data.dtype, 'complex floating'):
|
| 204 |
+
data -= 1j * data
|
| 205 |
+
expected -= 1j * expected
|
| 206 |
+
|
| 207 |
+
def mapping(x):
|
| 208 |
+
return (x[0], x[1] - 1)
|
| 209 |
+
|
| 210 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 211 |
+
order=order)
|
| 212 |
+
assert_array_almost_equal(out, expected)
|
| 213 |
+
|
| 214 |
+
def test_geometric_transform06(self, order, xp):
|
| 215 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 216 |
+
[7, 6, 8, 5],
|
| 217 |
+
[3, 5, 3, 6]])
|
| 218 |
+
|
| 219 |
+
def mapping(x):
|
| 220 |
+
return (x[0], x[1] - 1)
|
| 221 |
+
|
| 222 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 223 |
+
order=order)
|
| 224 |
+
expected = xp.asarray([[0, 4, 1, 3],
|
| 225 |
+
[0, 7, 6, 8],
|
| 226 |
+
[0, 3, 5, 3]], dtype=out.dtype)
|
| 227 |
+
assert_array_almost_equal(out, expected)
|
| 228 |
+
|
| 229 |
+
def test_geometric_transform07(self, order, xp):
|
| 230 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 231 |
+
[7, 6, 8, 5],
|
| 232 |
+
[3, 5, 3, 6]])
|
| 233 |
+
|
| 234 |
+
def mapping(x):
|
| 235 |
+
return (x[0] - 1, x[1])
|
| 236 |
+
|
| 237 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 238 |
+
order=order)
|
| 239 |
+
expected = xp.asarray([[0, 0, 0, 0],
|
| 240 |
+
[4, 1, 3, 2],
|
| 241 |
+
[7, 6, 8, 5]], dtype=out.dtype)
|
| 242 |
+
assert_array_almost_equal(out, expected)
|
| 243 |
+
|
| 244 |
+
def test_geometric_transform08(self, order, xp):
|
| 245 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 246 |
+
[7, 6, 8, 5],
|
| 247 |
+
[3, 5, 3, 6]])
|
| 248 |
+
|
| 249 |
+
def mapping(x):
|
| 250 |
+
return (x[0] - 1, x[1] - 1)
|
| 251 |
+
|
| 252 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
| 253 |
+
order=order)
|
| 254 |
+
expected = xp.asarray([[0, 0, 0, 0],
|
| 255 |
+
[0, 4, 1, 3],
|
| 256 |
+
[0, 7, 6, 8]], dtype=out.dtype)
|
| 257 |
+
assert_array_almost_equal(out, expected)
|
| 258 |
+
|
| 259 |
+
def test_geometric_transform10(self, order, xp):
|
| 260 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 261 |
+
[7, 6, 8, 5],
|
| 262 |
+
[3, 5, 3, 6]])
|
| 263 |
+
|
| 264 |
+
def mapping(x):
|
| 265 |
+
return (x[0] - 1, x[1] - 1)
|
| 266 |
+
|
| 267 |
+
if (order > 1):
|
| 268 |
+
filtered = ndimage.spline_filter(data, order=order)
|
| 269 |
+
else:
|
| 270 |
+
filtered = data
|
| 271 |
+
out = ndimage.geometric_transform(filtered, mapping, data.shape,
|
| 272 |
+
order=order, prefilter=False)
|
| 273 |
+
expected = xp.asarray([[0, 0, 0, 0],
|
| 274 |
+
[0, 4, 1, 3],
|
| 275 |
+
[0, 7, 6, 8]], dtype=out.dtype)
|
| 276 |
+
assert_array_almost_equal(out, expected)
|
| 277 |
+
|
| 278 |
+
def test_geometric_transform13(self, order, xp):
|
| 279 |
+
data = xp.ones([2], dtype=xp.float64)
|
| 280 |
+
|
| 281 |
+
def mapping(x):
|
| 282 |
+
return (x[0] // 2,)
|
| 283 |
+
|
| 284 |
+
out = ndimage.geometric_transform(data, mapping, [4], order=order)
|
| 285 |
+
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 1], dtype=out.dtype))
|
| 286 |
+
|
| 287 |
+
def test_geometric_transform14(self, order, xp):
|
| 288 |
+
data = xp.asarray([1, 5, 2, 6, 3, 7, 4, 4])
|
| 289 |
+
|
| 290 |
+
def mapping(x):
|
| 291 |
+
return (2 * x[0],)
|
| 292 |
+
|
| 293 |
+
out = ndimage.geometric_transform(data, mapping, [4], order=order)
|
| 294 |
+
assert_array_almost_equal(out, xp.asarray([1, 2, 3, 4], dtype=out.dtype))
|
| 295 |
+
|
| 296 |
+
def test_geometric_transform15(self, order, xp):
|
| 297 |
+
data = [1, 2, 3, 4]
|
| 298 |
+
|
| 299 |
+
def mapping(x):
|
| 300 |
+
return (x[0] / 2,)
|
| 301 |
+
|
| 302 |
+
out = ndimage.geometric_transform(data, mapping, [8], order=order)
|
| 303 |
+
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
|
| 304 |
+
|
| 305 |
+
def test_geometric_transform16(self, order, xp):
|
| 306 |
+
data = [[1, 2, 3, 4],
|
| 307 |
+
[5, 6, 7, 8],
|
| 308 |
+
[9.0, 10, 11, 12]]
|
| 309 |
+
|
| 310 |
+
def mapping(x):
|
| 311 |
+
return (x[0], x[1] * 2)
|
| 312 |
+
|
| 313 |
+
out = ndimage.geometric_transform(data, mapping, (3, 2),
|
| 314 |
+
order=order)
|
| 315 |
+
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
|
| 316 |
+
|
| 317 |
+
def test_geometric_transform17(self, order, xp):
|
| 318 |
+
data = [[1, 2, 3, 4],
|
| 319 |
+
[5, 6, 7, 8],
|
| 320 |
+
[9, 10, 11, 12]]
|
| 321 |
+
|
| 322 |
+
def mapping(x):
|
| 323 |
+
return (x[0] * 2, x[1])
|
| 324 |
+
|
| 325 |
+
out = ndimage.geometric_transform(data, mapping, (1, 4),
|
| 326 |
+
order=order)
|
| 327 |
+
assert_array_almost_equal(out, [[1, 2, 3, 4]])
|
| 328 |
+
|
| 329 |
+
def test_geometric_transform18(self, order, xp):
|
| 330 |
+
data = [[1, 2, 3, 4],
|
| 331 |
+
[5, 6, 7, 8],
|
| 332 |
+
[9, 10, 11, 12]]
|
| 333 |
+
|
| 334 |
+
def mapping(x):
|
| 335 |
+
return (x[0] * 2, x[1] * 2)
|
| 336 |
+
|
| 337 |
+
out = ndimage.geometric_transform(data, mapping, (1, 2),
|
| 338 |
+
order=order)
|
| 339 |
+
assert_array_almost_equal(out, [[1, 3]])
|
| 340 |
+
|
| 341 |
+
def test_geometric_transform19(self, order, xp):
|
| 342 |
+
data = [[1, 2, 3, 4],
|
| 343 |
+
[5, 6, 7, 8],
|
| 344 |
+
[9, 10, 11, 12]]
|
| 345 |
+
|
| 346 |
+
def mapping(x):
|
| 347 |
+
return (x[0], x[1] / 2)
|
| 348 |
+
|
| 349 |
+
out = ndimage.geometric_transform(data, mapping, (3, 8),
|
| 350 |
+
order=order)
|
| 351 |
+
assert_array_almost_equal(out[..., ::2], data)
|
| 352 |
+
|
| 353 |
+
def test_geometric_transform20(self, order, xp):
|
| 354 |
+
data = [[1, 2, 3, 4],
|
| 355 |
+
[5, 6, 7, 8],
|
| 356 |
+
[9, 10, 11, 12]]
|
| 357 |
+
|
| 358 |
+
def mapping(x):
|
| 359 |
+
return (x[0] / 2, x[1])
|
| 360 |
+
|
| 361 |
+
out = ndimage.geometric_transform(data, mapping, (6, 4),
|
| 362 |
+
order=order)
|
| 363 |
+
assert_array_almost_equal(out[::2, ...], data)
|
| 364 |
+
|
| 365 |
+
def test_geometric_transform21(self, order, xp):
|
| 366 |
+
data = [[1, 2, 3, 4],
|
| 367 |
+
[5, 6, 7, 8],
|
| 368 |
+
[9, 10, 11, 12]]
|
| 369 |
+
|
| 370 |
+
def mapping(x):
|
| 371 |
+
return (x[0] / 2, x[1] / 2)
|
| 372 |
+
|
| 373 |
+
out = ndimage.geometric_transform(data, mapping, (6, 8),
|
| 374 |
+
order=order)
|
| 375 |
+
assert_array_almost_equal(out[::2, ::2], data)
|
| 376 |
+
|
| 377 |
+
def test_geometric_transform22(self, order, xp):
|
| 378 |
+
data = xp.asarray([[1, 2, 3, 4],
|
| 379 |
+
[5, 6, 7, 8],
|
| 380 |
+
[9, 10, 11, 12]], dtype=xp.float64)
|
| 381 |
+
|
| 382 |
+
def mapping1(x):
|
| 383 |
+
return (x[0] / 2, x[1] / 2)
|
| 384 |
+
|
| 385 |
+
def mapping2(x):
|
| 386 |
+
return (x[0] * 2, x[1] * 2)
|
| 387 |
+
|
| 388 |
+
out = ndimage.geometric_transform(data, mapping1,
|
| 389 |
+
(6, 8), order=order)
|
| 390 |
+
out = ndimage.geometric_transform(out, mapping2,
|
| 391 |
+
(3, 4), order=order)
|
| 392 |
+
assert_array_almost_equal(out, data)
|
| 393 |
+
|
| 394 |
+
def test_geometric_transform23(self, order, xp):
|
| 395 |
+
data = [[1, 2, 3, 4],
|
| 396 |
+
[5, 6, 7, 8],
|
| 397 |
+
[9, 10, 11, 12]]
|
| 398 |
+
|
| 399 |
+
def mapping(x):
|
| 400 |
+
return (1, x[0] * 2)
|
| 401 |
+
|
| 402 |
+
out = ndimage.geometric_transform(data, mapping, (2,), order=order)
|
| 403 |
+
out = out.astype(np.int32)
|
| 404 |
+
assert_array_almost_equal(out, [5, 7])
|
| 405 |
+
|
| 406 |
+
def test_geometric_transform24(self, order, xp):
|
| 407 |
+
data = [[1, 2, 3, 4],
|
| 408 |
+
[5, 6, 7, 8],
|
| 409 |
+
[9, 10, 11, 12]]
|
| 410 |
+
|
| 411 |
+
def mapping(x, a, b):
|
| 412 |
+
return (a, x[0] * b)
|
| 413 |
+
|
| 414 |
+
out = ndimage.geometric_transform(
|
| 415 |
+
data, mapping, (2,), order=order, extra_arguments=(1,),
|
| 416 |
+
extra_keywords={'b': 2})
|
| 417 |
+
assert_array_almost_equal(out, [5, 7])
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
@skip_xp_backends("cupy", reason="CuPy does not have geometric_transform")
|
| 421 |
+
class TestGeometricTransformExtra:
|
| 422 |
+
|
| 423 |
+
def test_geometric_transform_grid_constant_order1(self, xp):
|
| 424 |
+
|
| 425 |
+
# verify interpolation outside the original bounds
|
| 426 |
+
x = xp.asarray([[1, 2, 3],
|
| 427 |
+
[4, 5, 6]], dtype=xp.float64)
|
| 428 |
+
|
| 429 |
+
def mapping(x):
|
| 430 |
+
return (x[0] - 0.5), (x[1] - 0.5)
|
| 431 |
+
|
| 432 |
+
expected_result = xp.asarray([[0.25, 0.75, 1.25],
|
| 433 |
+
[1.25, 3.00, 4.00]])
|
| 434 |
+
assert_array_almost_equal(
|
| 435 |
+
ndimage.geometric_transform(x, mapping, mode='grid-constant',
|
| 436 |
+
order=1),
|
| 437 |
+
expected_result,
|
| 438 |
+
)
|
| 439 |
+
|
| 440 |
+
@pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
|
| 441 |
+
'mirror', 'reflect'])
|
| 442 |
+
@pytest.mark.parametrize('order', range(6))
|
| 443 |
+
def test_geometric_transform_vs_padded(self, order, mode, xp):
|
| 444 |
+
|
| 445 |
+
def mapping(x):
|
| 446 |
+
return (x[0] - 0.4), (x[1] + 2.3)
|
| 447 |
+
|
| 448 |
+
# Manually pad and then extract center after the transform to get the
|
| 449 |
+
# expected result.
|
| 450 |
+
x = np.arange(144, dtype=float).reshape(12, 12)
|
| 451 |
+
npad = 24
|
| 452 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
| 453 |
+
x_padded = np.pad(x, npad, mode=pad_mode)
|
| 454 |
+
|
| 455 |
+
x = xp.asarray(x)
|
| 456 |
+
x_padded = xp.asarray(x_padded)
|
| 457 |
+
|
| 458 |
+
center_slice = tuple([slice(npad, -npad)] * x.ndim)
|
| 459 |
+
expected_result = ndimage.geometric_transform(
|
| 460 |
+
x_padded, mapping, mode=mode, order=order)[center_slice]
|
| 461 |
+
|
| 462 |
+
xp_assert_close(
|
| 463 |
+
ndimage.geometric_transform(x, mapping, mode=mode,
|
| 464 |
+
order=order),
|
| 465 |
+
expected_result,
|
| 466 |
+
rtol=1e-7,
|
| 467 |
+
)
|
| 468 |
+
|
| 469 |
+
@skip_xp_backends(np_only=True, reason='endianness is numpy-specific')
|
| 470 |
+
def test_geometric_transform_endianness_with_output_parameter(self, xp):
|
| 471 |
+
# geometric transform given output ndarray or dtype with
|
| 472 |
+
# non-native endianness. see issue #4127
|
| 473 |
+
data = np.asarray([1])
|
| 474 |
+
|
| 475 |
+
def mapping(x):
|
| 476 |
+
return x
|
| 477 |
+
|
| 478 |
+
for out in [data.dtype, data.dtype.newbyteorder(),
|
| 479 |
+
np.empty_like(data),
|
| 480 |
+
np.empty_like(data).astype(data.dtype.newbyteorder())]:
|
| 481 |
+
returned = ndimage.geometric_transform(data, mapping, data.shape,
|
| 482 |
+
output=out)
|
| 483 |
+
result = out if returned is None else returned
|
| 484 |
+
assert_array_almost_equal(result, [1])
|
| 485 |
+
|
| 486 |
+
@skip_xp_backends(np_only=True, reason='string `output` is numpy-specific')
|
| 487 |
+
def test_geometric_transform_with_string_output(self, xp):
|
| 488 |
+
data = xp.asarray([1])
|
| 489 |
+
|
| 490 |
+
def mapping(x):
|
| 491 |
+
return x
|
| 492 |
+
|
| 493 |
+
out = ndimage.geometric_transform(data, mapping, output='f')
|
| 494 |
+
assert out.dtype is np.dtype('f')
|
| 495 |
+
assert_array_almost_equal(out, [1])
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
class TestMapCoordinates:
|
| 499 |
+
|
| 500 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 501 |
+
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
|
| 502 |
+
def test_map_coordinates01(self, order, dtype, xp):
|
| 503 |
+
if is_jax(xp) and order > 1:
|
| 504 |
+
pytest.xfail("jax map_coordinates requires order <= 1")
|
| 505 |
+
|
| 506 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 507 |
+
[7, 6, 8, 5],
|
| 508 |
+
[3, 5, 3, 6]])
|
| 509 |
+
expected = xp.asarray([[0, 0, 0, 0],
|
| 510 |
+
[0, 4, 1, 3],
|
| 511 |
+
[0, 7, 6, 8]])
|
| 512 |
+
isdtype = array_namespace(data).isdtype
|
| 513 |
+
if isdtype(data.dtype, 'complex floating'):
|
| 514 |
+
data = data - 1j * data
|
| 515 |
+
expected = expected - 1j * expected
|
| 516 |
+
|
| 517 |
+
idx = np.indices(data.shape)
|
| 518 |
+
idx -= 1
|
| 519 |
+
idx = xp.asarray(idx)
|
| 520 |
+
|
| 521 |
+
out = ndimage.map_coordinates(data, idx, order=order)
|
| 522 |
+
assert_array_almost_equal(out, expected)
|
| 523 |
+
|
| 524 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 525 |
+
def test_map_coordinates02(self, order, xp):
|
| 526 |
+
if is_jax(xp):
|
| 527 |
+
if order > 1:
|
| 528 |
+
pytest.xfail("jax map_coordinates requires order <= 1")
|
| 529 |
+
if order == 1:
|
| 530 |
+
pytest.xfail("output differs. jax bug?")
|
| 531 |
+
|
| 532 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 533 |
+
[7, 6, 8, 5],
|
| 534 |
+
[3, 5, 3, 6]])
|
| 535 |
+
idx = np.indices(data.shape, np.float64)
|
| 536 |
+
idx -= 0.5
|
| 537 |
+
idx = xp.asarray(idx)
|
| 538 |
+
|
| 539 |
+
out1 = ndimage.shift(data, 0.5, order=order)
|
| 540 |
+
out2 = ndimage.map_coordinates(data, idx, order=order)
|
| 541 |
+
assert_array_almost_equal(out1, out2)
|
| 542 |
+
|
| 543 |
+
@skip_xp_backends("jax.numpy", reason="`order` is required in jax")
|
| 544 |
+
def test_map_coordinates03(self, xp):
|
| 545 |
+
data = _asarray([[4, 1, 3, 2],
|
| 546 |
+
[7, 6, 8, 5],
|
| 547 |
+
[3, 5, 3, 6]], order='F', xp=xp)
|
| 548 |
+
idx = np.indices(data.shape) - 1
|
| 549 |
+
idx = xp.asarray(idx)
|
| 550 |
+
out = ndimage.map_coordinates(data, idx)
|
| 551 |
+
expected = xp.asarray([[0, 0, 0, 0],
|
| 552 |
+
[0, 4, 1, 3],
|
| 553 |
+
[0, 7, 6, 8]])
|
| 554 |
+
assert_array_almost_equal(out, expected)
|
| 555 |
+
assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
|
| 556 |
+
|
| 557 |
+
idx = np.indices(data[::2, ...].shape) - 1
|
| 558 |
+
idx = xp.asarray(idx)
|
| 559 |
+
out = ndimage.map_coordinates(data[::2, ...], idx)
|
| 560 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
|
| 561 |
+
[0, 4, 1, 3]]))
|
| 562 |
+
assert_array_almost_equal(out, ndimage.shift(data[::2, ...], (1, 1)))
|
| 563 |
+
|
| 564 |
+
idx = np.indices(data[:, ::2].shape) - 1
|
| 565 |
+
idx = xp.asarray(idx)
|
| 566 |
+
out = ndimage.map_coordinates(data[:, ::2], idx)
|
| 567 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0], [0, 4], [0, 7]]))
|
| 568 |
+
assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
|
| 569 |
+
|
| 570 |
+
@skip_xp_backends(np_only=True)
|
| 571 |
+
def test_map_coordinates_endianness_with_output_parameter(self, xp):
|
| 572 |
+
# output parameter given as array or dtype with either endianness
|
| 573 |
+
# see issue #4127
|
| 574 |
+
# NB: NumPy-only
|
| 575 |
+
|
| 576 |
+
data = np.asarray([[1, 2], [7, 6]])
|
| 577 |
+
expected = np.asarray([[0, 0], [0, 1]])
|
| 578 |
+
idx = np.indices(data.shape)
|
| 579 |
+
idx -= 1
|
| 580 |
+
for out in [
|
| 581 |
+
data.dtype,
|
| 582 |
+
data.dtype.newbyteorder(),
|
| 583 |
+
np.empty_like(expected),
|
| 584 |
+
np.empty_like(expected).astype(expected.dtype.newbyteorder())
|
| 585 |
+
]:
|
| 586 |
+
returned = ndimage.map_coordinates(data, idx, output=out)
|
| 587 |
+
result = out if returned is None else returned
|
| 588 |
+
assert_array_almost_equal(result, expected)
|
| 589 |
+
|
| 590 |
+
@skip_xp_backends(np_only=True, reason='string `output` is numpy-specific')
|
| 591 |
+
def test_map_coordinates_with_string_output(self, xp):
|
| 592 |
+
data = xp.asarray([[1]])
|
| 593 |
+
idx = np.indices(data.shape)
|
| 594 |
+
idx = xp.asarray(idx)
|
| 595 |
+
out = ndimage.map_coordinates(data, idx, output='f')
|
| 596 |
+
assert out.dtype is np.dtype('f')
|
| 597 |
+
assert_array_almost_equal(out, xp.asarray([[1]]))
|
| 598 |
+
|
| 599 |
+
@pytest.mark.skipif('win32' in sys.platform or np.intp(0).itemsize < 8,
|
| 600 |
+
reason='do not run on 32 bit or windows '
|
| 601 |
+
'(no sparse memory)')
|
| 602 |
+
def test_map_coordinates_large_data(self, xp):
|
| 603 |
+
# check crash on large data
|
| 604 |
+
try:
|
| 605 |
+
n = 30000
|
| 606 |
+
# a = xp.reshape(xp.empty(n**2, dtype=xp.float32), (n, n))
|
| 607 |
+
a = np.empty(n**2, dtype=np.float32).reshape(n, n)
|
| 608 |
+
# fill the part we might read
|
| 609 |
+
a[n - 3:, n - 3:] = 0
|
| 610 |
+
ndimage.map_coordinates(
|
| 611 |
+
xp.asarray(a), xp.asarray([[n - 1.5], [n - 1.5]]), order=1
|
| 612 |
+
)
|
| 613 |
+
except MemoryError as e:
|
| 614 |
+
raise pytest.skip('Not enough memory available') from e
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
class TestAffineTransform:
|
| 618 |
+
|
| 619 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 620 |
+
def test_affine_transform01(self, order, xp):
|
| 621 |
+
data = xp.asarray([1])
|
| 622 |
+
out = ndimage.affine_transform(data, xp.asarray([[1]]), order=order)
|
| 623 |
+
assert_array_almost_equal(out, xp.asarray([1]))
|
| 624 |
+
|
| 625 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 626 |
+
def test_affine_transform02(self, order, xp):
|
| 627 |
+
data = xp.ones([4])
|
| 628 |
+
out = ndimage.affine_transform(data, xp.asarray([[1]]), order=order)
|
| 629 |
+
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 1]))
|
| 630 |
+
|
| 631 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 632 |
+
def test_affine_transform03(self, order, xp):
|
| 633 |
+
data = xp.ones([4])
|
| 634 |
+
out = ndimage.affine_transform(data, xp.asarray([[1]]), -1, order=order)
|
| 635 |
+
assert_array_almost_equal(out, xp.asarray([0, 1, 1, 1]))
|
| 636 |
+
|
| 637 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 638 |
+
def test_affine_transform04(self, order, xp):
|
| 639 |
+
data = xp.asarray([4, 1, 3, 2])
|
| 640 |
+
out = ndimage.affine_transform(data, xp.asarray([[1]]), -1, order=order)
|
| 641 |
+
assert_array_almost_equal(out, xp.asarray([0, 4, 1, 3]))
|
| 642 |
+
|
| 643 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 644 |
+
@pytest.mark.parametrize('dtype', ["float64", "complex128"])
|
| 645 |
+
def test_affine_transform05(self, order, dtype, xp):
|
| 646 |
+
dtype = getattr(xp, dtype)
|
| 647 |
+
data = xp.asarray([[1, 1, 1, 1],
|
| 648 |
+
[1, 1, 1, 1],
|
| 649 |
+
[1, 1, 1, 1]], dtype=dtype)
|
| 650 |
+
expected = xp.asarray([[0, 1, 1, 1],
|
| 651 |
+
[0, 1, 1, 1],
|
| 652 |
+
[0, 1, 1, 1]], dtype=dtype)
|
| 653 |
+
isdtype = array_namespace(data).isdtype
|
| 654 |
+
if isdtype(data.dtype, 'complex floating'):
|
| 655 |
+
data -= 1j * data
|
| 656 |
+
expected -= 1j * expected
|
| 657 |
+
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 1]]),
|
| 658 |
+
[0, -1], order=order)
|
| 659 |
+
assert_array_almost_equal(out, expected)
|
| 660 |
+
|
| 661 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 662 |
+
def test_affine_transform06(self, order, xp):
|
| 663 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 664 |
+
[7, 6, 8, 5],
|
| 665 |
+
[3, 5, 3, 6]])
|
| 666 |
+
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 1]]),
|
| 667 |
+
[0, -1], order=order)
|
| 668 |
+
assert_array_almost_equal(out, xp.asarray([[0, 4, 1, 3],
|
| 669 |
+
[0, 7, 6, 8],
|
| 670 |
+
[0, 3, 5, 3]]))
|
| 671 |
+
|
| 672 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 673 |
+
def test_affine_transform07(self, order, xp):
|
| 674 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 675 |
+
[7, 6, 8, 5],
|
| 676 |
+
[3, 5, 3, 6]])
|
| 677 |
+
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 1]]),
|
| 678 |
+
[-1, 0], order=order)
|
| 679 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
|
| 680 |
+
[4, 1, 3, 2],
|
| 681 |
+
[7, 6, 8, 5]]))
|
| 682 |
+
|
| 683 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 684 |
+
def test_affine_transform08(self, order, xp):
|
| 685 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 686 |
+
[7, 6, 8, 5],
|
| 687 |
+
[3, 5, 3, 6]])
|
| 688 |
+
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 1]]),
|
| 689 |
+
[-1, -1], order=order)
|
| 690 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
|
| 691 |
+
[0, 4, 1, 3],
|
| 692 |
+
[0, 7, 6, 8]]))
|
| 693 |
+
|
| 694 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 695 |
+
def test_affine_transform09(self, order, xp):
|
| 696 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 697 |
+
[7, 6, 8, 5],
|
| 698 |
+
[3, 5, 3, 6]])
|
| 699 |
+
if (order > 1):
|
| 700 |
+
filtered = ndimage.spline_filter(data, order=order)
|
| 701 |
+
else:
|
| 702 |
+
filtered = data
|
| 703 |
+
out = ndimage.affine_transform(filtered, xp.asarray([[1, 0], [0, 1]]),
|
| 704 |
+
[-1, -1], order=order,
|
| 705 |
+
prefilter=False)
|
| 706 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
|
| 707 |
+
[0, 4, 1, 3],
|
| 708 |
+
[0, 7, 6, 8]]))
|
| 709 |
+
|
| 710 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 711 |
+
def test_affine_transform10(self, order, xp):
|
| 712 |
+
data = xp.ones([2], dtype=xp.float64)
|
| 713 |
+
out = ndimage.affine_transform(data, xp.asarray([[0.5]]), output_shape=(4,),
|
| 714 |
+
order=order)
|
| 715 |
+
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 0]))
|
| 716 |
+
|
| 717 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 718 |
+
def test_affine_transform11(self, order, xp):
|
| 719 |
+
data = xp.asarray([1, 5, 2, 6, 3, 7, 4, 4])
|
| 720 |
+
out = ndimage.affine_transform(data, xp.asarray([[2]]), 0, (4,), order=order)
|
| 721 |
+
assert_array_almost_equal(out, xp.asarray([1, 2, 3, 4]))
|
| 722 |
+
|
| 723 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 724 |
+
def test_affine_transform12(self, order, xp):
|
| 725 |
+
data = xp.asarray([1, 2, 3, 4])
|
| 726 |
+
out = ndimage.affine_transform(data, xp.asarray([[0.5]]), 0, (8,), order=order)
|
| 727 |
+
assert_array_almost_equal(out[::2], xp.asarray([1, 2, 3, 4]))
|
| 728 |
+
|
| 729 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 730 |
+
def test_affine_transform13(self, order, xp):
|
| 731 |
+
data = [[1, 2, 3, 4],
|
| 732 |
+
[5, 6, 7, 8],
|
| 733 |
+
[9.0, 10, 11, 12]]
|
| 734 |
+
data = xp.asarray(data)
|
| 735 |
+
out = ndimage.affine_transform(data, xp.asarray([[1, 0], [0, 2]]), 0, (3, 2),
|
| 736 |
+
order=order)
|
| 737 |
+
assert_array_almost_equal(out, xp.asarray([[1, 3], [5, 7], [9, 11]]))
|
| 738 |
+
|
| 739 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 740 |
+
def test_affine_transform14(self, order, xp):
|
| 741 |
+
data = [[1, 2, 3, 4],
|
| 742 |
+
[5, 6, 7, 8],
|
| 743 |
+
[9, 10, 11, 12]]
|
| 744 |
+
data = xp.asarray(data)
|
| 745 |
+
out = ndimage.affine_transform(data, xp.asarray([[2, 0], [0, 1]]), 0, (1, 4),
|
| 746 |
+
order=order)
|
| 747 |
+
assert_array_almost_equal(out, xp.asarray([[1, 2, 3, 4]]))
|
| 748 |
+
|
| 749 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 750 |
+
def test_affine_transform15(self, order, xp):
|
| 751 |
+
data = [[1, 2, 3, 4],
|
| 752 |
+
[5, 6, 7, 8],
|
| 753 |
+
[9, 10, 11, 12]]
|
| 754 |
+
data = xp.asarray(data)
|
| 755 |
+
out = ndimage.affine_transform(data, xp.asarray([[2, 0], [0, 2]]), 0, (1, 2),
|
| 756 |
+
order=order)
|
| 757 |
+
assert_array_almost_equal(out, xp.asarray([[1, 3]]))
|
| 758 |
+
|
| 759 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 760 |
+
def test_affine_transform16(self, order, xp):
|
| 761 |
+
data = [[1, 2, 3, 4],
|
| 762 |
+
[5, 6, 7, 8],
|
| 763 |
+
[9, 10, 11, 12]]
|
| 764 |
+
data = xp.asarray(data)
|
| 765 |
+
out = ndimage.affine_transform(data, xp.asarray([[1, 0.0], [0, 0.5]]), 0,
|
| 766 |
+
(3, 8), order=order)
|
| 767 |
+
assert_array_almost_equal(out[..., ::2], data)
|
| 768 |
+
|
| 769 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 770 |
+
def test_affine_transform17(self, order, xp):
|
| 771 |
+
data = [[1, 2, 3, 4],
|
| 772 |
+
[5, 6, 7, 8],
|
| 773 |
+
[9, 10, 11, 12]]
|
| 774 |
+
data = xp.asarray(data)
|
| 775 |
+
out = ndimage.affine_transform(data, xp.asarray([[0.5, 0], [0, 1]]), 0,
|
| 776 |
+
(6, 4), order=order)
|
| 777 |
+
assert_array_almost_equal(out[::2, ...], data)
|
| 778 |
+
|
| 779 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 780 |
+
def test_affine_transform18(self, order, xp):
|
| 781 |
+
data = xp.asarray([[1, 2, 3, 4],
|
| 782 |
+
[5, 6, 7, 8],
|
| 783 |
+
[9, 10, 11, 12]])
|
| 784 |
+
out = ndimage.affine_transform(data, xp.asarray([[0.5, 0], [0, 0.5]]), 0,
|
| 785 |
+
(6, 8), order=order)
|
| 786 |
+
assert_array_almost_equal(out[::2, ::2], data)
|
| 787 |
+
|
| 788 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 789 |
+
def test_affine_transform19(self, order, xp):
|
| 790 |
+
data = xp.asarray([[1, 2, 3, 4],
|
| 791 |
+
[5, 6, 7, 8],
|
| 792 |
+
[9, 10, 11, 12]], dtype=xp.float64)
|
| 793 |
+
out = ndimage.affine_transform(data, xp.asarray([[0.5, 0], [0, 0.5]]), 0,
|
| 794 |
+
(6, 8), order=order)
|
| 795 |
+
out = ndimage.affine_transform(out, xp.asarray([[2.0, 0], [0, 2.0]]), 0,
|
| 796 |
+
(3, 4), order=order)
|
| 797 |
+
assert_array_almost_equal(out, data)
|
| 798 |
+
|
| 799 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 800 |
+
def test_affine_transform20(self, order, xp):
|
| 801 |
+
if is_cupy(xp):
|
| 802 |
+
pytest.xfail("https://github.com/cupy/cupy/issues/8394")
|
| 803 |
+
|
| 804 |
+
data = [[1, 2, 3, 4],
|
| 805 |
+
[5, 6, 7, 8],
|
| 806 |
+
[9, 10, 11, 12]]
|
| 807 |
+
data = xp.asarray(data)
|
| 808 |
+
out = ndimage.affine_transform(data, xp.asarray([[0], [2]]), 0, (2,),
|
| 809 |
+
order=order)
|
| 810 |
+
assert_array_almost_equal(out, xp.asarray([1, 3]))
|
| 811 |
+
|
| 812 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 813 |
+
def test_affine_transform21(self, order, xp):
|
| 814 |
+
if is_cupy(xp):
|
| 815 |
+
pytest.xfail("https://github.com/cupy/cupy/issues/8394")
|
| 816 |
+
|
| 817 |
+
data = [[1, 2, 3, 4],
|
| 818 |
+
[5, 6, 7, 8],
|
| 819 |
+
[9, 10, 11, 12]]
|
| 820 |
+
data = xp.asarray(data)
|
| 821 |
+
out = ndimage.affine_transform(data, xp.asarray([[2], [0]]), 0, (2,),
|
| 822 |
+
order=order)
|
| 823 |
+
assert_array_almost_equal(out, xp.asarray([1, 9]))
|
| 824 |
+
|
| 825 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 826 |
+
def test_affine_transform22(self, order, xp):
|
| 827 |
+
# shift and offset interaction; see issue #1547
|
| 828 |
+
data = xp.asarray([4, 1, 3, 2])
|
| 829 |
+
out = ndimage.affine_transform(data, xp.asarray([[2]]), [-1], (3,),
|
| 830 |
+
order=order)
|
| 831 |
+
assert_array_almost_equal(out, xp.asarray([0, 1, 2]))
|
| 832 |
+
|
| 833 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 834 |
+
def test_affine_transform23(self, order, xp):
|
| 835 |
+
# shift and offset interaction; see issue #1547
|
| 836 |
+
data = xp.asarray([4, 1, 3, 2])
|
| 837 |
+
out = ndimage.affine_transform(data, xp.asarray([[0.5]]), [-1], (8,),
|
| 838 |
+
order=order)
|
| 839 |
+
assert_array_almost_equal(out[::2], xp.asarray([0, 4, 1, 3]))
|
| 840 |
+
|
| 841 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 842 |
+
def test_affine_transform24(self, order, xp):
|
| 843 |
+
# consistency between diagonal and non-diagonal case; see issue #1547
|
| 844 |
+
data = xp.asarray([4, 1, 3, 2])
|
| 845 |
+
with suppress_warnings() as sup:
|
| 846 |
+
sup.filter(UserWarning,
|
| 847 |
+
'The behavior of affine_transform with a 1-D array .* '
|
| 848 |
+
'has changed')
|
| 849 |
+
out1 = ndimage.affine_transform(data, xp.asarray([2]), -1, order=order)
|
| 850 |
+
out2 = ndimage.affine_transform(data, xp.asarray([[2]]), -1, order=order)
|
| 851 |
+
assert_array_almost_equal(out1, out2)
|
| 852 |
+
|
| 853 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 854 |
+
def test_affine_transform25(self, order, xp):
|
| 855 |
+
# consistency between diagonal and non-diagonal case; see issue #1547
|
| 856 |
+
data = xp.asarray([4, 1, 3, 2])
|
| 857 |
+
with suppress_warnings() as sup:
|
| 858 |
+
sup.filter(UserWarning,
|
| 859 |
+
'The behavior of affine_transform with a 1-D array .* '
|
| 860 |
+
'has changed')
|
| 861 |
+
out1 = ndimage.affine_transform(data, xp.asarray([0.5]), -1, order=order)
|
| 862 |
+
out2 = ndimage.affine_transform(data, xp.asarray([[0.5]]), -1, order=order)
|
| 863 |
+
assert_array_almost_equal(out1, out2)
|
| 864 |
+
|
| 865 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 866 |
+
def test_affine_transform26(self, order, xp):
|
| 867 |
+
# test homogeneous coordinates
|
| 868 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 869 |
+
[7, 6, 8, 5],
|
| 870 |
+
[3, 5, 3, 6]])
|
| 871 |
+
if (order > 1):
|
| 872 |
+
filtered = ndimage.spline_filter(data, order=order)
|
| 873 |
+
else:
|
| 874 |
+
filtered = data
|
| 875 |
+
tform_original = xp.eye(2)
|
| 876 |
+
offset_original = -xp.ones((2, 1))
|
| 877 |
+
|
| 878 |
+
concat = array_namespace(tform_original, offset_original).concat
|
| 879 |
+
tform_h1 = concat((tform_original, offset_original), axis=1) # hstack
|
| 880 |
+
tform_h2 = concat( (tform_h1, xp.asarray([[0.0, 0, 1]])), axis=0) # vstack
|
| 881 |
+
|
| 882 |
+
offs = [float(x) for x in xp.reshape(offset_original, (-1,))]
|
| 883 |
+
|
| 884 |
+
out1 = ndimage.affine_transform(filtered, tform_original,
|
| 885 |
+
offs,
|
| 886 |
+
order=order, prefilter=False)
|
| 887 |
+
out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
|
| 888 |
+
prefilter=False)
|
| 889 |
+
out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
|
| 890 |
+
prefilter=False)
|
| 891 |
+
for out in [out1, out2, out3]:
|
| 892 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
|
| 893 |
+
[0, 4, 1, 3],
|
| 894 |
+
[0, 7, 6, 8]]))
|
| 895 |
+
|
| 896 |
+
def test_affine_transform27(self, xp):
|
| 897 |
+
if is_cupy(xp):
|
| 898 |
+
pytest.xfail("CuPy does not raise")
|
| 899 |
+
|
| 900 |
+
# test valid homogeneous transformation matrix
|
| 901 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 902 |
+
[7, 6, 8, 5],
|
| 903 |
+
[3, 5, 3, 6]])
|
| 904 |
+
concat = array_namespace(data).concat
|
| 905 |
+
tform_h1 = concat( (xp.eye(2), -xp.ones((2, 1))) , axis=1) # vstack
|
| 906 |
+
tform_h2 = concat((tform_h1, xp.asarray([[5.0, 2, 1]])), axis=0) # hstack
|
| 907 |
+
|
| 908 |
+
assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
|
| 909 |
+
|
| 910 |
+
@skip_xp_backends(np_only=True, reason='byteorder is numpy-specific')
|
| 911 |
+
def test_affine_transform_1d_endianness_with_output_parameter(self, xp):
|
| 912 |
+
# 1d affine transform given output ndarray or dtype with
|
| 913 |
+
# either endianness. see issue #7388
|
| 914 |
+
data = xp.ones((2, 2))
|
| 915 |
+
for out in [xp.empty_like(data),
|
| 916 |
+
xp.empty_like(data).astype(data.dtype.newbyteorder()),
|
| 917 |
+
data.dtype, data.dtype.newbyteorder()]:
|
| 918 |
+
with suppress_warnings() as sup:
|
| 919 |
+
sup.filter(UserWarning,
|
| 920 |
+
'The behavior of affine_transform with a 1-D array '
|
| 921 |
+
'.* has changed')
|
| 922 |
+
matrix = xp.asarray([1, 1])
|
| 923 |
+
returned = ndimage.affine_transform(data, matrix, output=out)
|
| 924 |
+
result = out if returned is None else returned
|
| 925 |
+
assert_array_almost_equal(result, xp.asarray([[1, 1], [1, 1]]))
|
| 926 |
+
|
| 927 |
+
@skip_xp_backends(np_only=True, reason='byteorder is numpy-specific')
|
| 928 |
+
def test_affine_transform_multi_d_endianness_with_output_parameter(self, xp):
|
| 929 |
+
# affine transform given output ndarray or dtype with either endianness
|
| 930 |
+
# see issue #4127
|
| 931 |
+
# NB: byteorder is numpy-specific
|
| 932 |
+
data = np.asarray([1])
|
| 933 |
+
for out in [data.dtype, data.dtype.newbyteorder(),
|
| 934 |
+
np.empty_like(data),
|
| 935 |
+
np.empty_like(data).astype(data.dtype.newbyteorder())]:
|
| 936 |
+
returned = ndimage.affine_transform(data, np.asarray([[1]]), output=out)
|
| 937 |
+
result = out if returned is None else returned
|
| 938 |
+
assert_array_almost_equal(result, np.asarray([1]))
|
| 939 |
+
|
| 940 |
+
@skip_xp_backends(np_only=True,
|
| 941 |
+
reason='`out` of a different size is numpy-specific'
|
| 942 |
+
)
|
| 943 |
+
def test_affine_transform_output_shape(self, xp):
|
| 944 |
+
# don't require output_shape when out of a different size is given
|
| 945 |
+
data = xp.arange(8, dtype=xp.float64)
|
| 946 |
+
out = xp.ones((16,))
|
| 947 |
+
|
| 948 |
+
ndimage.affine_transform(data, xp.asarray([[1]]), output=out)
|
| 949 |
+
assert_array_almost_equal(out[:8], data)
|
| 950 |
+
|
| 951 |
+
# mismatched output shape raises an error
|
| 952 |
+
with pytest.raises(RuntimeError):
|
| 953 |
+
ndimage.affine_transform(
|
| 954 |
+
data, [[1]], output=out, output_shape=(12,))
|
| 955 |
+
|
| 956 |
+
@skip_xp_backends(np_only=True, reason='string `output` is numpy-specific')
|
| 957 |
+
def test_affine_transform_with_string_output(self, xp):
|
| 958 |
+
data = xp.asarray([1])
|
| 959 |
+
out = ndimage.affine_transform(data, xp.asarray([[1]]), output='f')
|
| 960 |
+
assert out.dtype is np.dtype('f')
|
| 961 |
+
assert_array_almost_equal(out, xp.asarray([1]))
|
| 962 |
+
|
| 963 |
+
@pytest.mark.parametrize('shift',
|
| 964 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
| 965 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 966 |
+
def test_affine_transform_shift_via_grid_wrap(self, shift, order, xp):
|
| 967 |
+
# For mode 'grid-wrap', integer shifts should match np.roll
|
| 968 |
+
x = np.asarray([[0, 1],
|
| 969 |
+
[2, 3]])
|
| 970 |
+
affine = np.zeros((2, 3))
|
| 971 |
+
affine[:2, :2] = np.eye(2)
|
| 972 |
+
affine[:, 2] = np.asarray(shift)
|
| 973 |
+
|
| 974 |
+
expected = np.roll(x, shift, axis=(0, 1))
|
| 975 |
+
|
| 976 |
+
x = xp.asarray(x)
|
| 977 |
+
affine = xp.asarray(affine)
|
| 978 |
+
expected = xp.asarray(expected)
|
| 979 |
+
|
| 980 |
+
assert_array_almost_equal(
|
| 981 |
+
ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
|
| 982 |
+
expected
|
| 983 |
+
)
|
| 984 |
+
|
| 985 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 986 |
+
def test_affine_transform_shift_reflect(self, order, xp):
|
| 987 |
+
# shift by x.shape results in reflection
|
| 988 |
+
x = np.asarray([[0, 1, 2],
|
| 989 |
+
[3, 4, 5]])
|
| 990 |
+
expected = x[::-1, ::-1].copy() # strides >0 for torch
|
| 991 |
+
x = xp.asarray(x)
|
| 992 |
+
expected = xp.asarray(expected)
|
| 993 |
+
|
| 994 |
+
affine = np.zeros([2, 3])
|
| 995 |
+
affine[:2, :2] = np.eye(2)
|
| 996 |
+
affine[:, 2] = np.asarray(x.shape)
|
| 997 |
+
affine = xp.asarray(affine)
|
| 998 |
+
|
| 999 |
+
assert_array_almost_equal(
|
| 1000 |
+
ndimage.affine_transform(x, affine, mode='reflect', order=order),
|
| 1001 |
+
expected,
|
| 1002 |
+
)
|
| 1003 |
+
|
| 1004 |
+
|
| 1005 |
+
class TestShift:
|
| 1006 |
+
|
| 1007 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1008 |
+
def test_shift01(self, order, xp):
|
| 1009 |
+
data = xp.asarray([1])
|
| 1010 |
+
out = ndimage.shift(data, [1], order=order)
|
| 1011 |
+
assert_array_almost_equal(out, xp.asarray([0]))
|
| 1012 |
+
|
| 1013 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1014 |
+
def test_shift02(self, order, xp):
|
| 1015 |
+
data = xp.ones([4])
|
| 1016 |
+
out = ndimage.shift(data, [1], order=order)
|
| 1017 |
+
assert_array_almost_equal(out, xp.asarray([0, 1, 1, 1]))
|
| 1018 |
+
|
| 1019 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1020 |
+
def test_shift03(self, order, xp):
|
| 1021 |
+
data = xp.ones([4])
|
| 1022 |
+
out = ndimage.shift(data, -1, order=order)
|
| 1023 |
+
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 0]))
|
| 1024 |
+
|
| 1025 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1026 |
+
def test_shift04(self, order, xp):
|
| 1027 |
+
data = xp.asarray([4, 1, 3, 2])
|
| 1028 |
+
out = ndimage.shift(data, 1, order=order)
|
| 1029 |
+
assert_array_almost_equal(out, xp.asarray([0, 4, 1, 3]))
|
| 1030 |
+
|
| 1031 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1032 |
+
@pytest.mark.parametrize('dtype', ["float64", "complex128"])
|
| 1033 |
+
def test_shift05(self, order, dtype, xp):
|
| 1034 |
+
dtype = getattr(xp, dtype)
|
| 1035 |
+
data = xp.asarray([[1, 1, 1, 1],
|
| 1036 |
+
[1, 1, 1, 1],
|
| 1037 |
+
[1, 1, 1, 1]], dtype=dtype)
|
| 1038 |
+
expected = xp.asarray([[0, 1, 1, 1],
|
| 1039 |
+
[0, 1, 1, 1],
|
| 1040 |
+
[0, 1, 1, 1]], dtype=dtype)
|
| 1041 |
+
isdtype = array_namespace(data).isdtype
|
| 1042 |
+
if isdtype(data.dtype, 'complex floating'):
|
| 1043 |
+
data -= 1j * data
|
| 1044 |
+
expected -= 1j * expected
|
| 1045 |
+
out = ndimage.shift(data, [0, 1], order=order)
|
| 1046 |
+
assert_array_almost_equal(out, expected)
|
| 1047 |
+
|
| 1048 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1049 |
+
@pytest.mark.parametrize('mode', ['constant', 'grid-constant'])
|
| 1050 |
+
@pytest.mark.parametrize('dtype', ['float64', 'complex128'])
|
| 1051 |
+
def test_shift_with_nonzero_cval(self, order, mode, dtype, xp):
|
| 1052 |
+
data = np.asarray([[1, 1, 1, 1],
|
| 1053 |
+
[1, 1, 1, 1],
|
| 1054 |
+
[1, 1, 1, 1]], dtype=dtype)
|
| 1055 |
+
|
| 1056 |
+
expected = np.asarray([[0, 1, 1, 1],
|
| 1057 |
+
[0, 1, 1, 1],
|
| 1058 |
+
[0, 1, 1, 1]], dtype=dtype)
|
| 1059 |
+
|
| 1060 |
+
isdtype = array_namespace(data).isdtype
|
| 1061 |
+
if isdtype(data.dtype, 'complex floating'):
|
| 1062 |
+
data -= 1j * data
|
| 1063 |
+
expected -= 1j * expected
|
| 1064 |
+
cval = 5.0
|
| 1065 |
+
expected[:, 0] = cval # specific to shift of [0, 1] used below
|
| 1066 |
+
|
| 1067 |
+
data = xp.asarray(data)
|
| 1068 |
+
expected = xp.asarray(expected)
|
| 1069 |
+
out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval)
|
| 1070 |
+
assert_array_almost_equal(out, expected)
|
| 1071 |
+
|
| 1072 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1073 |
+
def test_shift06(self, order, xp):
|
| 1074 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 1075 |
+
[7, 6, 8, 5],
|
| 1076 |
+
[3, 5, 3, 6]])
|
| 1077 |
+
out = ndimage.shift(data, [0, 1], order=order)
|
| 1078 |
+
assert_array_almost_equal(out, xp.asarray([[0, 4, 1, 3],
|
| 1079 |
+
[0, 7, 6, 8],
|
| 1080 |
+
[0, 3, 5, 3]]))
|
| 1081 |
+
|
| 1082 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1083 |
+
def test_shift07(self, order, xp):
|
| 1084 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 1085 |
+
[7, 6, 8, 5],
|
| 1086 |
+
[3, 5, 3, 6]])
|
| 1087 |
+
out = ndimage.shift(data, [1, 0], order=order)
|
| 1088 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
|
| 1089 |
+
[4, 1, 3, 2],
|
| 1090 |
+
[7, 6, 8, 5]]))
|
| 1091 |
+
|
| 1092 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1093 |
+
def test_shift08(self, order, xp):
|
| 1094 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 1095 |
+
[7, 6, 8, 5],
|
| 1096 |
+
[3, 5, 3, 6]])
|
| 1097 |
+
out = ndimage.shift(data, [1, 1], order=order)
|
| 1098 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
|
| 1099 |
+
[0, 4, 1, 3],
|
| 1100 |
+
[0, 7, 6, 8]]))
|
| 1101 |
+
|
| 1102 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1103 |
+
def test_shift09(self, order, xp):
|
| 1104 |
+
data = xp.asarray([[4, 1, 3, 2],
|
| 1105 |
+
[7, 6, 8, 5],
|
| 1106 |
+
[3, 5, 3, 6]])
|
| 1107 |
+
if (order > 1):
|
| 1108 |
+
filtered = ndimage.spline_filter(data, order=order)
|
| 1109 |
+
else:
|
| 1110 |
+
filtered = data
|
| 1111 |
+
out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
|
| 1112 |
+
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
|
| 1113 |
+
[0, 4, 1, 3],
|
| 1114 |
+
[0, 7, 6, 8]]))
|
| 1115 |
+
|
| 1116 |
+
@pytest.mark.parametrize('shift',
|
| 1117 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
| 1118 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1119 |
+
def test_shift_grid_wrap(self, shift, order, xp):
|
| 1120 |
+
# For mode 'grid-wrap', integer shifts should match np.roll
|
| 1121 |
+
x = np.asarray([[0, 1],
|
| 1122 |
+
[2, 3]])
|
| 1123 |
+
expected = np.roll(x, shift, axis=(0,1))
|
| 1124 |
+
|
| 1125 |
+
x = xp.asarray(x)
|
| 1126 |
+
expected = xp.asarray(expected)
|
| 1127 |
+
|
| 1128 |
+
assert_array_almost_equal(
|
| 1129 |
+
ndimage.shift(x, shift, mode='grid-wrap', order=order),
|
| 1130 |
+
expected
|
| 1131 |
+
)
|
| 1132 |
+
|
| 1133 |
+
@pytest.mark.parametrize('shift',
|
| 1134 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
| 1135 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1136 |
+
def test_shift_grid_constant1(self, shift, order, xp):
|
| 1137 |
+
# For integer shifts, 'constant' and 'grid-constant' should be equal
|
| 1138 |
+
x = xp.reshape(xp.arange(20), (5, 4))
|
| 1139 |
+
assert_array_almost_equal(
|
| 1140 |
+
ndimage.shift(x, shift, mode='grid-constant', order=order),
|
| 1141 |
+
ndimage.shift(x, shift, mode='constant', order=order),
|
| 1142 |
+
)
|
| 1143 |
+
|
| 1144 |
+
def test_shift_grid_constant_order1(self, xp):
|
| 1145 |
+
x = xp.asarray([[1, 2, 3],
|
| 1146 |
+
[4, 5, 6]], dtype=xp.float64)
|
| 1147 |
+
expected_result = xp.asarray([[0.25, 0.75, 1.25],
|
| 1148 |
+
[1.25, 3.00, 4.00]])
|
| 1149 |
+
assert_array_almost_equal(
|
| 1150 |
+
ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1),
|
| 1151 |
+
expected_result,
|
| 1152 |
+
)
|
| 1153 |
+
|
| 1154 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1155 |
+
def test_shift_reflect(self, order, xp):
|
| 1156 |
+
# shift by x.shape results in reflection
|
| 1157 |
+
x = np.asarray([[0, 1, 2],
|
| 1158 |
+
[3, 4, 5]])
|
| 1159 |
+
expected = x[::-1, ::-1].copy() # strides > 0 for torch
|
| 1160 |
+
|
| 1161 |
+
x = xp.asarray(x)
|
| 1162 |
+
expected = xp.asarray(expected)
|
| 1163 |
+
assert_array_almost_equal(
|
| 1164 |
+
ndimage.shift(x, x.shape, mode='reflect', order=order),
|
| 1165 |
+
expected,
|
| 1166 |
+
)
|
| 1167 |
+
|
| 1168 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1169 |
+
@pytest.mark.parametrize('prefilter', [False, True])
|
| 1170 |
+
def test_shift_nearest_boundary(self, order, prefilter, xp):
|
| 1171 |
+
# verify that shifting at least order // 2 beyond the end of the array
|
| 1172 |
+
# gives a value equal to the edge value.
|
| 1173 |
+
x = xp.arange(16)
|
| 1174 |
+
kwargs = dict(mode='nearest', order=order, prefilter=prefilter)
|
| 1175 |
+
assert_array_almost_equal(
|
| 1176 |
+
ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0],
|
| 1177 |
+
)
|
| 1178 |
+
assert_array_almost_equal(
|
| 1179 |
+
ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1],
|
| 1180 |
+
)
|
| 1181 |
+
|
| 1182 |
+
@pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
|
| 1183 |
+
'mirror', 'reflect'])
|
| 1184 |
+
@pytest.mark.parametrize('order', range(6))
|
| 1185 |
+
def test_shift_vs_padded(self, order, mode, xp):
|
| 1186 |
+
x_np = np.arange(144, dtype=float).reshape(12, 12)
|
| 1187 |
+
shift = (0.4, -2.3)
|
| 1188 |
+
|
| 1189 |
+
# manually pad and then extract center to get expected result
|
| 1190 |
+
npad = 32
|
| 1191 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
| 1192 |
+
x_padded = xp.asarray(np.pad(x_np, npad, mode=pad_mode))
|
| 1193 |
+
x = xp.asarray(x_np)
|
| 1194 |
+
|
| 1195 |
+
center_slice = tuple([slice(npad, -npad)] * x.ndim)
|
| 1196 |
+
expected_result = ndimage.shift(
|
| 1197 |
+
x_padded, shift, mode=mode, order=order)[center_slice]
|
| 1198 |
+
|
| 1199 |
+
xp_assert_close(
|
| 1200 |
+
ndimage.shift(x, shift, mode=mode, order=order),
|
| 1201 |
+
expected_result,
|
| 1202 |
+
rtol=1e-7,
|
| 1203 |
+
)
|
| 1204 |
+
|
| 1205 |
+
|
| 1206 |
+
class TestZoom:
    """Tests for ndimage.zoom."""

    @pytest.mark.parametrize('order', range(0, 6))
    def test_zoom1(self, order, xp):
        # scalar and per-axis zoom factors behave identically; interpolated
        # values must remain within the input's value range
        for z in [2, [2, 2]]:
            arr = xp.reshape(xp.arange(25, dtype=xp.float64), (5, 5))
            arr = ndimage.zoom(arr, z, order=order)
            assert arr.shape == (10, 10)
            assert xp.all(arr[-1, :] != 0)
            assert xp.all(arr[-1, :] >= (20 - eps))
            assert xp.all(arr[0, :] <= (5 + eps))
            assert xp.all(arr >= (0 - eps))
            assert xp.all(arr <= (24 + eps))

    def test_zoom2(self, xp):
        # zooming by 2 and then by 0.5 round-trips exactly
        arr = xp.reshape(xp.arange(12), (3, 4))
        out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
        xp_assert_equal(out, arr)

    def test_zoom3(self, xp):
        # per-axis zoom factors act only on their own axis
        arr = xp.asarray([[1, 2]])
        out1 = ndimage.zoom(arr, (2, 1))
        out2 = ndimage.zoom(arr, (1, 2))

        assert_array_almost_equal(out1, xp.asarray([[1, 2], [1, 2]]))
        assert_array_almost_equal(out2, xp.asarray([[1, 1, 2, 2]]))

    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('dtype', ["float64", "complex128"])
    def test_zoom_affine01(self, order, dtype, xp):
        # an affine_transform with diagonal 0.5 factors is a 2x zoom;
        # sub-sampling the output recovers the input
        dtype = getattr(xp, dtype)
        data = xp.asarray([[1, 2, 3, 4],
                           [5, 6, 7, 8],
                           [9, 10, 11, 12]], dtype=dtype)
        isdtype = array_namespace(data).isdtype
        if isdtype(data.dtype, 'complex floating'):
            data -= 1j * data
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       'The behavior of affine_transform with a 1-D array .* '
                       'has changed')
            out = ndimage.affine_transform(data, xp.asarray([0.5, 0.5]), 0,
                                           (6, 8), order=order)
        assert_array_almost_equal(out[::2, ::2], data)

    def test_zoom_infinity(self, xp):
        # Ticket #1419 regression test: an extreme down-zoom must not blow up
        dim = 8
        ndimage.zoom(xp.zeros((dim, dim)), 1. / dim, mode='nearest')

    def test_zoom_zoomfactor_one(self, xp):
        # Ticket #1122 regression test: a unit factor on one axis must not
        # leak the cval into the output
        arr = xp.zeros((1, 5, 5))
        zoom = (1.0, 2.0, 2.0)

        out = ndimage.zoom(arr, zoom, cval=7)
        ref = xp.zeros((1, 10, 10))
        assert_array_almost_equal(out, ref)

    def test_zoom_output_shape_roundoff(self, xp):
        # output shape is rounded, not truncated
        arr = xp.zeros((3, 11, 25))
        zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
        out = ndimage.zoom(arr, zoom)
        assert out.shape == (4, 15, 29)

    @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
    @pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
                                      'mirror', 'grid-wrap', 'grid-mirror',
                                      'grid-constant'])
    def test_zoom_by_int_order0(self, zoom, mode, xp):
        # order 0 zoom should be the same as replication via np.kron
        # Note: This is not True for general x shapes when grid_mode is False,
        # but works here for all modes because the size ratio happens to
        # always be an integer when x.shape = (2, 2).
        x_np = np.asarray([[0, 1],
                           [2, 3]], dtype=np.float64)
        expected = np.kron(x_np, np.ones(zoom))

        x = xp.asarray(x_np)
        expected = xp.asarray(expected)

        assert_array_almost_equal(
            ndimage.zoom(x, zoom, order=0, mode=mode),
            expected
        )

    @pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
    @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
    @pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
                                      'grid-wrap', 'grid-constant'])
    def test_zoom_grid_by_int_order0(self, shape, zoom, mode, xp):
        # When grid_mode is True, order 0 zoom should be the same as
        # replication via np.kron. The only exceptions to this are the
        # non-grid modes 'constant' and 'wrap'.
        x_np = np.arange(np.prod(shape), dtype=float).reshape(shape)

        x = xp.asarray(x_np)
        assert_array_almost_equal(
            ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
            xp.asarray(np.kron(x_np, np.ones(zoom)))
        )

    @pytest.mark.parametrize('mode', ['constant', 'wrap'])
    @pytest.mark.thread_unsafe
    def test_zoom_grid_mode_warnings(self, mode, xp):
        # Warn on use of non-grid modes when grid_mode is True
        x = xp.reshape(xp.arange(9, dtype=xp.float64), (3, 3))
        with pytest.warns(UserWarning,
                          match="It is recommended to use mode"):
            # NOTE: a stray trailing comma previously wrapped this call's
            # result in a discarded one-element tuple; the call is intended.
            ndimage.zoom(x, 2, mode=mode, grid_mode=True)

    @skip_xp_backends(np_only=True, reason='inplace output= is numpy-specific')
    def test_zoom_output_shape(self, xp):
        """Ticket #643: zooming into a preallocated output array."""
        x = xp.reshape(xp.arange(12), (3, 4))
        ndimage.zoom(x, 2, output=xp.zeros((6, 8)))

    def test_zoom_0d_array(self, xp):
        # Ticket #21670 regression test: a 0-d array zoom factor must give
        # the same result as the equivalent Python scalar
        a = xp.arange(10.)
        factor = 2
        actual = ndimage.zoom(a, np.array(factor))
        expected = ndimage.zoom(a, factor)
        xp_assert_close(actual, expected)
|
| 1330 |
+
|
| 1331 |
+
|
| 1332 |
+
class TestRotate:
|
| 1333 |
+
|
| 1334 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1335 |
+
def test_rotate01(self, order, xp):
|
| 1336 |
+
data = xp.asarray([[0, 0, 0, 0],
|
| 1337 |
+
[0, 1, 1, 0],
|
| 1338 |
+
[0, 0, 0, 0]], dtype=xp.float64)
|
| 1339 |
+
out = ndimage.rotate(data, 0, order=order)
|
| 1340 |
+
assert_array_almost_equal(out, data)
|
| 1341 |
+
|
| 1342 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1343 |
+
def test_rotate02(self, order, xp):
|
| 1344 |
+
data = xp.asarray([[0, 0, 0, 0],
|
| 1345 |
+
[0, 1, 0, 0],
|
| 1346 |
+
[0, 0, 0, 0]], dtype=xp.float64)
|
| 1347 |
+
expected = xp.asarray([[0, 0, 0],
|
| 1348 |
+
[0, 0, 0],
|
| 1349 |
+
[0, 1, 0],
|
| 1350 |
+
[0, 0, 0]], dtype=xp.float64)
|
| 1351 |
+
out = ndimage.rotate(data, 90, order=order)
|
| 1352 |
+
assert_array_almost_equal(out, expected)
|
| 1353 |
+
|
| 1354 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1355 |
+
@pytest.mark.parametrize('dtype', ["float64", "complex128"])
|
| 1356 |
+
def test_rotate03(self, order, dtype, xp):
|
| 1357 |
+
dtype = getattr(xp, dtype)
|
| 1358 |
+
data = xp.asarray([[0, 0, 0, 0, 0],
|
| 1359 |
+
[0, 1, 1, 0, 0],
|
| 1360 |
+
[0, 0, 0, 0, 0]], dtype=dtype)
|
| 1361 |
+
expected = xp.asarray([[0, 0, 0],
|
| 1362 |
+
[0, 0, 0],
|
| 1363 |
+
[0, 1, 0],
|
| 1364 |
+
[0, 1, 0],
|
| 1365 |
+
[0, 0, 0]], dtype=dtype)
|
| 1366 |
+
isdtype = array_namespace(data).isdtype
|
| 1367 |
+
if isdtype(data.dtype, 'complex floating'):
|
| 1368 |
+
data -= 1j * data
|
| 1369 |
+
expected -= 1j * expected
|
| 1370 |
+
out = ndimage.rotate(data, 90, order=order)
|
| 1371 |
+
assert_array_almost_equal(out, expected)
|
| 1372 |
+
|
| 1373 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1374 |
+
def test_rotate04(self, order, xp):
|
| 1375 |
+
data = xp.asarray([[0, 0, 0, 0, 0],
|
| 1376 |
+
[0, 1, 1, 0, 0],
|
| 1377 |
+
[0, 0, 0, 0, 0]], dtype=xp.float64)
|
| 1378 |
+
expected = xp.asarray([[0, 0, 0, 0, 0],
|
| 1379 |
+
[0, 0, 1, 0, 0],
|
| 1380 |
+
[0, 0, 1, 0, 0]], dtype=xp.float64)
|
| 1381 |
+
out = ndimage.rotate(data, 90, reshape=False, order=order)
|
| 1382 |
+
assert_array_almost_equal(out, expected)
|
| 1383 |
+
|
| 1384 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1385 |
+
def test_rotate05(self, order, xp):
|
| 1386 |
+
data = np.empty((4, 3, 3))
|
| 1387 |
+
for i in range(3):
|
| 1388 |
+
data[:, :, i] = np.asarray([[0, 0, 0],
|
| 1389 |
+
[0, 1, 0],
|
| 1390 |
+
[0, 1, 0],
|
| 1391 |
+
[0, 0, 0]], dtype=np.float64)
|
| 1392 |
+
data = xp.asarray(data)
|
| 1393 |
+
expected = xp.asarray([[0, 0, 0, 0],
|
| 1394 |
+
[0, 1, 1, 0],
|
| 1395 |
+
[0, 0, 0, 0]], dtype=xp.float64)
|
| 1396 |
+
out = ndimage.rotate(data, 90, order=order)
|
| 1397 |
+
for i in range(3):
|
| 1398 |
+
assert_array_almost_equal(out[:, :, i], expected)
|
| 1399 |
+
|
| 1400 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1401 |
+
def test_rotate06(self, order, xp):
|
| 1402 |
+
data = np.empty((3, 4, 3))
|
| 1403 |
+
for i in range(3):
|
| 1404 |
+
data[:, :, i] = np.asarray([[0, 0, 0, 0],
|
| 1405 |
+
[0, 1, 1, 0],
|
| 1406 |
+
[0, 0, 0, 0]], dtype=np.float64)
|
| 1407 |
+
data = xp.asarray(data)
|
| 1408 |
+
expected = xp.asarray([[0, 0, 0],
|
| 1409 |
+
[0, 1, 0],
|
| 1410 |
+
[0, 1, 0],
|
| 1411 |
+
[0, 0, 0]], dtype=xp.float64)
|
| 1412 |
+
out = ndimage.rotate(data, 90, order=order)
|
| 1413 |
+
for i in range(3):
|
| 1414 |
+
assert_array_almost_equal(out[:, :, i], expected)
|
| 1415 |
+
|
| 1416 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1417 |
+
def test_rotate07(self, order, xp):
|
| 1418 |
+
data = xp.asarray([[[0, 0, 0, 0, 0],
|
| 1419 |
+
[0, 1, 1, 0, 0],
|
| 1420 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=xp.float64)
|
| 1421 |
+
permute_dims = array_namespace(data).permute_dims
|
| 1422 |
+
data = permute_dims(data, (2, 1, 0))
|
| 1423 |
+
expected = xp.asarray([[[0, 0, 0],
|
| 1424 |
+
[0, 1, 0],
|
| 1425 |
+
[0, 1, 0],
|
| 1426 |
+
[0, 0, 0],
|
| 1427 |
+
[0, 0, 0]]] * 2, dtype=xp.float64)
|
| 1428 |
+
expected = permute_dims(expected, (2, 1, 0))
|
| 1429 |
+
out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
|
| 1430 |
+
assert_array_almost_equal(out, expected)
|
| 1431 |
+
|
| 1432 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
| 1433 |
+
def test_rotate08(self, order, xp):
|
| 1434 |
+
data = xp.asarray([[[0, 0, 0, 0, 0],
|
| 1435 |
+
[0, 1, 1, 0, 0],
|
| 1436 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=xp.float64)
|
| 1437 |
+
permute_dims = array_namespace(data).permute_dims
|
| 1438 |
+
data = permute_dims(data, (2, 1, 0)) # == np.transpose
|
| 1439 |
+
expected = xp.asarray([[[0, 0, 1, 0, 0],
|
| 1440 |
+
[0, 0, 1, 0, 0],
|
| 1441 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=xp.float64)
|
| 1442 |
+
permute_dims = array_namespace(data).permute_dims
|
| 1443 |
+
expected = permute_dims(expected, (2, 1, 0))
|
| 1444 |
+
out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
|
| 1445 |
+
assert_array_almost_equal(out, expected)
|
| 1446 |
+
|
| 1447 |
+
def test_rotate09(self, xp):
|
| 1448 |
+
data = xp.asarray([[0, 0, 0, 0, 0],
|
| 1449 |
+
[0, 1, 1, 0, 0],
|
| 1450 |
+
[0, 0, 0, 0, 0]] * 2, dtype=xp.float64)
|
| 1451 |
+
with assert_raises(ValueError):
|
| 1452 |
+
ndimage.rotate(data, 90, axes=(0, data.ndim))
|
| 1453 |
+
|
| 1454 |
+
def test_rotate10(self, xp):
|
| 1455 |
+
data = xp.reshape(xp.arange(45, dtype=xp.float64), (3, 5, 3))
|
| 1456 |
+
|
| 1457 |
+
# The output of ndimage.rotate before refactoring
|
| 1458 |
+
expected = xp.asarray([[[0.0, 0.0, 0.0],
|
| 1459 |
+
[0.0, 0.0, 0.0],
|
| 1460 |
+
[6.54914793, 7.54914793, 8.54914793],
|
| 1461 |
+
[10.84520162, 11.84520162, 12.84520162],
|
| 1462 |
+
[0.0, 0.0, 0.0]],
|
| 1463 |
+
[[6.19286575, 7.19286575, 8.19286575],
|
| 1464 |
+
[13.4730712, 14.4730712, 15.4730712],
|
| 1465 |
+
[21.0, 22.0, 23.0],
|
| 1466 |
+
[28.5269288, 29.5269288, 30.5269288],
|
| 1467 |
+
[35.80713425, 36.80713425, 37.80713425]],
|
| 1468 |
+
[[0.0, 0.0, 0.0],
|
| 1469 |
+
[31.15479838, 32.15479838, 33.15479838],
|
| 1470 |
+
[35.45085207, 36.45085207, 37.45085207],
|
| 1471 |
+
[0.0, 0.0, 0.0],
|
| 1472 |
+
[0.0, 0.0, 0.0]]], dtype=xp.float64)
|
| 1473 |
+
|
| 1474 |
+
out = ndimage.rotate(data, angle=12, reshape=False)
|
| 1475 |
+
#assert_array_almost_equal(out, expected)
|
| 1476 |
+
xp_assert_close(out, expected, rtol=1e-6, atol=2e-6)
|
| 1477 |
+
|
| 1478 |
+
def test_rotate_exact_180(self, xp):
|
| 1479 |
+
if is_cupy(xp):
|
| 1480 |
+
pytest.xfail("https://github.com/cupy/cupy/issues/8400")
|
| 1481 |
+
|
| 1482 |
+
a = np.tile(xp.arange(5), (5, 1))
|
| 1483 |
+
b = ndimage.rotate(ndimage.rotate(a, 180), -180)
|
| 1484 |
+
xp_assert_equal(a, b)
|