diff --git a/.gitattributes b/.gitattributes index e578b55fb56c2f0a13724fd3ea28c7ee3aadd4b8..98a0f1909cc0e0f26dc606f309be28af678146a3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -659,3 +659,5 @@ llava_video/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_6 llava_video/lib/python3.10/site-packages/scipy/special/__pycache__/_add_newdocs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text llava_video/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text llava_video/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +llava_video/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llava_video/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/__init__.py b/llava_video/lib/python3.10/site-packages/scipy/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd4ffebec4c57f6d399a0f76df2b66056f0b225 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/datasets/__init__.py @@ -0,0 +1,90 @@ +""" +================================ +Datasets (:mod:`scipy.datasets`) +================================ + +.. currentmodule:: scipy.datasets + +Dataset Methods +=============== + +.. autosummary:: + :toctree: generated/ + + ascent + face + electrocardiogram + +Utility Methods +=============== + +.. autosummary:: + :toctree: generated/ + + download_all -- Download all the dataset files to specified path. + clear_cache -- Clear cached dataset directory. 
+ + +Usage of Datasets +================= + +SciPy dataset methods can be simply called as follows: ``'()'`` +This downloads the dataset files over the network once, and saves the cache, +before returning a `numpy.ndarray` object representing the dataset. + +Note that the return data structure and data type might be different for +different dataset methods. For a more detailed example on usage, please look +into the particular dataset method documentation above. + + +How dataset retrieval and storage works +======================================= + +SciPy dataset files are stored within individual GitHub repositories under the +SciPy GitHub organization, following a naming convention as +``'dataset-'``, for example `scipy.datasets.face` files live at +https://github.com/scipy/dataset-face. The `scipy.datasets` submodule utilizes +and depends on `Pooch `_, a Python +package built to simplify fetching data files. Pooch uses these repos to +retrieve the respective dataset files when calling the dataset function. + +A registry of all the datasets, essentially a mapping of filenames with their +SHA256 hash and repo urls are maintained, which Pooch uses to handle and verify +the downloads on function call. After downloading the dataset once, the files +are saved in the system cache directory under ``'scipy-data'``. + +Dataset cache locations may vary on different platforms. + +For macOS:: + + '~/Library/Caches/scipy-data' + +For Linux and other Unix-like platforms:: + + '~/.cache/scipy-data' # or the value of the XDG_CACHE_HOME env var, if defined + +For Windows:: + + 'C:\\Users\\\\AppData\\Local\\\\scipy-data\\Cache' + + +In environments with constrained network connectivity for various security +reasons or on systems without continuous internet connections, one may manually +load the cache of the datasets by placing the contents of the dataset repo in +the above mentioned cache directory to avoid fetching dataset errors without +the internet connectivity. 
+ +""" + + +from ._fetchers import face, ascent, electrocardiogram +from ._download_all import download_all +from ._utils import clear_cache + +__all__ = ['ascent', 'electrocardiogram', 'face', + 'download_all', 'clear_cache'] + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..909fcea8c242136fd40d1364e971d5659a0cd75d Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87544583fa46f4b9d06b16ba478b10badc620adf Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31e55c37a3f1376610628560d34c31c81d1f4669 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7ab75c445848106fd307f8f59635065ff781f4a Binary files 
/dev/null and b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96d2f3e699a0ee896c819c48f56428ddeb9c930f Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/_download_all.py b/llava_video/lib/python3.10/site-packages/scipy/datasets/_download_all.py new file mode 100644 index 0000000000000000000000000000000000000000..255fdcaf22950848f458a7ed9ada183e0a2e630e --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/datasets/_download_all.py @@ -0,0 +1,57 @@ +""" +Platform independent script to download all the +`scipy.datasets` module data files. +This doesn't require a full scipy build. + +Run: python _download_all.py +""" + +import argparse +try: + import pooch +except ImportError: + pooch = None + + +if __package__ is None or __package__ == '': + # Running as python script, use absolute import + import _registry # type: ignore +else: + # Running as python module, use relative import + from . import _registry + + +def download_all(path=None): + """ + Utility method to download all the dataset files + for `scipy.datasets` module. + + Parameters + ---------- + path : str, optional + Directory path to download all the dataset files. + If None, default to the system cache_dir detected by pooch. + """ + if pooch is None: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. 
Please use pip or " + "conda to install 'pooch'.") + if path is None: + path = pooch.os_cache('scipy-data') + for dataset_name, dataset_hash in _registry.registry.items(): + pooch.retrieve(url=_registry.registry_urls[dataset_name], + known_hash=dataset_hash, + fname=dataset_name, path=path) + + +def main(): + parser = argparse.ArgumentParser(description='Download SciPy data files.') + parser.add_argument("path", nargs='?', type=str, + default=pooch.os_cache('scipy-data'), + help="Directory path to download all the data files.") + args = parser.parse_args() + download_all(args.path) + + +if __name__ == "__main__": + main() diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/_fetchers.py b/llava_video/lib/python3.10/site-packages/scipy/datasets/_fetchers.py new file mode 100644 index 0000000000000000000000000000000000000000..57bb2fa6a12e753eb07a1f359ac04a29bd5c77e5 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/datasets/_fetchers.py @@ -0,0 +1,219 @@ +from numpy import array, frombuffer, load +from ._registry import registry, registry_urls + +try: + import pooch +except ImportError: + pooch = None + data_fetcher = None +else: + data_fetcher = pooch.create( + # Use the default cache folder for the operating system + # Pooch uses appdirs (https://github.com/ActiveState/appdirs) to + # select an appropriate directory for the cache on each platform. + path=pooch.os_cache("scipy-data"), + + # The remote data is on Github + # base_url is a required param, even though we override this + # using individual urls in the registry. + base_url="https://github.com/scipy/", + registry=registry, + urls=registry_urls + ) + + +def fetch_data(dataset_name, data_fetcher=data_fetcher): + if data_fetcher is None: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + # The "fetch" method returns the full path to the downloaded data file. 
+ return data_fetcher.fetch(dataset_name) + + +def ascent(): + """ + Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy + use in demos. + + The image is derived from + https://pixnio.com/people/accent-to-the-top + + Parameters + ---------- + None + + Returns + ------- + ascent : ndarray + convenient image to use for testing and demonstration + + Examples + -------- + >>> import scipy.datasets + >>> ascent = scipy.datasets.ascent() + >>> ascent.shape + (512, 512) + >>> ascent.max() + np.uint8(255) + + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.imshow(ascent) + >>> plt.show() + + """ + import pickle + + # The file will be downloaded automatically the first time this is run, + # returning the path to the downloaded file. Afterwards, Pooch finds + # it in the local cache and doesn't repeat the download. + fname = fetch_data("ascent.dat") + # Now we just need to load it with our standard Python tools. + with open(fname, 'rb') as f: + ascent = array(pickle.load(f)) + return ascent + + +def electrocardiogram(): + """ + Load an electrocardiogram as an example for a 1-D signal. + + The returned signal is a 5 minute long electrocardiogram (ECG), a medical + recording of the heart's electrical activity, sampled at 360 Hz. + + Returns + ------- + ecg : ndarray + The electrocardiogram in millivolt (mV) sampled at 360 Hz. + + Notes + ----- + The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_ + (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on + PhysioNet [2]_. The excerpt includes noise induced artifacts, typical + heartbeats as well as pathological changes. + + .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208 + + .. versionadded:: 1.1.0 + + References + ---------- + .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database. + IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001). + (PMID: 11446209); :doi:`10.13026/C2F305` + .. 
[2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, + Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank, + PhysioToolkit, and PhysioNet: Components of a New Research Resource + for Complex Physiologic Signals. Circulation 101(23):e215-e220; + :doi:`10.1161/01.CIR.101.23.e215` + + Examples + -------- + >>> from scipy.datasets import electrocardiogram + >>> ecg = electrocardiogram() + >>> ecg + array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385], shape=(108000,)) + >>> ecg.shape, ecg.mean(), ecg.std() + ((108000,), -0.16510875, 0.5992473991177294) + + As stated the signal features several areas with a different morphology. + E.g., the first few seconds show the electrical activity of a heart in + normal sinus rhythm as seen below. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> fs = 360 + >>> time = np.arange(ecg.size) / fs + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(9, 10.2) + >>> plt.ylim(-1, 1.5) + >>> plt.show() + + After second 16, however, the first premature ventricular contractions, + also called extrasystoles, appear. These have a different morphology + compared to typical heartbeats. The difference can easily be observed + in the following plot. + + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(46.5, 50) + >>> plt.ylim(-2, 1.5) + >>> plt.show() + + At several points large artifacts disturb the recording, e.g.: + + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(207, 215) + >>> plt.ylim(-2, 3.5) + >>> plt.show() + + Finally, examining the power spectrum reveals that most of the biosignal is + made up of lower frequencies. At 60 Hz the noise induced by the mains + electricity can be clearly observed. 
+ + >>> from scipy.signal import welch + >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum") + >>> plt.semilogy(f, Pxx) + >>> plt.xlabel("Frequency in Hz") + >>> plt.ylabel("Power spectrum of the ECG in mV**2") + >>> plt.xlim(f[[0, -1]]) + >>> plt.show() + """ + fname = fetch_data("ecg.dat") + with load(fname) as file: + ecg = file["ecg"].astype(int) # np.uint16 -> int + # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain + ecg = (ecg - 1024) / 200.0 + return ecg + + +def face(gray=False): + """ + Get a 1024 x 768, color image of a raccoon face. + + The image is derived from + https://pixnio.com/fauna-animals/raccoons/raccoon-procyon-lotor + + Parameters + ---------- + gray : bool, optional + If True return 8-bit grey-scale image, otherwise return a color image + + Returns + ------- + face : ndarray + image of a raccoon face + + Examples + -------- + >>> import scipy.datasets + >>> face = scipy.datasets.face() + >>> face.shape + (768, 1024, 3) + >>> face.max() + np.uint8(255) + + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.imshow(face) + >>> plt.show() + + """ + import bz2 + fname = fetch_data("face.dat") + with open(fname, 'rb') as f: + rawdata = f.read() + face_data = bz2.decompress(rawdata) + face = frombuffer(face_data, dtype='uint8') + face.shape = (768, 1024, 3) + if gray is True: + face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] + + 0.07 * face[:, :, 2]).astype('uint8') + return face diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/_registry.py b/llava_video/lib/python3.10/site-packages/scipy/datasets/_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..969384ad9843159e766100bfa9755aed8102dd09 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/datasets/_registry.py @@ -0,0 +1,26 @@ +########################################################################## +# This file serves as the dataset registry for SciPy Datasets SubModule. 
+########################################################################## + + +# To generate the SHA256 hash, use the command +# openssl sha256 +registry = { + "ascent.dat": "03ce124c1afc880f87b55f6b061110e2e1e939679184f5614e38dacc6c1957e2", + "ecg.dat": "f20ad3365fb9b7f845d0e5c48b6fe67081377ee466c3a220b7f69f35c8958baf", + "face.dat": "9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886" +} + +registry_urls = { + "ascent.dat": "https://raw.githubusercontent.com/scipy/dataset-ascent/main/ascent.dat", + "ecg.dat": "https://raw.githubusercontent.com/scipy/dataset-ecg/main/ecg.dat", + "face.dat": "https://raw.githubusercontent.com/scipy/dataset-face/main/face.dat" +} + +# dataset method mapping with their associated filenames +# : ["filename1", "filename2", ...] +method_files_map = { + "ascent": ["ascent.dat"], + "electrocardiogram": ["ecg.dat"], + "face": ["face.dat"] +} diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/_utils.py b/llava_video/lib/python3.10/site-packages/scipy/datasets/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8f644f8797d6e3256a16ec2c509eec725c726300 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/datasets/_utils.py @@ -0,0 +1,81 @@ +import os +import shutil +from ._registry import method_files_map + +try: + import platformdirs +except ImportError: + platformdirs = None # type: ignore[assignment] + + +def _clear_cache(datasets, cache_dir=None, method_map=None): + if method_map is None: + # Use SciPy Datasets method map + method_map = method_files_map + if cache_dir is None: + # Use default cache_dir path + if platformdirs is None: + # platformdirs is pooch dependency + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + cache_dir = platformdirs.user_cache_dir("scipy-data") + + if not os.path.exists(cache_dir): + print(f"Cache Directory {cache_dir} doesn't exist. 
Nothing to clear.") + return + + if datasets is None: + print(f"Cleaning the cache directory {cache_dir}!") + shutil.rmtree(cache_dir) + else: + if not isinstance(datasets, (list, tuple)): + # single dataset method passed should be converted to list + datasets = [datasets, ] + for dataset in datasets: + assert callable(dataset) + dataset_name = dataset.__name__ # Name of the dataset method + if dataset_name not in method_map: + raise ValueError(f"Dataset method {dataset_name} doesn't " + "exist. Please check if the passed dataset " + "is a subset of the following dataset " + f"methods: {list(method_map.keys())}") + + data_files = method_map[dataset_name] + data_filepaths = [os.path.join(cache_dir, file) + for file in data_files] + for data_filepath in data_filepaths: + if os.path.exists(data_filepath): + print("Cleaning the file " + f"{os.path.split(data_filepath)[1]} " + f"for dataset {dataset_name}") + os.remove(data_filepath) + else: + print(f"Path {data_filepath} doesn't exist. " + "Nothing to clear.") + + +def clear_cache(datasets=None): + """ + Cleans the scipy datasets cache directory. + + If a scipy.datasets method or a list/tuple of the same is + provided, then clear_cache removes all the data files + associated to the passed dataset method callable(s). + + By default, it removes all the cached data files. 
+ + Parameters + ---------- + datasets : callable or list/tuple of callable or None + + Examples + -------- + >>> from scipy import datasets + >>> ascent_array = datasets.ascent() + >>> ascent_array.shape + (512, 512) + >>> datasets.clear_cache([datasets.ascent]) + Cleaning the file ascent.dat for dataset ascent + """ + _clear_cache(datasets) diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py b/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66fba994484913e76a0bf326c8d4df793b7c590c Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2fd17c82813043ff95fe784b2e791f4843dd3e0 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py b/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..243176bd89b7b6f16406d66293d1872ac2712252 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py @@ -0,0 +1,128 @@ +from scipy.datasets._registry import registry +from 
scipy.datasets._fetchers import data_fetcher +from scipy.datasets._utils import _clear_cache +from scipy.datasets import ascent, face, electrocardiogram, download_all +from numpy.testing import assert_equal, assert_almost_equal +import os +from threading import get_ident +import pytest + +try: + import pooch +except ImportError: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + + +data_dir = data_fetcher.path # type: ignore + + +def _has_hash(path, expected_hash): + """Check if the provided path has the expected hash.""" + if not os.path.exists(path): + return False + return pooch.file_hash(path) == expected_hash + + +class TestDatasets: + + @pytest.fixture(scope='module', autouse=True) + def test_download_all(self): + # This fixture requires INTERNET CONNECTION + + # test_setup phase + download_all() + + yield + + @pytest.mark.fail_slow(10) + def test_existence_all(self): + assert len(os.listdir(data_dir)) >= len(registry) + + def test_ascent(self): + assert_equal(ascent().shape, (512, 512)) + + # hash check + assert _has_hash(os.path.join(data_dir, "ascent.dat"), + registry["ascent.dat"]) + + def test_face(self): + assert_equal(face().shape, (768, 1024, 3)) + + # hash check + assert _has_hash(os.path.join(data_dir, "face.dat"), + registry["face.dat"]) + + def test_electrocardiogram(self): + # Test shape, dtype and stats of signal + ecg = electrocardiogram() + assert_equal(ecg.dtype, float) + assert_equal(ecg.shape, (108000,)) + assert_almost_equal(ecg.mean(), -0.16510875) + assert_almost_equal(ecg.std(), 0.5992473991177294) + + # hash check + assert _has_hash(os.path.join(data_dir, "ecg.dat"), + registry["ecg.dat"]) + + +def test_clear_cache(tmp_path): + # Note: `tmp_path` is a pytest fixture, it handles cleanup + thread_basepath = tmp_path / str(get_ident()) + thread_basepath.mkdir() + + dummy_basepath = thread_basepath / "dummy_cache_dir" + 
dummy_basepath.mkdir() + + # Create three dummy dataset files for dummy dataset methods + dummy_method_map = {} + for i in range(4): + dummy_method_map[f"data{i}"] = [f"data{i}.dat"] + data_filepath = dummy_basepath / f"data{i}.dat" + data_filepath.write_text("") + + # clear files associated to single dataset method data0 + # also test callable argument instead of list of callables + def data0(): + pass + _clear_cache(datasets=data0, cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not os.path.exists(dummy_basepath/"data0.dat") + + # clear files associated to multiple dataset methods "data3" and "data4" + def data1(): + pass + + def data2(): + pass + _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not os.path.exists(dummy_basepath/"data1.dat") + assert not os.path.exists(dummy_basepath/"data2.dat") + + # clear multiple dataset files "data3_0.dat" and "data3_1.dat" + # associated with dataset method "data3" + def data4(): + pass + # create files + (dummy_basepath / "data4_0.dat").write_text("") + (dummy_basepath / "data4_1.dat").write_text("") + + dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"] + _clear_cache(datasets=[data4], cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not os.path.exists(dummy_basepath/"data4_0.dat") + assert not os.path.exists(dummy_basepath/"data4_1.dat") + + # wrong dataset method should raise ValueError since it + # doesn't exist in the dummy_method_map + def data5(): + pass + with pytest.raises(ValueError): + _clear_cache(datasets=[data5], cache_dir=dummy_basepath, + method_map=dummy_method_map) + + # remove all dataset cache + _clear_cache(datasets=None, cache_dir=dummy_basepath) + assert not os.path.exists(dummy_basepath) diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..334c150077246538299c570617f72b456a284fe7 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f51cb04ed132fdc73fd35ae29abc26b83d9a9109 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_cubature.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_cubature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19133f3101185ac0e11a716b5a41dd8faf79120b Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_cubature.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dce5eb0ef0f6f60419b9203216927db9a9ebad1e Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7ccb9779c7cb782389c199dc5f9682655e2c191 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc differ diff --git 
a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0102e25a012a6cade0e971afe24bb222a6746118 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc148b20bfb7b18ad963c344b8beb01231d6ed35 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1dd9398cb64941d9d342b792243b5a6e83be8e3 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..528587f11fe04e56306cc742d9108e0ff7c17d9f Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..3ddfc8eb020f89a72458af916111eca76e3df19e Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f80e105534b2c8d23e2bd46e531432b70162a72 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3af2fcf27f6fd1dafdbc2c8a0e80a3cee3f7b27f Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..824906e599867e90463d36a72eff356d1611a95a Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9d135d6cb1f82d58f09e276194157ce73439057 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc differ diff --git 
a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ac3ee809ff31fe498d1b2ab0b0aaea3c37a91b3 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d39cf3de6cc94661f0def4a18fb79f200d8308f Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..074b8108265a2edbf5083084b28ffb0c600c9dbf Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a07de9c72c36c4b1a40ecc71b0f6838eecb9631b Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc differ diff --git 
a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_cubature.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_cubature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9497145f1444bbf00ba320d7ae4ddcf15e200056 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_cubature.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bda9843c802b456c55037593288aa35b3c34203 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfc0e38e3843671897c652b756b5635ae1704525 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f533188bffde05177072947acbcaba0f83c2010 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc differ diff --git 
a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18a0418c068ede45abd1afee06a3b9d3e02226c3 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dff963212f9512a0d6ca15a1fa740dcecc2fcd3 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py new file mode 100644 index 0000000000000000000000000000000000000000..358c5e3d1fcfe7ccd7e3691bd9af2f47656f4e2b --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py @@ -0,0 +1,220 @@ +import itertools +import pytest +import numpy as np +from numpy.testing import assert_allclose +from scipy.integrate import ode + + +def _band_count(a): + """Returns ml and mu, the lower and upper band sizes of a.""" + nrows, ncols = a.shape + ml = 0 + for k in range(-nrows+1, 0): + if np.diag(a, k).any(): + ml = -k + break + mu = 0 + for k in range(nrows-1, 0, -1): + if np.diag(a, k).any(): + mu = k + break + return ml, mu + + +def _linear_func(t, y, a): + """Linear system dy/dt = a * y""" + return a.dot(y) + + +def _linear_jac(t, y, a): + """Jacobian of a * y is a.""" + return a + + +def _linear_banded_jac(t, 
y, a): + """Banded Jacobian.""" + ml, mu = _band_count(a) + bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)] + bjac.append(np.diag(a)) + for k in range(-1, -ml-1, -1): + bjac.append(np.r_[np.diag(a, k), [0] * (-k)]) + return bjac + + +def _solve_linear_sys(a, y0, tend=1, dt=0.1, + solver=None, method='bdf', use_jac=True, + with_jacobian=False, banded=False): + """Use scipy.integrate.ode to solve a linear system of ODEs. + + a : square ndarray + Matrix of the linear system to be solved. + y0 : ndarray + Initial condition + tend : float + Stop time. + dt : float + Step size of the output. + solver : str + If not None, this must be "vode", "lsoda" or "zvode". + method : str + Either "bdf" or "adams". + use_jac : bool + Determines if the jacobian function is passed to ode(). + with_jacobian : bool + Passed to ode.set_integrator(). + banded : bool + Determines whether a banded or full jacobian is used. + If `banded` is True, `lband` and `uband` are determined by the + values in `a`. + """ + if banded: + lband, uband = _band_count(a) + else: + lband = None + uband = None + + if use_jac: + if banded: + r = ode(_linear_func, _linear_banded_jac) + else: + r = ode(_linear_func, _linear_jac) + else: + r = ode(_linear_func) + + if solver is None: + if np.iscomplexobj(a): + solver = "zvode" + else: + solver = "vode" + + r.set_integrator(solver, + with_jacobian=with_jacobian, + method=method, + lband=lband, uband=uband, + rtol=1e-9, atol=1e-10, + ) + t0 = 0 + r.set_initial_value(y0, t0) + r.set_f_params(a) + r.set_jac_params(a) + + t = [t0] + y = [y0] + while r.successful() and r.t < tend: + r.integrate(r.t + dt) + t.append(r.t) + y.append(r.y) + + t = np.array(t) + y = np.array(y) + return t, y + + +def _analytical_solution(a, y0, t): + """ + Analytical solution to the linear differential equations dy/dt = a*y. + + The solution is only valid if `a` is diagonalizable. + + Returns a 2-D array with shape (len(t), len(y0)). 
+ """ + lam, v = np.linalg.eig(a) + c = np.linalg.solve(v, y0) + e = c * np.exp(lam * t.reshape(-1, 1)) + sol = e.dot(v.T) + return sol + + +@pytest.mark.thread_unsafe +def test_banded_ode_solvers(): + # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class + # with a system that has a banded Jacobian matrix. + + t_exact = np.linspace(0, 1.0, 5) + + # --- Real arrays for testing the "lsoda" and "vode" solvers --- + + # lband = 2, uband = 1: + a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0], + [0.2, -0.5, 0.9, 0.0, 0.0], + [0.1, 0.1, -0.4, 0.1, 0.0], + [0.0, 0.3, -0.1, -0.9, -0.3], + [0.0, 0.0, 0.1, 0.1, -0.7]]) + + # lband = 0, uband = 1: + a_real_upper = np.triu(a_real) + + # lband = 2, uband = 0: + a_real_lower = np.tril(a_real) + + # lband = 0, uband = 0: + a_real_diag = np.triu(a_real_lower) + + real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag] + real_solutions = [] + + for a in real_matrices: + y0 = np.arange(1, a.shape[0] + 1) + y_exact = _analytical_solution(a, y0, t_exact) + real_solutions.append((y0, t_exact, y_exact)) + + def check_real(idx, solver, meth, use_jac, with_jac, banded): + a = real_matrices[idx] + y0, t_exact, y_exact = real_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(real_matrices)): + p = [['vode', 'lsoda'], # solver + ['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for solver, meth, use_jac, with_jac, banded in itertools.product(*p): + check_real(idx, solver, meth, use_jac, with_jac, banded) + + # --- Complex arrays for testing the "zvode" solver --- + + # complex, lband = 2, uband = 1: + a_complex = a_real - 0.5j * a_real + + # complex, lband = 0, uband = 0: + a_complex_diag = np.diag(np.diag(a_complex)) + + 
complex_matrices = [a_complex, a_complex_diag] + complex_solutions = [] + + for a in complex_matrices: + y0 = np.arange(1, a.shape[0] + 1) + 1j + y_exact = _analytical_solution(a, y0, t_exact) + complex_solutions.append((y0, t_exact, y_exact)) + + def check_complex(idx, solver, meth, use_jac, with_jac, banded): + a = complex_matrices[idx] + y0, t_exact, y_exact = complex_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(complex_matrices)): + p = [['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for meth, use_jac, with_jac, banded in itertools.product(*p): + check_complex(idx, "zvode", meth, use_jac, with_jac, banded) diff --git a/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py new file mode 100644 index 0000000000000000000000000000000000000000..44bfecdaac0f00b413538510c61dd1317a076261 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py @@ -0,0 +1,840 @@ +# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers +""" +Tests for numerical integration. 
+""" +import numpy as np +from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, + allclose) + +from numpy.testing import ( + assert_, assert_array_almost_equal, + assert_allclose, assert_array_equal, assert_equal, assert_warns) +import pytest +from pytest import raises as assert_raises +from scipy.integrate import odeint, ode, complex_ode + +#------------------------------------------------------------------------------ +# Test ODE integrators +#------------------------------------------------------------------------------ + + +class TestOdeint: + # Check integrate.odeint + + def _do_problem(self, problem): + t = arange(0.0, problem.stop_t, 0.05) + + # Basic case + z, infodict = odeint(problem.f, problem.z0, t, full_output=True) + assert_(problem.verify(z, t)) + + # Use tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + if hasattr(problem, 'jac'): + # Use Dfun + z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac, + full_output=True) + assert_(problem.verify(z, t)) + + # Use Dfun and tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + Dfun=lambda t, y: problem.jac(y, t), + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + def test_odeint(self): + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem) + + +class TestODEClass: + + ode_class = None # Set in subclass. 
+ + def _do_problem(self, problem, integrator, method='adams'): + + # ode has callback arguments in different order than odeint + def f(t, z): + return problem.f(z, t) + jac = None + if hasattr(problem, 'jac'): + def jac(t, z): + return problem.jac(z, t) + + integrator_params = {} + if problem.lband is not None or problem.uband is not None: + integrator_params['uband'] = problem.uband + integrator_params['lband'] = problem.lband + + ig = self.ode_class(f, jac) + ig.set_integrator(integrator, + atol=problem.atol/10, + rtol=problem.rtol/10, + method=method, + **integrator_params) + + ig.set_initial_value(problem.z0, t=0.0) + z = ig.integrate(problem.stop_t) + + assert_array_equal(z, ig.y) + assert_(ig.successful(), (problem, method)) + assert_(ig.get_return_code() > 0, (problem, method)) + assert_(problem.verify(array([z]), problem.stop_t), (problem, method)) + + +class TestOde(TestODEClass): + + ode_class = ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + self._do_problem(problem, 'vode', 'bdf') + + def test_zvode(self): + # Check the zvode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'zvode', 'adams') + self._do_problem(problem, 'zvode', 'bdf') + + def test_lsoda(self): + # Check the lsoda solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + 
continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + @pytest.mark.thread_unsafe + def test_concurrent_fail(self): + for sol in ('vode', 'zvode', 'lsoda'): + def f(t, y): + return 1.0 + + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_raises(RuntimeError, r.integrate, r.t + 0.1) + + def test_concurrent_ok(self, num_parallel_threads): + def f(t, y): + return 1.0 + + for k in range(3): + for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'): + if sol in {'vode', 'zvode', 'lsoda'} and num_parallel_threads > 1: + continue + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.1) + assert_allclose(r2.y, 0.2) + + for sol in ('dopri5', 'dop853'): + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.3) + assert_allclose(r2.y, 0.2) + + +class TestComplexOde(TestODEClass): + + ode_class = complex_ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + else: + self._do_problem(problem, 'vode', 'bdf') + + def test_lsoda(self): + + # Check the lsoda solver + for problem_cls in PROBLEMS: + problem = problem_cls() + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + 
def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + +class TestSolout: + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_after_initial_test(self, integrator): + # Check if solout works even if it is set after the initial value. 
+ ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_initial_value(y0, t0) + ig.set_solout(solout) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout_after_initial(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_after_initial_test(integrator) + + def _run_solout_break_test(self, integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +class TestComplexSolout: + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_break_test(self, 
integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + + def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +#------------------------------------------------------------------------------ +# Test problems +#------------------------------------------------------------------------------ + + +class ODE: + """ + ODE problem + """ + stiff = False + cmplx = False + stop_t = 1 + z0 = [] + + lband = None + uband = None + + atol = 1e-6 + rtol = 1e-5 + + +class SimpleOscillator(ODE): + r""" + Free vibration of a simple oscillator:: + m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0 + Solution:: + u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m) + """ + stop_t = 1 + 0.09 + z0 = array([1.0, 0.1], float) + + k = 4.0 + m = 1.0 + + def f(self, z, t): + tmp = zeros((2, 2), float) + tmp[0, 1] = 1.0 + tmp[1, 0] = -self.k / self.m + return dot(tmp, z) + + def verify(self, zs, t): + omega = sqrt(self.k / self.m) + u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega + return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol) + + +class ComplexExp(ODE): + r"""The equation :lm:`\dot u = i u`""" + stop_t = 1.23*pi + z0 = exp([1j, 2j, 3j, 4j, 5j]) + cmplx = True + + def f(self, z, t): + return 1j*z + + def jac(self, z, t): + return 1j*eye(5) + + def verify(self, zs, t): + u = self.z0 * exp(1j*t) + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +class Pi(ODE): + r"""Integrate 1/(t + 1j) from t=-10 to 
t=10""" + stop_t = 20 + z0 = [0] + cmplx = True + + def f(self, z, t): + return array([1./(t - 10 + 1j)]) + + def verify(self, zs, t): + u = -2j * np.arctan(10) + return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol) + + +class CoupledDecay(ODE): + r""" + 3 coupled decays suited for banded treatment + (banded mode makes it necessary when N>>3) + """ + + stiff = True + stop_t = 0.5 + z0 = [5.0, 7.0, 13.0] + lband = 1 + uband = 0 + + lmbd = [0.17, 0.23, 0.29] # fictitious decay constants + + def f(self, z, t): + lmbd = self.lmbd + return np.array([-lmbd[0]*z[0], + -lmbd[1]*z[1] + lmbd[0]*z[0], + -lmbd[2]*z[2] + lmbd[1]*z[1]]) + + def jac(self, z, t): + # The full Jacobian is + # + # [-lmbd[0] 0 0 ] + # [ lmbd[0] -lmbd[1] 0 ] + # [ 0 lmbd[1] -lmbd[2]] + # + # The lower and upper bandwidths are lband=1 and uband=0, resp. + # The representation of this array in packed format is + # + # [-lmbd[0] -lmbd[1] -lmbd[2]] + # [ lmbd[0] lmbd[1] 0 ] + + lmbd = self.lmbd + j = np.zeros((self.lband + self.uband + 1, 3), order='F') + + def set_j(ri, ci, val): + j[self.uband + ri - ci, ci] = val + set_j(0, 0, -lmbd[0]) + set_j(1, 0, lmbd[0]) + set_j(1, 1, -lmbd[1]) + set_j(2, 1, lmbd[1]) + set_j(2, 2, -lmbd[2]) + return j + + def verify(self, zs, t): + # Formulae derived by hand + lmbd = np.array(self.lmbd) + d10 = lmbd[1] - lmbd[0] + d21 = lmbd[2] - lmbd[1] + d20 = lmbd[2] - lmbd[0] + e0 = np.exp(-lmbd[0] * t) + e1 = np.exp(-lmbd[1] * t) + e2 = np.exp(-lmbd[2] * t) + u = np.vstack(( + self.z0[0] * e0, + self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1), + self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) + + lmbd[1] * lmbd[0] * self.z0[0] / d10 * + (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose() + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay] + +#------------------------------------------------------------------------------ + + +def f(t, x): + dxdt = [x[1], -x[0]] + return dxdt 
+ + +def jac(t, x): + j = array([[0.0, 1.0], + [-1.0, 0.0]]) + return j + + +def f1(t, x, omega): + dxdt = [omega*x[1], -omega*x[0]] + return dxdt + + +def jac1(t, x, omega): + j = array([[0.0, omega], + [-omega, 0.0]]) + return j + + +def f2(t, x, omega1, omega2): + dxdt = [omega1*x[1], -omega2*x[0]] + return dxdt + + +def jac2(t, x, omega1, omega2): + j = array([[0.0, omega1], + [-omega2, 0.0]]) + return j + + +def fv(t, x, omega): + dxdt = [omega[0]*x[1], -omega[1]*x[0]] + return dxdt + + +def jacv(t, x, omega): + j = array([[0.0, omega[0]], + [-omega[1], 0.0]]) + return j + + +class ODECheckParameterUse: + """Call an ode-class solver with several cases of parameter use.""" + + # solver_name must be set before tests can be run with this class. + + # Set these in subclasses. + solver_name = '' + solver_uses_jac = False + + def _get_solver(self, f, jac): + solver = ode(f, jac) + if self.solver_uses_jac: + solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7, + with_jacobian=self.solver_uses_jac) + else: + # XXX Shouldn't set_integrator *always* accept the keyword arg + # 'with_jacobian', and perhaps raise an exception if it is set + # to True if the solver can't actually use it? 
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7) + return solver + + def _check_solver(self, solver): + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + solver.integrate(pi) + assert_array_almost_equal(solver.y, [-1.0, 0.0]) + + def test_no_params(self): + solver = self._get_solver(f, jac) + self._check_solver(solver) + + def test_one_scalar_param(self): + solver = self._get_solver(f1, jac1) + omega = 1.0 + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + def test_two_scalar_params(self): + solver = self._get_solver(f2, jac2) + omega1 = 1.0 + omega2 = 1.0 + solver.set_f_params(omega1, omega2) + if self.solver_uses_jac: + solver.set_jac_params(omega1, omega2) + self._check_solver(solver) + + def test_vector_param(self): + solver = self._get_solver(fv, jacv) + omega = [1.0, 1.0] + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + @pytest.mark.thread_unsafe + def test_warns_on_failure(self): + # Set nsteps small to ensure failure + solver = self._get_solver(f, jac) + solver.set_integrator(self.solver_name, nsteps=1) + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + assert_warns(UserWarning, solver.integrate, pi) + + +class TestDOPRI5CheckParameterUse(ODECheckParameterUse): + solver_name = 'dopri5' + solver_uses_jac = False + + +class TestDOP853CheckParameterUse(ODECheckParameterUse): + solver_name = 'dop853' + solver_uses_jac = False + + +class TestVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'vode' + solver_uses_jac = True + + +class TestZVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'zvode' + solver_uses_jac = True + + +class TestLSODACheckParameterUse(ODECheckParameterUse): + solver_name = 'lsoda' + solver_uses_jac = True + + +def test_odeint_trivial_time(): + # Test that odeint succeeds when given a single time point + # and full_output=True. 
This is a regression test for gh-4282. + y0 = 1 + t = [0] + y, info = odeint(lambda y, t: -y, y0, t, full_output=True) + assert_array_equal(y, np.array([[y0]])) + + +def test_odeint_banded_jacobian(): + # Test the use of the `Dfun`, `ml` and `mu` options of odeint. + + def func(y, t, c): + return c.dot(y) + + def jac(y, t, c): + return c + + def jac_transpose(y, t, c): + return c.T.copy(order='C') + + def bjac_rows(y, t, c): + jac = np.vstack((np.r_[0, np.diag(c, 1)], + np.diag(c), + np.r_[np.diag(c, -1), 0], + np.r_[np.diag(c, -2), 0, 0])) + return jac + + def bjac_cols(y, t, c): + return bjac_rows(y, t, c).T.copy(order='C') + + c = array([[-205, 0.01, 0.00, 0.0], + [0.1, -2.50, 0.02, 0.0], + [1e-3, 0.01, -2.0, 0.01], + [0.00, 0.00, 0.1, -1.0]]) + + y0 = np.ones(4) + t = np.array([0, 5, 10, 100]) + + # Use the full Jacobian. + sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac) + + # Use the transposed full Jacobian, with col_deriv=True. + sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac_transpose, col_deriv=True) + + # Use the banded Jacobian. + sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_rows, ml=2, mu=1) + + # Use the transposed banded Jacobian, with col_deriv=True. + sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_cols, ml=2, mu=1, col_deriv=True) + + assert_allclose(sol1, sol2, err_msg="sol1 != sol2") + assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3") + assert_allclose(sol3, sol4, err_msg="sol3 != sol4") + + # Verify that the number of jacobian evaluations was the same for the + # calls of odeint with a full jacobian and with a banded jacobian. 
This is + # a regression test--there was a bug in the handling of banded jacobians + # that resulted in an incorrect jacobian matrix being passed to the LSODA + # code. That would cause errors or excessive jacobian evaluations. + assert_array_equal(info1['nje'], info2['nje']) + assert_array_equal(info3['nje'], info4['nje']) + + # Test the use of tfirst + sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,), + full_output=True, atol=1e-13, rtol=1e-11, + mxstep=10000, + Dfun=lambda t, y, c: jac(y, t, c), tfirst=True) + # The code should execute the exact same sequence of floating point + # calculations, so these should be exactly equal. We'll be safe and use + # a small tolerance. + assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty") + + +def test_odeint_errors(): + def sys1d(x, t): + return -100*x + + def bad1(x, t): + return 1.0/0 + + def bad2(x, t): + return "foo" + + def bad_jac1(x, t): + return 1.0/0 + + def bad_jac2(x, t): + return [["foo"]] + + def sys2d(x, t): + return [-100*x[0], -0.1*x[1]] + + def sys2d_bad_jac(x, t): + return [[1.0/0, 0], [0, -0.1]] + + assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1]) + assert_raises(ValueError, odeint, bad2, 1.0, [0, 1]) + + assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1) + assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2) + + assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1], + Dfun=sys2d_bad_jac) + + +def test_odeint_bad_shapes(): + # Tests of some errors that can occur with odeint. + + def badrhs(x, t): + return [1, -1] + + def sys1(x, t): + return -100*x + + def badjac(x, t): + return [[0, 0, 0]] + + # y0 must be at most 1-d. + bad_y0 = [[0, 0], [0, 0]] + assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1]) + + # t must be at most 1-d. + bad_t = [[0, 1], [2, 3]] + assert_raises(ValueError, odeint, sys1, [10.0], bad_t) + + # y0 is 10, but badrhs(x, t) returns [1, -1]. 
+ assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1]) + + # shape of array returned by badjac(x, t) is not correct. + assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac) + + +def test_repeated_t_values(): + """Regression test for gh-8217.""" + + def func(x, t): + return -0.25*x + + t = np.zeros(10) + sol = odeint(func, [1.], t) + assert_array_equal(sol, np.ones((len(t), 1))) + + tau = 4*np.log(2) + t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau] + sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12) + expected_sol = np.array([[1.0, 2.0]]*9 + + [[0.5, 1.0], + [0.25, 0.5], + [0.25, 0.5], + [0.125, 0.25]]) + assert_allclose(sol, expected_sol) + + # Edge case: empty t sequence. + sol = odeint(func, [1.], []) + assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1))) + + # t values are not monotonic. + assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0]) + assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3]) diff --git a/llava_video/lib/python3.10/site-packages/scipy/ndimage/__init__.py b/llava_video/lib/python3.10/site-packages/scipy/ndimage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2e9d9f6ff99218088fd9e693aaca00ca8a070040 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/ndimage/__init__.py @@ -0,0 +1,173 @@ +""" +========================================================= +Multidimensional image processing (:mod:`scipy.ndimage`) +========================================================= + +.. currentmodule:: scipy.ndimage + +This package contains various functions for multidimensional image +processing. + + +Filters +======= + +.. 
autosummary:: + :toctree: generated/ + + convolve - Multidimensional convolution + convolve1d - 1-D convolution along the given axis + correlate - Multidimensional correlation + correlate1d - 1-D correlation along the given axis + gaussian_filter + gaussian_filter1d + gaussian_gradient_magnitude + gaussian_laplace + generic_filter - Multidimensional filter using a given function + generic_filter1d - 1-D generic filter along the given axis + generic_gradient_magnitude + generic_laplace + laplace - N-D Laplace filter based on approximate second derivatives + maximum_filter + maximum_filter1d + median_filter - Calculates a multidimensional median filter + minimum_filter + minimum_filter1d + percentile_filter - Calculates a multidimensional percentile filter + prewitt + rank_filter - Calculates a multidimensional rank filter + sobel + uniform_filter - Multidimensional uniform filter + uniform_filter1d - 1-D uniform filter along the given axis + +Fourier filters +=============== + +.. autosummary:: + :toctree: generated/ + + fourier_ellipsoid + fourier_gaussian + fourier_shift + fourier_uniform + +Interpolation +============= + +.. autosummary:: + :toctree: generated/ + + affine_transform - Apply an affine transformation + geometric_transform - Apply an arbitrary geometric transform + map_coordinates - Map input array to new coordinates by interpolation + rotate - Rotate an array + shift - Shift an array + spline_filter + spline_filter1d + zoom - Zoom an array + +Measurements +============ + +.. 
autosummary:: + :toctree: generated/ + + center_of_mass - The center of mass of the values of an array at labels + extrema - Min's and max's of an array at labels, with their positions + find_objects - Find objects in a labeled array + histogram - Histogram of the values of an array, optionally at labels + label - Label features in an array + labeled_comprehension + maximum + maximum_position + mean - Mean of the values of an array at labels + median + minimum + minimum_position + standard_deviation - Standard deviation of an N-D image array + sum_labels - Sum of the values of the array + value_indices - Find indices of each distinct value in given array + variance - Variance of the values of an N-D image array + watershed_ift + +Morphology +========== + +.. autosummary:: + :toctree: generated/ + + binary_closing + binary_dilation + binary_erosion + binary_fill_holes + binary_hit_or_miss + binary_opening + binary_propagation + black_tophat + distance_transform_bf + distance_transform_cdt + distance_transform_edt + generate_binary_structure + grey_closing + grey_dilation + grey_erosion + grey_opening + iterate_structure + morphological_gradient + morphological_laplace + white_tophat + +""" + +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# bring in the public functionality from private namespaces + +# mypy: ignore-errors + +from ._support_alternative_backends import * + +# adjust __all__ and do not leak implementation details +from . import _support_alternative_backends +__all__ = _support_alternative_backends.__all__ +del _support_alternative_backends, _ndimage_api, _delegators # noqa: F821 + + +# Deprecated namespaces, to be removed in v2.0.0 +from . import filters +from . import fourier +from . import interpolation +from . import measurements +from . 
import morphology + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llava_video/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0d05e123ba1f7f45c1f37795e7ba5cd0257018b4 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/ndimage/_delegators.py b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_delegators.py new file mode 100644 index 0000000000000000000000000000000000000000..9647ea6456426c9a62178ff277b0f35017a8310b --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_delegators.py @@ -0,0 +1,297 @@ +"""Delegators for alternative backends in scipy.ndimage. + +The signature of `func_signature` must match the signature of ndimage.func. +The job of a `func_signature` is to know which arguments of `ndimage.func` +are arrays. 
+ +* signatures are generated by + +-------------- +import inspect +from scipy import ndimage + +names = [x for x in dir(ndimage) if not x.startswith('_')] +objs = [getattr(ndimage, name) for name in names] +funcs = [obj for obj in objs if inspect.isroutine(obj)] + +for func in funcs: + sig = inspect.signature(func) + print(f"def {func.__name__}_signature{sig}:\n\tpass\n\n") +--------------- + +* which arguments to delegate on: manually trawled the documentation for + array-like and array arguments + +""" +import numpy as np +from scipy._lib._array_api import array_namespace +from scipy.ndimage._ni_support import _skip_if_dtype, _skip_if_int + + +def affine_transform_signature( + input, matrix, offset=0.0, output_shape=None, output=None, *args, **kwds +): + return array_namespace(input, matrix, _skip_if_dtype(output)) + + +def binary_closing_signature( + input, structure=None, iterations=1, output=None, *args, **kwds +): + return array_namespace(input, structure, _skip_if_dtype(output)) + +binary_opening_signature = binary_closing_signature + + +def binary_dilation_signature( + input, structure=None, iterations=1, mask=None, output=None, *args, **kwds +): + return array_namespace(input, structure, _skip_if_dtype(output), mask) + +binary_erosion_signature = binary_dilation_signature + + +def binary_fill_holes_signature( + input, structure=None, output=None, origin=0, *args, **kwargs +): + return array_namespace(input, structure, _skip_if_dtype(output)) + + +def label_signature(input, structure=None, output=None, origin=0): + return array_namespace(input, structure, _skip_if_dtype(output)) + + +def binary_hit_or_miss_signature( + input, structure1=None, structure2=None, output=None, *args, **kwds +): + return array_namespace(input, structure1, structure2, _skip_if_dtype(output)) + + +def binary_propagation_signature( + input, structure=None, mask=None, output=None, *args, **kwds +): + return array_namespace(input, structure, mask, _skip_if_dtype(output)) + + +def 
convolve_signature(input, weights, output=None, *args, **kwds): + return array_namespace(input, weights, _skip_if_dtype(output)) + +correlate_signature = convolve_signature + + +def convolve1d_signature(input, weights, axis=-1, output=None, *args, **kwds): + return array_namespace(input, weights, _skip_if_dtype(output)) + +correlate1d_signature = convolve1d_signature + + +def distance_transform_bf_signature( + input, metric='euclidean', sampling=None, return_distances=True, + return_indices=False, distances=None, indices=None +): + return array_namespace(input, distances, indices) + + +def distance_transform_cdt_signature( + input, metric='chessboard', return_distances=True, return_indices=False, + distances=None, indices=None +): + return array_namespace(input, distances, indices) + + +def distance_transform_edt_signature( + input, sampling=None, return_distances=True, return_indices=False, + distances=None, indices=None +): + return array_namespace(input, distances, indices) + + +def find_objects_signature(input, max_label=0): + return array_namespace(input) + + +def fourier_ellipsoid_signature(input, size, n=-1, axis=-1, output=None): + return array_namespace(input, _skip_if_dtype(output)) + +fourier_uniform_signature = fourier_ellipsoid_signature + + +def fourier_gaussian_signature(input, sigma, n=-1, axis=-1, output=None): + return array_namespace(input, _skip_if_dtype(output)) + +def fourier_shift_signature(input, shift, n=-1, axis=-1, output=None): + return array_namespace(input, _skip_if_dtype(output)) + + +def gaussian_filter_signature(input, sigma, order=0, output=None, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + + +def gaussian_filter1d_signature( + input, sigma, axis=-1, order=0, output=None, *args, **kwds +): + return array_namespace(input, _skip_if_dtype(output)) + + +def gaussian_gradient_magnitude_signature(input, sigma, output=None, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + 
+gaussian_laplace_signature = gaussian_gradient_magnitude_signature + + +def generate_binary_structure_signature(rank, connectivity): + # XXX: no input arrays; always return numpy + return np + + +def generic_filter_signature( + input, function, size=None, footprint=None, output=None, *args, **kwds +): + # XXX: function LowLevelCallable w/backends + return array_namespace(input, footprint, _skip_if_dtype(output)) + + +def generic_filter1d_signature( + input, function, filter_size, axis=-1, output=None, *args, **kwds +): + return array_namespace(input, _skip_if_dtype(output)) + + +def generic_gradient_magnitude_signature( + input, derivative, output=None, *args, **kwds +): + # XXX: function LowLevelCallable w/backends + return array_namespace(input, _skip_if_dtype(output)) + + +def generic_laplace_signature(input, derivative2, output=None, *args, **kwds): + # XXX: function LowLevelCallable w/backends + return array_namespace(input, _skip_if_dtype(output)) + + +def geometric_transform_signature( + input, mapping, output_shape=None, output=None, *args, **kwds +): + return array_namespace(input, _skip_if_dtype(output)) + + +def histogram_signature(input, min, max, bins, labels=None, index=None): + return array_namespace(input, labels) + + +def iterate_structure_signature(structure, iterations, origin=None): + return array_namespace(structure) + + +def labeled_comprehension_signature(input, labels, *args, **kwds): + return array_namespace(input, labels) + + +def laplace_signature(input, output=None, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + + +def map_coordinates_signature(input, coordinates, output=None, *args, **kwds): + return array_namespace(input, coordinates, _skip_if_dtype(output)) + + +def maximum_filter1d_signature(input, size, axis=-1, output=None, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + +minimum_filter1d_signature = maximum_filter1d_signature +uniform_filter1d_signature = 
maximum_filter1d_signature + + +def maximum_signature(input, labels=None, index=None): + return array_namespace(input, labels, _skip_if_int(index)) + +minimum_signature = maximum_signature +median_signature = maximum_signature +mean_signature = maximum_signature +variance_signature = maximum_signature +standard_deviation_signature = maximum_signature +sum_labels_signature = maximum_signature +sum_signature = maximum_signature # ndimage.sum is sum_labels + +maximum_position_signature = maximum_signature +minimum_position_signature = maximum_signature + +extrema_signature = maximum_signature +center_of_mass_signature = extrema_signature + + +def median_filter_signature( + input, size=None, footprint=None, output=None, *args, **kwds +): + return array_namespace(input, footprint, _skip_if_dtype(output)) + +minimum_filter_signature = median_filter_signature +maximum_filter_signature = median_filter_signature + + +def morphological_gradient_signature( + input, size=None, footprint=None, structure=None, output=None, *args, **kwds +): + return array_namespace(input, footprint, structure, _skip_if_dtype(output)) + +morphological_laplace_signature = morphological_gradient_signature +white_tophat_signature = morphological_gradient_signature +black_tophat_signature = morphological_gradient_signature +grey_closing_signature = morphological_gradient_signature +grey_dilation_signature = morphological_gradient_signature +grey_erosion_signature = morphological_gradient_signature +grey_opening_signature = morphological_gradient_signature + + +def percentile_filter_signature( + input, percentile, size=None, footprint=None, output=None, *args, **kwds +): + return array_namespace(input, footprint, _skip_if_dtype(output)) + + +def prewitt_signature(input, axis=-1, output=None, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + +sobel_signature = prewitt_signature + + +def rank_filter_signature( + input, rank, size=None, footprint=None, output=None, *args, **kwds 
+): + return array_namespace(input, footprint, _skip_if_dtype(output)) + + +def rotate_signature( + input, angle, axes=(1, 0), reshape=True, output=None , *args, **kwds +): + return array_namespace(input, _skip_if_dtype(output)) + + +def shift_signature(input, shift, output=None, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + + +def spline_filter_signature(input, order=3, output=np.float64, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + + +def spline_filter1d_signature( + input, order=3, axis=-1, output=np.float64, *args, **kwds +): + return array_namespace(input, _skip_if_dtype(output)) + + +def uniform_filter_signature(input, size=3, output=None, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + + +def value_indices_signature(arr, *args, **kwds): + return array_namespace(arr) + + +def watershed_ift_signature(input, markers, structure=None, output=None): + return array_namespace(input, markers, structure, _skip_if_dtype(output)) + + +def zoom_signature(input, zoom, output=None, *args, **kwds): + return array_namespace(input, _skip_if_dtype(output)) + diff --git a/llava_video/lib/python3.10/site-packages/scipy/ndimage/_filters.py b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_filters.py new file mode 100644 index 0000000000000000000000000000000000000000..710ea60c03653cc80ac3bd1eefd425b4268a5246 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_filters.py @@ -0,0 +1,1965 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from collections.abc import Iterable +import numbers +import warnings +import numpy as np +import operator + +from scipy._lib._util import normalize_axis_index +from . import _ni_support +from . import _nd_image +from . import _ni_docstrings +from . 
import _rank_filter_1d + +__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', + 'prewitt', 'sobel', 'generic_laplace', 'laplace', + 'gaussian_laplace', 'generic_gradient_magnitude', + 'gaussian_gradient_magnitude', 'correlate', 'convolve', + 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', + 'maximum_filter1d', 'minimum_filter', 'maximum_filter', + 'rank_filter', 'median_filter', 'percentile_filter', + 'generic_filter1d', 'generic_filter'] + + +def _invalid_origin(origin, lenw): + return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2) + + +def _complex_via_real_components(func, input, weights, output, cval, **kwargs): + """Complex convolution via a linear combination of real convolutions.""" + complex_input = input.dtype.kind == 'c' + complex_weights = weights.dtype.kind == 'c' + if complex_input and complex_weights: + # real component of the output + func(input.real, weights.real, output=output.real, + cval=np.real(cval), **kwargs) + output.real -= func(input.imag, weights.imag, output=None, + cval=np.imag(cval), **kwargs) + # imaginary component of the output + func(input.real, weights.imag, output=output.imag, + cval=np.real(cval), **kwargs) + output.imag += func(input.imag, weights.real, output=None, + cval=np.imag(cval), **kwargs) + elif complex_input: + func(input.real, weights, output=output.real, cval=np.real(cval), + **kwargs) + func(input.imag, weights, output=output.imag, cval=np.imag(cval), + **kwargs) + else: + if np.iscomplexobj(cval): + raise ValueError("Cannot provide a complex-valued cval when the " + "input is real.") + func(input, weights.real, output=output.real, cval=cval, **kwargs) + func(input, weights.imag, output=output.imag, cval=cval, **kwargs) + return output + + +def _expand_origin(ndim_image, axes, origin): + num_axes = len(axes) + origins = _ni_support._normalize_sequence(origin, num_axes) + if num_axes < ndim_image: + # set origin = 0 for any axes not being filtered + origins_temp = [0,] * 
ndim_image + for o, ax in zip(origins, axes): + origins_temp[ax] = o + origins = origins_temp + return origins + + +def _expand_footprint(ndim_image, axes, footprint, + footprint_name="footprint"): + num_axes = len(axes) + if num_axes < ndim_image: + if footprint.ndim != num_axes: + raise RuntimeError(f"{footprint_name}.ndim ({footprint.ndim}) " + f"must match len(axes) ({num_axes})") + + footprint = np.expand_dims( + footprint, + tuple(ax for ax in range(ndim_image) if ax not in axes) + ) + return footprint + + +def _expand_mode(ndim_image, axes, mode): + num_axes = len(axes) + if not isinstance(mode, str) and isinstance(mode, Iterable): + # set mode = 'constant' for any axes not being filtered + modes = _ni_support._normalize_sequence(mode, num_axes) + modes_temp = ['constant'] * ndim_image + for m, ax in zip(modes, axes): + modes_temp[ax] = m + mode = modes_temp + return mode + + +@_ni_docstrings.docfiller +def correlate1d(input, weights, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Calculate a 1-D correlation along the given axis. + + The lines of the array along the given axis are correlated with the + given weights. + + Parameters + ---------- + %(input)s + weights : array + 1-D sequence of numbers. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + result : ndarray + Correlation result. Has the same shape as `input`. 
+ + Examples + -------- + >>> from scipy.ndimage import correlate1d + >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) + array([ 8, 26, 8, 12, 7, 28, 36, 9]) + """ + input = np.asarray(input) + weights = np.asarray(weights) + complex_input = input.dtype.kind == 'c' + complex_weights = weights.dtype.kind == 'c' + if complex_input or complex_weights: + if complex_weights: + weights = weights.conj() + weights = weights.astype(np.complex128, copy=False) + kwargs = dict(axis=axis, mode=mode, origin=origin) + output = _ni_support._get_output(output, input, complex_output=True) + return _complex_via_real_components(correlate1d, input, weights, + output, cval, **kwargs) + + output = _ni_support._get_output(output, input) + weights = np.asarray(weights, dtype=np.float64) + if weights.ndim != 1 or weights.shape[0] < 1: + raise RuntimeError('no filter weights given') + if not weights.flags.contiguous: + weights = weights.copy() + axis = normalize_axis_index(axis, input.ndim) + if _invalid_origin(origin, len(weights)): + raise ValueError('Invalid origin; origin must satisfy ' + '-(len(weights) // 2) <= origin <= ' + '(len(weights)-1) // 2') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.correlate1d(input, weights, axis, output, mode, cval, + origin) + return output + + +@_ni_docstrings.docfiller +def convolve1d(input, weights, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Calculate a 1-D convolution along the given axis. + + The lines of the array along the given axis are convolved with the + given weights. + + Parameters + ---------- + %(input)s + weights : ndarray + 1-D sequence of numbers. 
+ %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + convolve1d : ndarray + Convolved array with same shape as input + + Examples + -------- + >>> from scipy.ndimage import convolve1d + >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) + array([14, 24, 4, 13, 12, 36, 27, 0]) + """ + weights = np.asarray(weights) + weights = weights[::-1] + origin = -origin + if not weights.shape[0] & 1: + origin -= 1 + if weights.dtype.kind == 'c': + # pre-conjugate here to counteract the conjugation in correlate1d + weights = weights.conj() + return correlate1d(input, weights, axis, output, mode, cval, origin) + + +def _gaussian_kernel1d(sigma, order, radius): + """ + Computes a 1-D Gaussian convolution kernel. + """ + if order < 0: + raise ValueError('order must be non-negative') + exponent_range = np.arange(order + 1) + sigma2 = sigma * sigma + x = np.arange(-radius, radius+1) + phi_x = np.exp(-0.5 / sigma2 * x ** 2) + phi_x = phi_x / phi_x.sum() + + if order == 0: + return phi_x + else: + # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) + # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) + # p'(x) = -1 / sigma ** 2 + # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the + # coefficients of q(x) + q = np.zeros(order + 1) + q[0] = 1 + D = np.diag(exponent_range[1:], 1) # D @ q(x) = q'(x) + P = np.diag(np.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x) + Q_deriv = D + P + for _ in range(order): + q = Q_deriv.dot(q) + q = (x[:, None] ** exponent_range).dot(q) + return q * phi_x + + +@_ni_docstrings.docfiller +def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0, *, radius=None): + """1-D Gaussian filter. + + Parameters + ---------- + %(input)s + sigma : scalar + standard deviation for Gaussian kernel + %(axis)s + order : int, optional + An order of 0 corresponds to convolution with a Gaussian + kernel. 
A positive order corresponds to convolution with + that derivative of a Gaussian. + %(output)s + %(mode_reflect)s + %(cval)s + truncate : float, optional + Truncate the filter at this many standard deviations. + Default is 4.0. + radius : None or int, optional + Radius of the Gaussian kernel. If specified, the size of + the kernel will be ``2*radius + 1``, and `truncate` is ignored. + Default is None. + + Returns + ------- + gaussian_filter1d : ndarray + + Notes + ----- + The Gaussian kernel will have size ``2*radius + 1`` along each axis. If + `radius` is None, a default ``radius = round(truncate * sigma)`` will be + used. + + Examples + -------- + >>> from scipy.ndimage import gaussian_filter1d + >>> import numpy as np + >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1) + array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905]) + >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4) + array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657]) + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(101).cumsum() + >>> y3 = gaussian_filter1d(x, 3) + >>> y6 = gaussian_filter1d(x, 6) + >>> plt.plot(x, 'k', label='original data') + >>> plt.plot(y3, '--', label='filtered, sigma=3') + >>> plt.plot(y6, ':', label='filtered, sigma=6') + >>> plt.legend() + >>> plt.grid() + >>> plt.show() + + """ + sd = float(sigma) + # make the radius of the filter equal to truncate standard deviations + lw = int(truncate * sd + 0.5) + if radius is not None: + lw = radius + if not isinstance(lw, numbers.Integral) or lw < 0: + raise ValueError('Radius must be a nonnegative integer.') + # Since we are calling correlate, not convolve, revert the kernel + weights = _gaussian_kernel1d(sigma, order, lw)[::-1] + return correlate1d(input, weights, axis, output, mode, cval, 0) + + +@_ni_docstrings.docfiller +def gaussian_filter(input, sigma, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0, *, radius=None, + axes=None): + 
"""Multidimensional Gaussian filter. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + Standard deviation for Gaussian kernel. The standard + deviations of the Gaussian filter are given for each axis as a + sequence, or as a single number, in which case it is equal for + all axes. + order : int or sequence of ints, optional + The order of the filter along each axis is given as a sequence + of integers, or as a single number. An order of 0 corresponds + to convolution with a Gaussian kernel. A positive order + corresponds to convolution with that derivative of a Gaussian. + %(output)s + %(mode_multiple)s + %(cval)s + truncate : float, optional + Truncate the filter at this many standard deviations. + Default is 4.0. + radius : None or int or sequence of ints, optional + Radius of the Gaussian kernel. The radius are given for each axis + as a sequence, or as a single number, in which case it is equal + for all axes. If specified, the size of the kernel along each axis + will be ``2*radius + 1``, and `truncate` is ignored. + Default is None. + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `sigma`, `order`, `mode` and/or `radius` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + gaussian_filter : ndarray + Returned array of same shape as `input`. + + Notes + ----- + The multidimensional filter is implemented as a sequence of + 1-D convolution filters. The intermediate arrays are + stored in the same data type as the output. Therefore, for output + types with a limited precision, the results may be imprecise + because intermediate results may be stored with insufficient + precision. + + The Gaussian kernel will have size ``2*radius + 1`` along each axis. 
If + `radius` is None, the default ``radius = round(truncate * sigma)`` will be + used. + + Examples + -------- + >>> from scipy.ndimage import gaussian_filter + >>> import numpy as np + >>> a = np.arange(50, step=2).reshape((5,5)) + >>> a + array([[ 0, 2, 4, 6, 8], + [10, 12, 14, 16, 18], + [20, 22, 24, 26, 28], + [30, 32, 34, 36, 38], + [40, 42, 44, 46, 48]]) + >>> gaussian_filter(a, sigma=1) + array([[ 4, 6, 8, 9, 11], + [10, 12, 14, 15, 17], + [20, 22, 24, 25, 27], + [29, 31, 33, 34, 36], + [35, 37, 39, 40, 42]]) + + >>> from scipy import datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = gaussian_filter(ascent, sigma=5) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = np.asarray(input) + output = _ni_support._get_output(output, input) + + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + orders = _ni_support._normalize_sequence(order, num_axes) + sigmas = _ni_support._normalize_sequence(sigma, num_axes) + modes = _ni_support._normalize_sequence(mode, num_axes) + radiuses = _ni_support._normalize_sequence(radius, num_axes) + axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii], radiuses[ii]) + for ii in range(num_axes) if sigmas[ii] > 1e-15] + if len(axes) > 0: + for axis, sigma, order, mode, radius in axes: + gaussian_filter1d(input, sigma, axis, order, output, + mode, cval, truncate, radius=radius) + input = output + else: + output[...] = input[...] + return output + + +@_ni_docstrings.docfiller +def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0): + """Calculate a Prewitt filter. + + Parameters + ---------- + %(input)s + %(axis)s + %(output)s + %(mode_multiple)s + %(cval)s + + Returns + ------- + prewitt : ndarray + Filtered array. Has the same shape as `input`. 
+ + See Also + -------- + sobel: Sobel filter + + Notes + ----- + This function computes the one-dimensional Prewitt filter. + Horizontal edges are emphasised with the horizontal transform (axis=0), + vertical edges with the vertical transform (axis=1), and so on for higher + dimensions. These can be combined to give the magnitude. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> ascent = datasets.ascent() + >>> prewitt_h = ndimage.prewitt(ascent, axis=0) + >>> prewitt_v = ndimage.prewitt(ascent, axis=1) + >>> magnitude = np.sqrt(prewitt_h ** 2 + prewitt_v ** 2) + >>> magnitude *= 255 / np.max(magnitude) # Normalization + >>> fig, axes = plt.subplots(2, 2, figsize = (8, 8)) + >>> plt.gray() + >>> axes[0, 0].imshow(ascent) + >>> axes[0, 1].imshow(prewitt_h) + >>> axes[1, 0].imshow(prewitt_v) + >>> axes[1, 1].imshow(magnitude) + >>> titles = ["original", "horizontal", "vertical", "magnitude"] + >>> for i, ax in enumerate(axes.ravel()): + ... ax.set_title(titles[i]) + ... ax.axis("off") + >>> plt.show() + + """ + input = np.asarray(input) + axis = normalize_axis_index(axis, input.ndim) + output = _ni_support._get_output(output, input) + modes = _ni_support._normalize_sequence(mode, input.ndim) + correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) + axes = [ii for ii in range(input.ndim) if ii != axis] + for ii in axes: + correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,) + return output + + +@_ni_docstrings.docfiller +def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0): + """Calculate a Sobel filter. + + Parameters + ---------- + %(input)s + %(axis)s + %(output)s + %(mode_multiple)s + %(cval)s + + Returns + ------- + sobel : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + This function computes the axis-specific Sobel gradient. 
+ The horizontal edges can be emphasised with the horizontal transform (axis=0), + the vertical edges with the vertical transform (axis=1) and so on for higher + dimensions. These can be combined to give the magnitude. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> ascent = datasets.ascent().astype('int32') + >>> sobel_h = ndimage.sobel(ascent, 0) # horizontal gradient + >>> sobel_v = ndimage.sobel(ascent, 1) # vertical gradient + >>> magnitude = np.sqrt(sobel_h**2 + sobel_v**2) + >>> magnitude *= 255.0 / np.max(magnitude) # normalization + >>> fig, axs = plt.subplots(2, 2, figsize=(8, 8)) + >>> plt.gray() # show the filtered result in grayscale + >>> axs[0, 0].imshow(ascent) + >>> axs[0, 1].imshow(sobel_h) + >>> axs[1, 0].imshow(sobel_v) + >>> axs[1, 1].imshow(magnitude) + >>> titles = ["original", "horizontal", "vertical", "magnitude"] + >>> for i, ax in enumerate(axs.ravel()): + ... ax.set_title(titles[i]) + ... ax.axis("off") + >>> plt.show() + + """ + input = np.asarray(input) + axis = normalize_axis_index(axis, input.ndim) + output = _ni_support._get_output(output, input) + modes = _ni_support._normalize_sequence(mode, input.ndim) + correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) + axes = [ii for ii in range(input.ndim) if ii != axis] + for ii in axes: + correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0) + return output + + +@_ni_docstrings.docfiller +def generic_laplace(input, derivative2, output=None, mode="reflect", + cval=0.0, + extra_arguments=(), + extra_keywords=None, + *, axes=None): + """ + N-D Laplace filter using a provided second derivative function. + + Parameters + ---------- + %(input)s + derivative2 : callable + Callable with the following signature:: + + derivative2(input, axis, output, mode, cval, + *extra_arguments, **extra_keywords) + + See `extra_arguments`, `extra_keywords` below. 
+ %(output)s + %(mode_multiple)s + %(cval)s + %(extra_keywords)s + %(extra_arguments)s + axes : tuple of int or None + The axes over which to apply the filter. If a `mode` tuple is + provided, its length must match the number of axes. + + Returns + ------- + generic_laplace : ndarray + Filtered array. Has the same shape as `input`. + + """ + if extra_keywords is None: + extra_keywords = {} + input = np.asarray(input) + output = _ni_support._get_output(output, input) + axes = _ni_support._check_axes(axes, input.ndim) + if len(axes) > 0: + modes = _ni_support._normalize_sequence(mode, len(axes)) + derivative2(input, axes[0], output, modes[0], cval, + *extra_arguments, **extra_keywords) + for ii in range(1, len(axes)): + tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval, + *extra_arguments, **extra_keywords) + output += tmp + else: + output[...] = input[...] + return output + + +@_ni_docstrings.docfiller +def laplace(input, output=None, mode="reflect", cval=0.0, *, axes=None): + """N-D Laplace filter based on approximate second derivatives. + + Parameters + ---------- + %(input)s + %(output)s + %(mode_multiple)s + %(cval)s + axes : tuple of int or None + The axes over which to apply the filter. If a `mode` tuple is + provided, its length must match the number of axes. + + Returns + ------- + laplace : ndarray + Filtered array. Has the same shape as `input`. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.laplace(ascent) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + def derivative2(input, axis, output, mode, cval): + return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) + return generic_laplace(input, derivative2, output, mode, cval, axes=axes) + + +@_ni_docstrings.docfiller +def gaussian_laplace(input, sigma, output=None, mode="reflect", + cval=0.0, *, axes=None, **kwargs): + """Multidimensional Laplace filter using Gaussian second derivatives. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + The standard deviations of the Gaussian filter are given for + each axis as a sequence, or as a single number, in which case + it is equal for all axes. + %(output)s + %(mode_multiple)s + %(cval)s + axes : tuple of int or None + The axes over which to apply the filter. If `sigma` or `mode` tuples + are provided, their length must match the number of axes. + Extra keyword arguments will be passed to gaussian_filter(). + + Returns + ------- + gaussian_laplace : ndarray + Filtered array. Has the same shape as `input`. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> ascent = datasets.ascent() + + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + + >>> result = ndimage.gaussian_laplace(ascent, sigma=1) + >>> ax1.imshow(result) + + >>> result = ndimage.gaussian_laplace(ascent, sigma=3) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = np.asarray(input) + + def derivative2(input, axis, output, mode, cval, sigma, **kwargs): + order = [0] * input.ndim + order[axis] = 2 + return gaussian_filter(input, sigma, order, output, mode, cval, + **kwargs) + + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + sigma = _ni_support._normalize_sequence(sigma, num_axes) + if num_axes < input.ndim: + # set sigma = 0 for any axes not being filtered + sigma_temp = [0,] * input.ndim + for s, ax in zip(sigma, axes): + sigma_temp[ax] = s + sigma = sigma_temp + + return generic_laplace(input, derivative2, output, mode, cval, + extra_arguments=(sigma,), + extra_keywords=kwargs, + axes=axes) + + +@_ni_docstrings.docfiller +def generic_gradient_magnitude(input, derivative, output=None, + mode="reflect", cval=0.0, + extra_arguments=(), extra_keywords=None, + *, axes=None): + """Gradient magnitude using a provided gradient function. + + Parameters + ---------- + %(input)s + derivative : callable + Callable with the following signature:: + + derivative(input, axis, output, mode, cval, + *extra_arguments, **extra_keywords) + + See `extra_arguments`, `extra_keywords` below. + `derivative` can assume that `input` and `output` are ndarrays. + Note that the output from `derivative` is modified inplace; + be careful to copy important inputs before returning them. 
+ %(output)s + %(mode_multiple)s + %(cval)s + %(extra_keywords)s + %(extra_arguments)s + axes : tuple of int or None + The axes over which to apply the filter. If a `mode` tuple is + provided, its length must match the number of axes. + + Returns + ------- + generic_gradient_matnitude : ndarray + Filtered array. Has the same shape as `input`. + + """ + if extra_keywords is None: + extra_keywords = {} + input = np.asarray(input) + output = _ni_support._get_output(output, input) + axes = _ni_support._check_axes(axes, input.ndim) + if len(axes) > 0: + modes = _ni_support._normalize_sequence(mode, len(axes)) + derivative(input, axes[0], output, modes[0], cval, + *extra_arguments, **extra_keywords) + np.multiply(output, output, output) + for ii in range(1, len(axes)): + tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, + *extra_arguments, **extra_keywords) + np.multiply(tmp, tmp, tmp) + output += tmp + # This allows the sqrt to work with a different default casting + np.sqrt(output, output, casting='unsafe') + else: + output[...] = input[...] + return output + + +@_ni_docstrings.docfiller +def gaussian_gradient_magnitude(input, sigma, output=None, + mode="reflect", cval=0.0, *, axes=None, + **kwargs): + """Multidimensional gradient magnitude using Gaussian derivatives. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + The standard deviations of the Gaussian filter are given for + each axis as a sequence, or as a single number, in which case + it is equal for all axes. + %(output)s + %(mode_multiple)s + %(cval)s + axes : tuple of int or None + The axes over which to apply the filter. If `sigma` or `mode` tuples + are provided, their length must match the number of axes. + Extra keyword arguments will be passed to gaussian_filter(). + + Returns + ------- + gaussian_gradient_magnitude : ndarray + Filtered array. Has the same shape as `input`. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = np.asarray(input) + + def derivative(input, axis, output, mode, cval, sigma, **kwargs): + order = [0] * input.ndim + order[axis] = 1 + return gaussian_filter(input, sigma, order, output, mode, + cval, **kwargs) + + return generic_gradient_magnitude(input, derivative, output, mode, + cval, extra_arguments=(sigma,), + extra_keywords=kwargs, axes=axes) + + +def _correlate_or_convolve(input, weights, output, mode, cval, origin, + convolution, axes): + input = np.asarray(input) + weights = np.asarray(weights) + complex_input = input.dtype.kind == 'c' + complex_weights = weights.dtype.kind == 'c' + if complex_input or complex_weights: + if complex_weights and not convolution: + # As for np.correlate, conjugate weights rather than input. 
+ weights = weights.conj() + kwargs = dict( + mode=mode, origin=origin, convolution=convolution, axes=axes + ) + output = _ni_support._get_output(output, input, complex_output=True) + + return _complex_via_real_components(_correlate_or_convolve, input, + weights, output, cval, **kwargs) + + axes = _ni_support._check_axes(axes, input.ndim) + weights = np.asarray(weights, dtype=np.float64) + + # expand weights and origins if num_axes < input.ndim + weights = _expand_footprint(input.ndim, axes, weights, "weights") + origins = _expand_origin(input.ndim, axes, origin) + + wshape = [ii for ii in weights.shape if ii > 0] + if len(wshape) != input.ndim: + raise RuntimeError(f"weights.ndim ({len(wshape)}) must match " + f"len(axes) ({len(axes)})") + if convolution: + weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] + for ii in range(len(origins)): + origins[ii] = -origins[ii] + if not weights.shape[ii] & 1: + origins[ii] -= 1 + for origin, lenw in zip(origins, wshape): + if _invalid_origin(origin, lenw): + raise ValueError('Invalid origin; origin must satisfy ' + '-(weights.shape[k] // 2) <= origin[k] <= ' + '(weights.shape[k]-1) // 2') + + if not weights.flags.contiguous: + weights = weights.copy() + output = _ni_support._get_output(output, input) + temp_needed = np.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _ni_support._get_output(output.dtype, input) + if not isinstance(mode, str) and isinstance(mode, Iterable): + raise RuntimeError("A sequence of modes is not supported") + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.correlate(input, weights, output, mode, cval, origins) + if temp_needed: + temp[...] = output + output = temp + return output + + +@_ni_docstrings.docfiller +def correlate(input, weights, output=None, mode='reflect', cval=0.0, + origin=0, *, axes=None): + """ + Multidimensional correlation. + + The array is correlated with the given kernel. 
+ + Parameters + ---------- + %(input)s + weights : ndarray + array of weights, same number of dimensions as input + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `mode` or `origin` must match the length + of `axes`. The ith entry in any of these tuples corresponds to the ith + entry in `axes`. + + Returns + ------- + result : ndarray + The result of correlation of `input` with `weights`. + + See Also + -------- + convolve : Convolve an image with a kernel. + + Examples + -------- + Correlation is the process of moving a filter mask often referred to + as kernel over the image and computing the sum of products at each location. + + >>> from scipy.ndimage import correlate + >>> import numpy as np + >>> input_img = np.arange(25).reshape(5,5) + >>> print(input_img) + [[ 0 1 2 3 4] + [ 5 6 7 8 9] + [10 11 12 13 14] + [15 16 17 18 19] + [20 21 22 23 24]] + + Define a kernel (weights) for correlation. In this example, it is for sum of + center and up, down, left and right next elements. + + >>> weights = [[0, 1, 0], + ... [1, 1, 1], + ... [0, 1, 0]] + + We can calculate a correlation result: + For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``. + + >>> correlate(input_img, weights) + array([[ 6, 10, 15, 20, 24], + [ 26, 30, 35, 40, 44], + [ 51, 55, 60, 65, 69], + [ 76, 80, 85, 90, 94], + [ 96, 100, 105, 110, 114]]) + + """ + return _correlate_or_convolve(input, weights, output, mode, cval, + origin, False, axes) + + +@_ni_docstrings.docfiller +def convolve(input, weights, output=None, mode='reflect', cval=0.0, + origin=0, *, axes=None): + """ + Multidimensional convolution. + + The array is convolved with the given kernel. 
+ + Parameters + ---------- + %(input)s + weights : array_like + Array of weights, same number of dimensions as input + %(output)s + %(mode_reflect)s + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0 + origin : int or sequence, optional + Controls the placement of the filter on the input array's pixels. + A value of 0 (the default) centers the filter over the pixel, with + positive values shifting the filter to the right, and negative ones + to the left. By passing a sequence of origins with length equal to + the number of dimensions of the input array, different shifts can + be specified along each axis. + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `mode` or `origin` must match the length + of `axes`. The ith entry in any of these tuples corresponds to the ith + entry in `axes`. + + Returns + ------- + result : ndarray + The result of convolution of `input` with `weights`. + + See Also + -------- + correlate : Correlate an image with a kernel. + + Notes + ----- + Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where + W is the `weights` kernel, + j is the N-D spatial index over :math:`W`, + I is the `input` and k is the coordinate of the center of + W, specified by `origin` in the input parameters. + + Examples + -------- + Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, + because in this case borders (i.e., where the `weights` kernel, centered + on any one value, extends beyond an edge of `input`) are treated as zeros. + + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... 
[9, 3, 0, 0]]) + >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) + >>> from scipy import ndimage + >>> ndimage.convolve(a, k, mode='constant', cval=0.0) + array([[11, 10, 7, 4], + [10, 3, 11, 11], + [15, 12, 14, 7], + [12, 3, 7, 0]]) + + Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` + with 1.0's (and then extracting only the original region of the result). + + >>> ndimage.convolve(a, k, mode='constant', cval=1.0) + array([[13, 11, 8, 7], + [11, 3, 11, 14], + [16, 12, 14, 10], + [15, 6, 10, 5]]) + + With ``mode='reflect'`` (the default), outer values are reflected at the + edge of `input` to fill in missing values. + + >>> b = np.array([[2, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]]) + >>> ndimage.convolve(b, k, mode='reflect') + array([[5, 0, 0], + [3, 0, 0], + [1, 0, 0]]) + + This includes diagonally at the corners. + + >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) + >>> ndimage.convolve(b, k) + array([[4, 2, 0], + [3, 2, 0], + [1, 1, 0]]) + + With ``mode='nearest'``, the single nearest value in to an edge in + `input` is repeated as many times as needed to match the overlapping + `weights`. + + >>> c = np.array([[2, 0, 1], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> k = np.array([[0, 1, 0], + ... [0, 1, 0], + ... [0, 1, 0], + ... [0, 1, 0], + ... [0, 1, 0]]) + >>> ndimage.convolve(c, k, mode='nearest') + array([[7, 0, 3], + [5, 0, 2], + [3, 0, 1]]) + + """ + return _correlate_or_convolve(input, weights, output, mode, cval, + origin, True, axes) + + +@_ni_docstrings.docfiller +def uniform_filter1d(input, size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a 1-D uniform filter along the given axis. + + The lines of the array along the given axis are filtered with a + uniform filter of given size. 
+ + Parameters + ---------- + %(input)s + size : int + length of uniform filter + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + result : ndarray + Filtered array. Has same shape as `input`. + + Examples + -------- + >>> from scipy.ndimage import uniform_filter1d + >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) + array([4, 3, 4, 1, 4, 6, 6, 3]) + """ + input = np.asarray(input) + axis = normalize_axis_index(axis, input.ndim) + if size < 1: + raise RuntimeError('incorrect filter size') + complex_output = input.dtype.kind == 'c' + output = _ni_support._get_output(output, input, + complex_output=complex_output) + if (size // 2 + origin < 0) or (size // 2 + origin >= size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + if not complex_output: + _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, + origin) + else: + _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode, + np.real(cval), origin) + _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode, + np.imag(cval), origin) + return output + + +@_ni_docstrings.docfiller +def uniform_filter(input, size=3, output=None, mode="reflect", + cval=0.0, origin=0, *, axes=None): + """Multidimensional uniform filter. + + Parameters + ---------- + %(input)s + size : int or sequence of ints, optional + The sizes of the uniform filter are given for each axis as a + sequence, or as a single number, in which case the size is + equal for all axes. + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. 
+ + Returns + ------- + uniform_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + The multidimensional filter is implemented as a sequence of + 1-D uniform filters. The intermediate arrays are stored + in the same data type as the output. Therefore, for output types + with a limited precision, the results may be imprecise because + intermediate results may be stored with insufficient precision. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.uniform_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = np.asarray(input) + output = _ni_support._get_output(output, input, + complex_output=input.dtype.kind == 'c') + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + sizes = _ni_support._normalize_sequence(size, num_axes) + origins = _ni_support._normalize_sequence(origin, num_axes) + modes = _ni_support._normalize_sequence(mode, num_axes) + axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) + for ii in range(num_axes) if sizes[ii] > 1] + if len(axes) > 0: + for axis, size, origin, mode in axes: + uniform_filter1d(input, int(size), axis, output, mode, + cval, origin) + input = output + else: + output[...] = input[...] + return output + + +@_ni_docstrings.docfiller +def minimum_filter1d(input, size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a 1-D minimum filter along the given axis. + + The lines of the array along the given axis are filtered with a + minimum filter of given size. 
+ + Parameters + ---------- + %(input)s + size : int + length along which to calculate 1D minimum + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + result : ndarray. + Filtered image. Has the same shape as `input`. + + Notes + ----- + This function implements the MINLIST algorithm [1]_, as described by + Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being + the `input` length, regardless of filter size. + + References + ---------- + .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 + .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html + + + Examples + -------- + >>> from scipy.ndimage import minimum_filter1d + >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) + array([2, 0, 0, 0, 1, 1, 0, 0]) + """ + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + axis = normalize_axis_index(axis, input.ndim) + if size < 1: + raise RuntimeError('incorrect filter size') + output = _ni_support._get_output(output, input) + if (size // 2 + origin < 0) or (size // 2 + origin >= size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, + origin, 1) + return output + + +@_ni_docstrings.docfiller +def maximum_filter1d(input, size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a 1-D maximum filter along the given axis. + + The lines of the array along the given axis are filtered with a + maximum filter of given size. + + Parameters + ---------- + %(input)s + size : int + Length along which to calculate the 1-D maximum. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + maximum1d : ndarray, None + Maximum-filtered array with same shape as input. 
+ None if `output` is not None + + Notes + ----- + This function implements the MAXLIST algorithm [1]_, as described by + Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being + the `input` length, regardless of filter size. + + References + ---------- + .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 + .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html + + Examples + -------- + >>> from scipy.ndimage import maximum_filter1d + >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) + array([8, 8, 8, 4, 9, 9, 9, 9]) + """ + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + axis = normalize_axis_index(axis, input.ndim) + if size < 1: + raise RuntimeError('incorrect filter size') + output = _ni_support._get_output(output, input) + if (size // 2 + origin < 0) or (size // 2 + origin >= size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, + origin, 0) + return output + + +def _min_or_max_filter(input, size, footprint, structure, output, mode, + cval, origin, minimum, axes=None): + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=3) + if structure is None: + if footprint is None: + if size is None: + raise RuntimeError("no footprint provided") + separable = True + else: + footprint = np.asarray(footprint, dtype=bool) + if not footprint.any(): + raise ValueError("All-zero footprint is not supported.") + if footprint.all(): + size = footprint.shape + footprint = None + separable = True + else: + separable = False + else: + structure = np.asarray(structure, dtype=np.float64) + separable = False + if footprint is None: + footprint = np.ones(structure.shape, bool) + else: + footprint = np.asarray(footprint, dtype=bool) + input = np.asarray(input) + if np.iscomplexobj(input): + 
raise TypeError("Complex type not supported") + output = _ni_support._get_output(output, input) + temp_needed = np.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _ni_support._get_output(output.dtype, input) + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + if separable: + origins = _ni_support._normalize_sequence(origin, num_axes) + sizes = _ni_support._normalize_sequence(size, num_axes) + modes = _ni_support._normalize_sequence(mode, num_axes) + axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) + for ii in range(len(axes)) if sizes[ii] > 1] + if minimum: + filter_ = minimum_filter1d + else: + filter_ = maximum_filter1d + if len(axes) > 0: + for axis, size, origin, mode in axes: + filter_(input, int(size), axis, output, mode, cval, origin) + input = output + else: + output[...] = input[...] + else: + # expand origins and footprint if num_axes < input.ndim + footprint = _expand_footprint(input.ndim, axes, footprint) + origins = _expand_origin(input.ndim, axes, origin) + + fshape = [ii for ii in footprint.shape if ii > 0] + if len(fshape) != input.ndim: + raise RuntimeError(f"footprint.ndim ({footprint.ndim}) must match " + f"len(axes) ({len(axes)})") + for origin, lenf in zip(origins, fshape): + if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): + raise ValueError("invalid origin") + if not footprint.flags.contiguous: + footprint = footprint.copy() + if structure is not None: + if len(structure.shape) != num_axes: + raise RuntimeError("structure array has incorrect shape") + if num_axes != structure.ndim: + structure = np.expand_dims( + structure, + tuple(ax for ax in range(structure.ndim) if ax not in axes) + ) + if not structure.flags.contiguous: + structure = structure.copy() + if not isinstance(mode, str) and isinstance(mode, Iterable): + raise RuntimeError( + "A sequence of modes is not supported for non-separable " + "footprints") + mode = 
_ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter(input, footprint, structure, output, + mode, cval, origins, minimum) + if temp_needed: + temp[...] = output + output = temp + return output + + +@_ni_docstrings.docfiller +def minimum_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """Calculate a multidimensional minimum filter. + + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + minimum_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + A sequence of modes (one per axis) is only supported when the footprint is + separable. Otherwise, a single mode string must be provided. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.minimum_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _min_or_max_filter(input, size, footprint, None, output, mode, + cval, origin, 1, axes) + + +@_ni_docstrings.docfiller +def maximum_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """Calculate a multidimensional maximum filter. 
+ + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + maximum_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + A sequence of modes (one per axis) is only supported when the footprint is + separable. Otherwise, a single mode string must be provided. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.maximum_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _min_or_max_filter(input, size, footprint, None, output, mode, + cval, origin, 0, axes) + + +@_ni_docstrings.docfiller +def _rank_filter(input, rank, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, operation='rank', + axes=None): + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=3) + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + if footprint is None: + if size is None: + raise RuntimeError("no footprint or filter size provided") + sizes = _ni_support._normalize_sequence(size, num_axes) + footprint = np.ones(sizes, dtype=bool) + else: + footprint = np.asarray(footprint, 
dtype=bool) + # expand origins, footprint and modes if num_axes < input.ndim + footprint = _expand_footprint(input.ndim, axes, footprint) + origins = _expand_origin(input.ndim, axes, origin) + mode = _expand_mode(input.ndim, axes, mode) + + fshape = [ii for ii in footprint.shape if ii > 0] + if len(fshape) != input.ndim: + raise RuntimeError(f"footprint.ndim ({footprint.ndim}) must match " + f"len(axes) ({len(axes)})") + for origin, lenf in zip(origins, fshape): + if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): + raise ValueError('invalid origin') + if not footprint.flags.contiguous: + footprint = footprint.copy() + filter_size = np.where(footprint, 1, 0).sum() + if operation == 'median': + rank = filter_size // 2 + elif operation == 'percentile': + percentile = rank + if percentile < 0.0: + percentile += 100.0 + if percentile < 0 or percentile > 100: + raise RuntimeError('invalid percentile') + if percentile == 100.0: + rank = filter_size - 1 + else: + rank = int(float(filter_size) * percentile / 100.0) + if rank < 0: + rank += filter_size + if rank < 0 or rank >= filter_size: + raise RuntimeError('rank not within filter footprint size') + if rank == 0: + return minimum_filter(input, None, footprint, output, mode, cval, + origins, axes=None) + elif rank == filter_size - 1: + return maximum_filter(input, None, footprint, output, mode, cval, + origins, axes=None) + else: + output = _ni_support._get_output(output, input) + temp_needed = np.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _ni_support._get_output(output.dtype, input) + if not isinstance(mode, str) and isinstance(mode, Iterable): + raise RuntimeError( + "A sequence of modes is not supported by non-separable rank " + "filters") + mode = _ni_support._extend_mode_to_code(mode, is_filter=True) + if input.ndim == 1: + if input.dtype in (np.int64, np.float64, np.float32): + x = input + x_out = output + elif input.dtype == 
np.float16: + x = input.astype('float32') + x_out = np.empty(x.shape, dtype='float32') + elif np.result_type(input, np.int64) == np.int64: + x = input.astype('int64') + x_out = np.empty(x.shape, dtype='int64') + elif input.dtype.kind in 'biu': + # cast any other boolean, integer or unsigned type to int64 + x = input.astype('int64') + x_out = np.empty(x.shape, dtype='int64') + else: + raise RuntimeError('Unsupported array type') + cval = x.dtype.type(cval) + _rank_filter_1d.rank_filter(x, rank, footprint.size, x_out, mode, cval, + origin) + if input.dtype not in (np.int64, np.float64, np.float32): + np.copyto(output, x_out, casting='unsafe') + else: + _nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins) + if temp_needed: + temp[...] = output + output = temp + return output + + +@_ni_docstrings.docfiller +def rank_filter(input, rank, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """Calculate a multidimensional rank filter. + + Parameters + ---------- + %(input)s + rank : int + The rank parameter may be less than zero, i.e., rank = -1 + indicates the largest element. + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + rank_filter : ndarray + Filtered array. Has the same shape as `input`. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.rank_filter(ascent, rank=42, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + rank = operator.index(rank) + return _rank_filter(input, rank, size, footprint, output, mode, cval, + origin, 'rank', axes=axes) + + +@_ni_docstrings.docfiller +def median_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """ + Calculate a multidimensional median filter. + + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + median_filter : ndarray + Filtered array. Has the same shape as `input`. + + See Also + -------- + scipy.signal.medfilt2d + + Notes + ----- + For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes + the specialised function `scipy.signal.medfilt2d` may be faster. It is + however limited to constant mode with ``cval=0``. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.median_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _rank_filter(input, 0, size, footprint, output, mode, cval, + origin, 'median', axes=axes) + + +@_ni_docstrings.docfiller +def percentile_filter(input, percentile, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0, *, + axes=None): + """Calculate a multidimensional percentile filter. + + Parameters + ---------- + %(input)s + percentile : scalar + The percentile parameter may be less than zero, i.e., + percentile = -20 equals percentile = 80 + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + percentile_filter : ndarray + Filtered array. Has the same shape as `input`. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _rank_filter(input, percentile, size, footprint, output, mode, + cval, origin, 'percentile', axes=axes) + + +@_ni_docstrings.docfiller +def generic_filter1d(input, function, filter_size, axis=-1, + output=None, mode="reflect", cval=0.0, origin=0, + extra_arguments=(), extra_keywords=None): + """Calculate a 1-D filter along the given axis. + + `generic_filter1d` iterates over the lines of the array, calling the + given function at each line. The arguments of the line are the + input line, and the output line. The input and output lines are 1-D + double arrays. The input line is extended appropriately according + to the filter size and origin. The output line must be modified + in-place with the result. + + Parameters + ---------- + %(input)s + function : {callable, scipy.LowLevelCallable} + Function to apply along given axis. + filter_size : scalar + Length of the filter. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + %(extra_arguments)s + %(extra_keywords)s + + Returns + ------- + generic_filter1d : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + This function also accepts low-level callback functions with one of + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. 
code:: c + + int function(double *input_line, npy_intp input_length, + double *output_line, npy_intp output_length, + void *user_data) + int function(double *input_line, intptr_t input_length, + double *output_line, intptr_t output_length, + void *user_data) + + The calling function iterates over the lines of the input and output + arrays, calling the callback function at each line. The current line + is extended according to the border conditions set by the calling + function, and the result is copied into the array that is passed + through ``input_line``. The length of the input line (after extension) + is passed through ``input_length``. The callback function should apply + the filter and store the result in the array passed through + ``output_line``. The length of the output line is passed through + ``output_length``. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. If an error occurs, you should + normally set the python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. + + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. 
+ + """ + if extra_keywords is None: + extra_keywords = {} + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + output = _ni_support._get_output(output, input) + if filter_size < 1: + raise RuntimeError('invalid filter size') + axis = normalize_axis_index(axis, input.ndim) + if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= + filter_size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.generic_filter1d(input, function, filter_size, axis, output, + mode, cval, origin, extra_arguments, + extra_keywords) + return output + + +@_ni_docstrings.docfiller +def generic_filter(input, function, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0, + extra_arguments=(), extra_keywords=None, *, axes=None): + """Calculate a multidimensional filter using the given function. + + At each element the provided function is called. The input values + within the filter footprint at that element are passed to the function + as a 1-D array of double values. + + Parameters + ---------- + %(input)s + function : {callable, scipy.LowLevelCallable} + Function to apply at each element. + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + %(extra_arguments)s + %(extra_keywords)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size` or `origin` must match the length + of `axes`. The ith entry in any of these tuples corresponds to the ith + entry in `axes`. + + Returns + ------- + generic_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + This function also accepts low-level callback functions with one of + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. 
code:: c + + int callback(double *buffer, npy_intp filter_size, + double *return_value, void *user_data) + int callback(double *buffer, intptr_t filter_size, + double *return_value, void *user_data) + + The calling function iterates over the elements of the input and + output arrays, calling the callback function at each element. The + elements within the footprint of the filter at the current element are + passed through the ``buffer`` parameter, and the number of elements + within the footprint through ``filter_size``. The calculated value is + returned in ``return_value``. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. If an error occurs, you should + normally set the python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. + + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. + + Examples + -------- + Import the necessary modules and load the example image used for + filtering. + + >>> import numpy as np + >>> from scipy import datasets + >>> from scipy.ndimage import zoom, generic_filter + >>> import matplotlib.pyplot as plt + >>> ascent = zoom(datasets.ascent(), 0.5) + + Compute a maximum filter with kernel size 5 by passing a simple NumPy + aggregation function as argument to `function`. + + >>> maximum_filter_result = generic_filter(ascent, np.amax, [5, 5]) + + While a maximum filter could also directly be obtained using + `maximum_filter`, `generic_filter` allows generic Python function or + `scipy.LowLevelCallable` to be used as a filter. Here, we compute the + range between maximum and minimum value as an example for a kernel size + of 5. + + >>> def custom_filter(image): + ... 
return np.amax(image) - np.amin(image) + >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5]) + + Plot the original and filtered images. + + >>> fig, axes = plt.subplots(3, 1, figsize=(3, 9)) + >>> plt.gray() # show the filtered result in grayscale + >>> top, middle, bottom = axes + >>> for ax in axes: + ... ax.set_axis_off() # remove coordinate system + >>> top.imshow(ascent) + >>> top.set_title("Original image") + >>> middle.imshow(maximum_filter_result) + >>> middle.set_title("Maximum filter, Kernel: 5x5") + >>> bottom.imshow(custom_filter_result) + >>> bottom.set_title("Custom filter, Kernel: 5x5") + >>> fig.tight_layout() + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + if extra_keywords is None: + extra_keywords = {} + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + if footprint is None: + if size is None: + raise RuntimeError("no footprint or filter size provided") + sizes = _ni_support._normalize_sequence(size, num_axes) + footprint = np.ones(sizes, dtype=bool) + else: + footprint = np.asarray(footprint, dtype=bool) + + # expand origins, footprint if num_axes < input.ndim + footprint = _expand_footprint(input.ndim, axes, footprint) + origins = _expand_origin(input.ndim, axes, origin) + + fshape = [ii for ii in footprint.shape if ii > 0] + if len(fshape) != input.ndim: + raise RuntimeError(f"footprint.ndim ({footprint.ndim}) " + f"must match len(axes) ({num_axes})") + for origin, lenf in zip(origins, fshape): + if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): + raise ValueError('invalid origin') + if not footprint.flags.contiguous: + footprint = footprint.copy() + output = _ni_support._get_output(output, input) + + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.generic_filter(input, 
function, footprint, output, mode, + cval, origins, extra_arguments, extra_keywords) + return output diff --git a/llava_video/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4ea94184871fe87f848532b21e2def29bd406b --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py @@ -0,0 +1,1003 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import itertools +import warnings + +import numpy as np +from scipy._lib._util import normalize_axis_index + +from scipy import special +from . import _ni_support +from . import _nd_image +from ._ni_docstrings import docfiller + + +__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform', + 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate'] + + +@docfiller +def spline_filter1d(input, order=3, axis=-1, output=np.float64, + mode='mirror'): + """ + Calculate a 1-D spline filter along the given axis. + + The lines of the array along the given axis are filtered by a + spline filter. The order of the spline must be >= 2 and <= 5. + + Parameters + ---------- + %(input)s + order : int, optional + The order of the spline, default is 3. + axis : int, optional + The axis along which the spline filter is applied. Default is the last + axis. + output : ndarray or dtype, optional + The array in which to place the output, or the dtype of the returned + array. Default is ``numpy.float64``. + %(mode_interp_mirror)s + + Returns + ------- + spline_filter1d : ndarray + The filtered input. + + See Also + -------- + spline_filter : Multidimensional spline filter. + + Notes + ----- + All of the interpolation functions in `ndimage` do spline interpolation of + the input image. If using B-splines of `order > 1`, the input image + values have to be converted to B-spline coefficients first, which is + done by applying this 1-D filter sequentially along all + axes of the input. All functions that require B-spline coefficients + will automatically filter their inputs, a behavior controllable with + the `prefilter` keyword argument. For functions that accept a `mode` + parameter, the result will only be correct if it matches the `mode` + used when filtering. + + For complex-valued `input`, this function processes the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. 
+ + Examples + -------- + We can filter an image using 1-D spline along the given axis: + + >>> from scipy.ndimage import spline_filter1d + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> orig_img = np.eye(20) # create an image + >>> orig_img[10, :] = 1.0 + >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0) + >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1) + >>> f, ax = plt.subplots(1, 3, sharex=True) + >>> for ind, data in enumerate([[orig_img, "original image"], + ... [sp_filter_axis_0, "spline filter (axis=0)"], + ... [sp_filter_axis_1, "spline filter (axis=1)"]]): + ... ax[ind].imshow(data[0], cmap='gray_r') + ... ax[ind].set_title(data[1]) + >>> plt.tight_layout() + >>> plt.show() + + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, + complex_output=complex_output) + if complex_output: + spline_filter1d(input.real, order, axis, output.real, mode) + spline_filter1d(input.imag, order, axis, output.imag, mode) + return output + if order in [0, 1]: + output[...] = np.array(input) + else: + mode = _ni_support._extend_mode_to_code(mode) + axis = normalize_axis_index(axis, input.ndim) + _nd_image.spline_filter1d(input, order, axis, output, mode) + return output + +@docfiller +def spline_filter(input, order=3, output=np.float64, mode='mirror'): + """ + Multidimensional spline filter. + + Parameters + ---------- + %(input)s + order : int, optional + The order of the spline, default is 3. + output : ndarray or dtype, optional + The array in which to place the output, or the dtype of the returned + array. Default is ``numpy.float64``. + %(mode_interp_mirror)s + + Returns + ------- + spline_filter : ndarray + Filtered array. Has the same shape as `input`. + + See Also + -------- + spline_filter1d : Calculate a 1-D spline filter along the given axis. 
+ + Notes + ----- + The multidimensional filter is implemented as a sequence of + 1-D spline filters. The intermediate arrays are stored + in the same data type as the output. Therefore, for output types + with a limited precision, the results may be imprecise because + intermediate results may be stored with insufficient precision. + + For complex-valued `input`, this function processes the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + We can filter an image using multidimensional splines: + + >>> from scipy.ndimage import spline_filter + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> orig_img = np.eye(20) # create an image + >>> orig_img[10, :] = 1.0 + >>> sp_filter = spline_filter(orig_img, order=3) + >>> f, ax = plt.subplots(1, 2, sharex=True) + >>> for ind, data in enumerate([[orig_img, "original image"], + ... [sp_filter, "spline filter"]]): + ... ax[ind].imshow(data[0], cmap='gray_r') + ... ax[ind].set_title(data[1]) + >>> plt.tight_layout() + >>> plt.show() + + """ + if order < 2 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, + complex_output=complex_output) + if complex_output: + spline_filter(input.real, order, output.real, mode) + spline_filter(input.imag, order, output.imag, mode) + return output + if order not in [0, 1] and input.ndim > 0: + for axis in range(input.ndim): + spline_filter1d(input, order, axis, output=output, mode=mode) + input = output + else: + output[...] = input[...] 
+ return output + + +def _prepad_for_spline_filter(input, mode, cval): + if mode in ['nearest', 'grid-constant']: + npad = 12 + if mode == 'grid-constant': + padded = np.pad(input, npad, mode='constant', + constant_values=cval) + elif mode == 'nearest': + padded = np.pad(input, npad, mode='edge') + else: + # other modes have exact boundary conditions implemented so + # no prepadding is needed + npad = 0 + padded = input + return padded, npad + + +@docfiller +def geometric_transform(input, mapping, output_shape=None, + output=None, order=3, + mode='constant', cval=0.0, prefilter=True, + extra_arguments=(), extra_keywords=None): + """ + Apply an arbitrary geometric transform. + + The given mapping function is used to find, for each point in the + output, the corresponding coordinates in the input. The value of the + input at those coordinates is determined by spline interpolation of + the requested order. + + Parameters + ---------- + %(input)s + mapping : {callable, scipy.LowLevelCallable} + A callable object that accepts a tuple of length equal to the output + array rank, and returns the corresponding input coordinates as a tuple + of length equal to the input array rank. + output_shape : tuple of ints, optional + Shape tuple. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + extra_arguments : tuple, optional + Extra arguments passed to `mapping`. + extra_keywords : dict, optional + Extra keywords passed to `mapping`. + + Returns + ------- + output : ndarray + The filtered input. + + See Also + -------- + map_coordinates, affine_transform, spline_filter1d + + + Notes + ----- + This function also accepts low-level callback functions with one + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. 
code:: c + + int mapping(npy_intp *output_coordinates, double *input_coordinates, + int output_rank, int input_rank, void *user_data) + int mapping(intptr_t *output_coordinates, double *input_coordinates, + int output_rank, int input_rank, void *user_data) + + The calling function iterates over the elements of the output array, + calling the callback function at each element. The coordinates of the + current output element are passed through ``output_coordinates``. The + callback function must return the coordinates at which the input must + be interpolated in ``input_coordinates``. The rank of the input and + output arrays are given by ``input_rank`` and ``output_rank`` + respectively. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. If an error occurs, you should + normally set the Python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. + + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. + + For complex-valued `input`, this function transforms the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + >>> import numpy as np + >>> from scipy.ndimage import geometric_transform + >>> a = np.arange(12.).reshape((4, 3)) + >>> def shift_func(output_coords): + ... return (output_coords[0] - 0.5, output_coords[1] - 0.5) + ... + >>> geometric_transform(a, shift_func) + array([[ 0. , 0. , 0. ], + [ 0. , 1.362, 2.738], + [ 0. , 4.812, 6.187], + [ 0. , 8.263, 9.637]]) + + >>> b = [1, 2, 3, 4, 5] + >>> def shift_func(output_coords): + ... return (output_coords[0] - 3,) + ... 
+ >>> geometric_transform(b, shift_func, mode='constant') + array([0, 0, 0, 1, 2]) + >>> geometric_transform(b, shift_func, mode='nearest') + array([1, 1, 1, 1, 2]) + >>> geometric_transform(b, shift_func, mode='reflect') + array([3, 2, 1, 1, 2]) + >>> geometric_transform(b, shift_func, mode='wrap') + array([2, 3, 4, 1, 2]) + + """ + if extra_keywords is None: + extra_keywords = {} + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + if output_shape is None: + output_shape = input.shape + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + kwargs = dict(order=order, mode=mode, prefilter=prefilter, + output_shape=output_shape, + extra_arguments=extra_arguments, + extra_keywords=extra_keywords) + geometric_transform(input.real, mapping, output=output.real, + cval=np.real(cval), **kwargs) + geometric_transform(input.imag, mapping, output=output.imag, + cval=np.imag(cval), **kwargs) + return output + + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=np.float64, + mode=mode) + else: + npad = 0 + filtered = input + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.geometric_transform(filtered, mapping, None, None, None, output, + order, mode, cval, npad, extra_arguments, + extra_keywords) + return output + + +@docfiller +def map_coordinates(input, coordinates, output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Map the input array to new coordinates by interpolation. + + The array of coordinates is used to find, for each point in the output, + the corresponding coordinates in the input. 
The value of the input at + those coordinates is determined by spline interpolation of the + requested order. + + The shape of the output is derived from that of the coordinate + array by dropping the first axis. The values of the array along + the first axis are the coordinates in the input array at which the + output value is found. + + Parameters + ---------- + %(input)s + coordinates : array_like + The coordinates at which `input` is evaluated. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + + Returns + ------- + map_coordinates : ndarray + The result of transforming the input. The shape of the output is + derived from that of `coordinates` by dropping the first axis. + + See Also + -------- + spline_filter, geometric_transform, scipy.interpolate + + Notes + ----- + For complex-valued `input`, this function maps the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.arange(12.).reshape((4, 3)) + >>> a + array([[ 0., 1., 2.], + [ 3., 4., 5.], + [ 6., 7., 8.], + [ 9., 10., 11.]]) + >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1) + array([ 2., 7.]) + + Above, the interpolated value of a[0.5, 0.5] gives output[0], while + a[2, 1] is output[1]. + + >>> inds = np.array([[0.5, 2], [0.5, 4]]) + >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3) + array([ 2. 
, -33.3]) + >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest') + array([ 2., 8.]) + >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool) + array([ True, False], dtype=bool) + + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + coordinates = np.asarray(coordinates) + if np.iscomplexobj(coordinates): + raise TypeError('Complex type not supported') + output_shape = coordinates.shape[1:] + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + if coordinates.shape[0] != input.ndim: + raise RuntimeError('invalid shape for coordinate array') + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + kwargs = dict(order=order, mode=mode, prefilter=prefilter) + map_coordinates(input.real, coordinates, output=output.real, + cval=np.real(cval), **kwargs) + map_coordinates(input.imag, coordinates, output=output.imag, + cval=np.imag(cval), **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=np.float64, mode=mode) + else: + npad = 0 + filtered = input + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.geometric_transform(filtered, None, coordinates, None, None, + output, order, mode, cval, npad, None, None) + return output + + +@docfiller +def affine_transform(input, matrix, offset=0.0, output_shape=None, + output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Apply an affine transformation. + + Given an output image pixel index vector ``o``, the pixel value + is determined from the input image at position + ``np.dot(matrix, o) + offset``. + + This does 'pull' (or 'backward') resampling, transforming the output space + to the input to locate data. 
Affine transformations are often described in + the 'push' (or 'forward') direction, transforming input to output. If you + have a matrix for the 'push' transformation, use its inverse + (:func:`numpy.linalg.inv`) in this function. + + Parameters + ---------- + %(input)s + matrix : ndarray + The inverse coordinate transformation matrix, mapping output + coordinates to input coordinates. If ``ndim`` is the number of + dimensions of ``input``, the given matrix must have one of the + following shapes: + + - ``(ndim, ndim)``: the linear transformation matrix for each + output coordinate. + - ``(ndim,)``: assume that the 2-D transformation matrix is + diagonal, with the diagonal specified by the given value. A more + efficient algorithm is then used that exploits the separability + of the problem. + - ``(ndim + 1, ndim + 1)``: assume that the transformation is + specified using homogeneous coordinates [1]_. In this case, any + value passed to ``offset`` is ignored. + - ``(ndim, ndim + 1)``: as above, but the bottom row of a + homogeneous transformation matrix is always ``[0, 0, ..., 1]``, + and may be omitted. + + offset : float or sequence, optional + The offset into the array where the transform is applied. If a float, + `offset` is the same for each axis. If a sequence, `offset` should + contain one value for each axis. + output_shape : tuple of ints, optional + Shape tuple. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + + Returns + ------- + affine_transform : ndarray + The transformed input. + + Notes + ----- + The given matrix and offset are used to find for each point in the + output the corresponding coordinates in the input by an affine + transformation. The value of the input at those coordinates is + determined by spline interpolation of the requested order. 
Points + outside the boundaries of the input are filled according to the given + mode. + + .. versionchanged:: 0.18.0 + Previously, the exact interpretation of the affine transformation + depended on whether the matrix was supplied as a 1-D or a + 2-D array. If a 1-D array was supplied + to the matrix parameter, the output pixel value at index ``o`` + was determined from the input image at position + ``matrix * (o + offset)``. + + For complex-valued `input`, this function transforms the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + if output_shape is None: + if isinstance(output, np.ndarray): + output_shape = output.shape + else: + output_shape = input.shape + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + kwargs = dict(offset=offset, output_shape=output_shape, order=order, + mode=mode, prefilter=prefilter) + affine_transform(input.real, matrix, output=output.real, + cval=np.real(cval), **kwargs) + affine_transform(input.imag, matrix, output=output.imag, + cval=np.imag(cval), **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=np.float64, mode=mode) + else: + npad = 0 + filtered = input + mode = _ni_support._extend_mode_to_code(mode) + matrix = np.asarray(matrix, dtype=np.float64) + if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: + raise RuntimeError('no proper affine matrix provided') + if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and + 
(matrix.shape[0] in [input.ndim, input.ndim + 1])): + if matrix.shape[0] == input.ndim + 1: + exptd = [0] * input.ndim + [1] + if not np.all(matrix[input.ndim] == exptd): + msg = (f'Expected homogeneous transformation matrix with ' + f'shape {matrix.shape} for image shape {input.shape}, ' + f'but bottom row was not equal to {exptd}') + raise ValueError(msg) + # assume input is homogeneous coordinate transformation matrix + offset = matrix[:input.ndim, input.ndim] + matrix = matrix[:input.ndim, :input.ndim] + if matrix.shape[0] != input.ndim: + raise RuntimeError('affine matrix has wrong number of rows') + if matrix.ndim == 2 and matrix.shape[1] != output.ndim: + raise RuntimeError('affine matrix has wrong number of columns') + if not matrix.flags.contiguous: + matrix = matrix.copy() + offset = _ni_support._normalize_sequence(offset, input.ndim) + offset = np.asarray(offset, dtype=np.float64) + if offset.ndim != 1 or offset.shape[0] < 1: + raise RuntimeError('no proper offset provided') + if not offset.flags.contiguous: + offset = offset.copy() + if matrix.ndim == 1: + warnings.warn( + "The behavior of affine_transform with a 1-D " + "array supplied for the matrix parameter has changed in " + "SciPy 0.18.0.", + stacklevel=2 + ) + _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order, + mode, cval, npad, False) + else: + _nd_image.geometric_transform(filtered, None, None, matrix, offset, + output, order, mode, cval, npad, None, + None) + return output + + +@docfiller +def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, + prefilter=True): + """ + Shift an array. + + The array is shifted using spline interpolation of the requested order. + Points outside the boundaries of the input are filled according to the + given mode. + + Parameters + ---------- + %(input)s + shift : float or sequence + The shift along the axes. If a float, `shift` is the same for each + axis. If a sequence, `shift` should contain one value for each axis. 
+ %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + + Returns + ------- + shift : ndarray + The shifted input. + + See Also + -------- + affine_transform : Affine transformations + + Notes + ----- + For complex-valued `input`, this function shifts the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + Import the necessary modules and an exemplary image. + + >>> from scipy.ndimage import shift + >>> import matplotlib.pyplot as plt + >>> from scipy import datasets + >>> image = datasets.ascent() + + Shift the image vertically by 20 pixels. + + >>> image_shifted_vertically = shift(image, (20, 0)) + + Shift the image vertically by -200 pixels and horizontally by 100 pixels. + + >>> image_shifted_both_directions = shift(image, (-200, 100)) + + Plot the original and the shifted images. + + >>> fig, axes = plt.subplots(3, 1, figsize=(4, 12)) + >>> plt.gray() # show the filtered result in grayscale + >>> top, middle, bottom = axes + >>> for ax in axes: + ... 
ax.set_axis_off() # remove coordinate system + >>> top.imshow(image) + >>> top.set_title("Original image") + >>> middle.imshow(image_shifted_vertically) + >>> middle.set_title("Vertically shifted image") + >>> bottom.imshow(image_shifted_both_directions) + >>> bottom.set_title("Image shifted in both directions") + >>> fig.tight_layout() + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + if input.ndim < 1: + raise RuntimeError('input and output rank must be > 0') + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, complex_output=complex_output) + if complex_output: + # import under different name to avoid confusion with shift parameter + from scipy.ndimage._interpolation import shift as _shift + + kwargs = dict(order=order, mode=mode, prefilter=prefilter) + _shift(input.real, shift, output=output.real, cval=np.real(cval), **kwargs) + _shift(input.imag, shift, output=output.imag, cval=np.imag(cval), **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=np.float64, mode=mode) + else: + npad = 0 + filtered = input + mode = _ni_support._extend_mode_to_code(mode) + shift = _ni_support._normalize_sequence(shift, input.ndim) + shift = [-ii for ii in shift] + shift = np.asarray(shift, dtype=np.float64) + if not shift.flags.contiguous: + shift = shift.copy() + _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval, + npad, False) + return output + + +@docfiller +def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, + prefilter=True, *, grid_mode=False): + """ + Zoom an array. + + The array is zoomed using spline interpolation of the requested order. + + Parameters + ---------- + %(input)s + zoom : float or sequence + The zoom factor along the axes. If a float, `zoom` is the same for each + axis. 
If a sequence, `zoom` should contain one value for each axis. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + grid_mode : bool, optional + If False, the distance from the pixel centers is zoomed. Otherwise, the + distance including the full pixel extent is used. For example, a 1d + signal of length 5 is considered to have length 4 when `grid_mode` is + False, but length 5 when `grid_mode` is True. See the following + visual illustration: + + .. code-block:: text + + | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 | + |<-------------------------------------->| + vs. + |<----------------------------------------------->| + + The starting point of the arrow in the diagram above corresponds to + coordinate location 0 in each mode. + + Returns + ------- + zoom : ndarray + The zoomed input. + + Notes + ----- + For complex-valued `input`, this function zooms the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.zoom(ascent, 3.0) + >>> ax1.imshow(ascent, vmin=0, vmax=255) + >>> ax2.imshow(result, vmin=0, vmax=255) + >>> plt.show() + + >>> print(ascent.shape) + (512, 512) + + >>> print(result.shape) + (1536, 1536) + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + if input.ndim < 1: + raise RuntimeError('input and output rank must be > 0') + zoom = _ni_support._normalize_sequence(zoom, input.ndim) + output_shape = tuple( + [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)]) + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + # import under different name to avoid confusion with zoom parameter + from scipy.ndimage._interpolation import zoom as _zoom + + kwargs = dict(order=order, mode=mode, prefilter=prefilter) + _zoom(input.real, zoom, output=output.real, cval=np.real(cval), **kwargs) + _zoom(input.imag, zoom, output=output.imag, cval=np.imag(cval), **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=np.float64, mode=mode) + else: + npad = 0 + filtered = input + if grid_mode: + # warn about modes that may have surprising behavior + suggest_mode = None + if mode == 'constant': + suggest_mode = 'grid-constant' + elif mode == 'wrap': + suggest_mode = 'grid-wrap' + if suggest_mode is not None: + warnings.warn( + (f"It is recommended to use mode = {suggest_mode} instead of {mode} " + f"when grid_mode is True."), + stacklevel=2 + ) + mode = _ni_support._extend_mode_to_code(mode) + + zoom_div = 
np.array(output_shape) + zoom_nominator = np.array(input.shape) + if not grid_mode: + zoom_div -= 1 + zoom_nominator -= 1 + + # Zooming to infinite values is unpredictable, so just choose + # zoom factor 1 instead + zoom = np.divide(zoom_nominator, zoom_div, + out=np.ones_like(input.shape, dtype=np.float64), + where=zoom_div != 0) + zoom = np.ascontiguousarray(zoom) + _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad, + grid_mode) + return output + + +@docfiller +def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Rotate an array. + + The array is rotated in the plane defined by the two axes given by the + `axes` parameter using spline interpolation of the requested order. + + Parameters + ---------- + %(input)s + angle : float + The rotation angle in degrees. + axes : tuple of 2 ints, optional + The two axes that define the plane of rotation. Default is the first + two axes. + reshape : bool, optional + If `reshape` is true, the output shape is adapted so that the input + array is contained completely in the output. Default is True. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + + Returns + ------- + rotate : ndarray + The rotated input. + + Notes + ----- + For complex-valued `input`, this function rotates the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure(figsize=(10, 3)) + >>> ax1, ax2, ax3 = fig.subplots(1, 3) + >>> img = datasets.ascent() + >>> img_45 = ndimage.rotate(img, 45, reshape=False) + >>> full_img_45 = ndimage.rotate(img, 45, reshape=True) + >>> ax1.imshow(img, cmap='gray') + >>> ax1.set_axis_off() + >>> ax2.imshow(img_45, cmap='gray') + >>> ax2.set_axis_off() + >>> ax3.imshow(full_img_45, cmap='gray') + >>> ax3.set_axis_off() + >>> fig.set_layout_engine('tight') + >>> plt.show() + >>> print(img.shape) + (512, 512) + >>> print(img_45.shape) + (512, 512) + >>> print(full_img_45.shape) + (724, 724) + + """ + input_arr = np.asarray(input) + ndim = input_arr.ndim + + if ndim < 2: + raise ValueError('input array should be at least 2D') + + axes = list(axes) + + if len(axes) != 2: + raise ValueError('axes should contain exactly two values') + + if not all([float(ax).is_integer() for ax in axes]): + raise ValueError('axes should contain only integer values') + + if axes[0] < 0: + axes[0] += ndim + if axes[1] < 0: + axes[1] += ndim + if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim: + raise ValueError('invalid rotation plane specified') + + axes.sort() + + c, s = special.cosdg(angle), special.sindg(angle) + + rot_matrix = np.array([[c, s], + [-s, c]]) + + img_shape = np.asarray(input_arr.shape) + in_plane_shape = img_shape[axes] + if reshape: + # Compute transformed input bounds + iy, ix = in_plane_shape + out_bounds = rot_matrix @ [[0, 0, iy, iy], + [0, ix, 0, ix]] + # Compute the shape of the transformed input plane + out_plane_shape = (np.ptp(out_bounds, axis=1) + 0.5).astype(int) + else: + out_plane_shape = img_shape[axes] + + out_center = rot_matrix @ ((out_plane_shape - 1) / 2) + in_center = (in_plane_shape - 1) / 2 + offset = in_center - out_center + + output_shape = img_shape + output_shape[axes] = out_plane_shape + output_shape = tuple(output_shape) + + 
complex_output = np.iscomplexobj(input_arr) + output = _ni_support._get_output(output, input_arr, shape=output_shape, + complex_output=complex_output) + + if ndim <= 2: + affine_transform(input_arr, rot_matrix, offset, output_shape, output, + order, mode, cval, prefilter) + else: + # If ndim > 2, the rotation is applied over all the planes + # parallel to axes + planes_coord = itertools.product( + *[[slice(None)] if ax in axes else range(img_shape[ax]) + for ax in range(ndim)]) + + out_plane_shape = tuple(out_plane_shape) + + for coordinates in planes_coord: + ia = input_arr[coordinates] + oa = output[coordinates] + affine_transform(ia, rot_matrix, offset, out_plane_shape, + oa, order, mode, cval, prefilter) + + return output diff --git a/llava_video/lib/python3.10/site-packages/scipy/ndimage/_morphology.py b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_morphology.py new file mode 100644 index 0000000000000000000000000000000000000000..12972c09a7cd5de0ca059814281fb9d210fbd395 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_morphology.py @@ -0,0 +1,2629 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import warnings +import operator + +import numpy as np +from . import _ni_support +from . import _nd_image +from . import _filters + +__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion', + 'binary_dilation', 'binary_opening', 'binary_closing', + 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes', + 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing', + 'morphological_gradient', 'morphological_laplace', 'white_tophat', + 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt', + 'distance_transform_edt'] + + +def _center_is_true(structure, origin): + structure = np.asarray(structure) + coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, + origin)]) + return bool(structure[coor]) + + +def iterate_structure(structure, iterations, origin=None): + """ + Iterate a structure by dilating it with itself. + + Parameters + ---------- + structure : array_like + Structuring element (an array of bools, for example), to be dilated with + itself. + iterations : int + number of dilations performed on the structure with itself + origin : optional + If origin is None, only the iterated structure is returned. 
If + not, a tuple of the iterated structure and the modified origin is + returned. + + Returns + ------- + iterate_structure : ndarray of bools + A new structuring element obtained by dilating `structure` + (`iterations` - 1) times with itself. + + See Also + -------- + generate_binary_structure + + Examples + -------- + >>> from scipy import ndimage + >>> struct = ndimage.generate_binary_structure(2, 1) + >>> struct.astype(int) + array([[0, 1, 0], + [1, 1, 1], + [0, 1, 0]]) + >>> ndimage.iterate_structure(struct, 2).astype(int) + array([[0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0]]) + >>> ndimage.iterate_structure(struct, 3).astype(int) + array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]]) + + """ + structure = np.asarray(structure) + if iterations < 2: + return structure.copy() + ni = iterations - 1 + shape = [ii + ni * (ii - 1) for ii in structure.shape] + pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] + slc = tuple(slice(pos[ii], pos[ii] + structure.shape[ii], None) + for ii in range(len(shape))) + out = np.zeros(shape, bool) + out[slc] = structure != 0 + out = binary_dilation(out, structure, iterations=ni) + if origin is None: + return out + else: + origin = _ni_support._normalize_sequence(origin, structure.ndim) + origin = [iterations * o for o in origin] + return out, origin + + +def generate_binary_structure(rank, connectivity): + """ + Generate a binary structure for binary morphological operations. + + Parameters + ---------- + rank : int + Number of dimensions of the array to which the structuring element + will be applied, as returned by `np.ndim`. + connectivity : int + `connectivity` determines which elements of the output array belong + to the structure, i.e., are considered as neighbors of the central + element. 
Elements up to a squared distance of `connectivity` from + the center are considered neighbors. `connectivity` may range from 1 + (no diagonal elements are neighbors) to `rank` (all elements are + neighbors). + + Returns + ------- + output : ndarray of bools + Structuring element which may be used for binary morphological + operations, with `rank` dimensions and all dimensions equal to 3. + + See Also + -------- + iterate_structure, binary_dilation, binary_erosion + + Notes + ----- + `generate_binary_structure` can only create structuring elements with + dimensions equal to 3, i.e., minimal dimensions. For larger structuring + elements, that are useful e.g., for eroding large objects, one may either + use `iterate_structure`, or create directly custom arrays with + numpy functions such as `numpy.ones`. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> struct = ndimage.generate_binary_structure(2, 1) + >>> struct + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> a = np.zeros((5,5)) + >>> a[2, 2] = 1 + >>> a + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype) + >>> b + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype) + array([[ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 1., 1., 1., 1., 1.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.]]) + >>> struct = ndimage.generate_binary_structure(2, 2) + >>> struct + array([[ True, True, True], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> struct = ndimage.generate_binary_structure(3, 1) + >>> struct # no diagonal elements + array([[[False, False, False], + [False, True, False], + [False, False, False]], + [[False, True, 
False], + [ True, True, True], + [False, True, False]], + [[False, False, False], + [False, True, False], + [False, False, False]]], dtype=bool) + + """ + if connectivity < 1: + connectivity = 1 + if rank < 1: + return np.array(True, dtype=bool) + output = np.fabs(np.indices([3] * rank) - 1) + output = np.add.reduce(output, 0) + return output <= connectivity + + +def _binary_erosion(input, structure, iterations, mask, output, + border_value, origin, invert, brute_force, axes): + try: + iterations = operator.index(iterations) + except TypeError as e: + raise TypeError('iterations parameter should be an integer') from e + + input = np.asarray(input) + ndim = input.ndim + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + if structure is None: + structure = generate_binary_structure(num_axes, 1) + else: + structure = np.asarray(structure, dtype=bool) + if ndim > num_axes: + structure = _filters._expand_footprint(ndim, axes, structure, + footprint_name="structure") + + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have same dimensionality') + if not structure.flags.contiguous: + structure = structure.copy() + if structure.size < 1: + raise RuntimeError('structure must not be empty') + if mask is not None: + mask = np.asarray(mask) + if mask.shape != input.shape: + raise RuntimeError('mask and input must have equal sizes') + origin = _ni_support._normalize_sequence(origin, num_axes) + origin = _filters._expand_origin(ndim, axes, origin) + cit = _center_is_true(structure, origin) + if isinstance(output, np.ndarray): + if np.iscomplexobj(output): + raise TypeError('Complex output type not supported') + else: + output = bool + output = _ni_support._get_output(output, input) + temp_needed = np.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = 
_ni_support._get_output(output.dtype, input) + if iterations == 1: + _nd_image.binary_erosion(input, structure, mask, output, + border_value, origin, invert, cit, 0) + elif cit and not brute_force: + changed, coordinate_list = _nd_image.binary_erosion( + input, structure, mask, output, + border_value, origin, invert, cit, 1) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + if mask is not None: + mask = np.asarray(mask, dtype=np.int8) + if not structure.flags.contiguous: + structure = structure.copy() + _nd_image.binary_erosion2(output, structure, mask, iterations - 1, + origin, invert, coordinate_list) + else: + tmp_in = np.empty_like(input, dtype=bool) + tmp_out = output + if iterations >= 1 and not iterations & 1: + tmp_in, tmp_out = tmp_out, tmp_in + changed = _nd_image.binary_erosion( + input, structure, mask, tmp_out, + border_value, origin, invert, cit, 0) + ii = 1 + while ii < iterations or (iterations < 1 and changed): + tmp_in, tmp_out = tmp_out, tmp_in + changed = _nd_image.binary_erosion( + tmp_in, structure, mask, tmp_out, + border_value, origin, invert, cit, 0) + ii += 1 + if temp_needed: + temp[...] = output + output = temp + return output + + +def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, + border_value=0, origin=0, brute_force=False, *, axes=None): + """ + Multidimensional binary erosion with a given structuring element. + + Binary erosion is a mathematical morphology operation used for image + processing. + + Parameters + ---------- + input : array_like + Binary image to be eroded. Non-zero (True) elements form + the subset to be eroded. + structure : array_like, optional + Structuring element used for the erosion. Non-zero elements are + considered True. If no structuring element is provided, an element + is generated with a square connectivity equal to one. 
+ iterations : int, optional + The erosion is repeated `iterations` times (one, by default). + If iterations is less than 1, the erosion is repeated until the + result does not change anymore. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated (eroded) in + the current iteration; if True all pixels are considered as candidates + for erosion, regardless of what happened in the previous iteration. + False by default. + axes : tuple of int or None + The axes over which to apply the filter. If None, `input` is filtered + along all axes. If an `origin` tuple is provided, its length must match + the number of axes. + + Returns + ------- + binary_erosion : ndarray of bools + Erosion of the input by the structuring element. + + See Also + -------- + grey_erosion, binary_dilation, binary_closing, binary_opening, + generate_binary_structure + + Notes + ----- + Erosion [1]_ is a mathematical morphology operation [2]_ that uses a + structuring element for shrinking the shapes in an image. The binary + erosion of an image by a structuring element is the locus of the points + where a superimposition of the structuring element centered on the point + is entirely contained in the set of non-zero elements of the image. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29 + .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((7,7), dtype=int) + >>> a[1:6, 2:5] = 1 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_erosion(a).astype(a.dtype) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> #Erosion removes objects smaller than the structure + >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + return _binary_erosion(input, structure, iterations, mask, + output, border_value, origin, 0, brute_force, axes) + + +def binary_dilation(input, structure=None, iterations=1, mask=None, + output=None, border_value=0, origin=0, + brute_force=False, *, axes=None): + """ + Multidimensional binary dilation with the given structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be dilated. Non-zero (True) elements form + the subset to be dilated. + structure : array_like, optional + Structuring element used for the dilation. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one. + iterations : int, optional + The dilation is repeated `iterations` times (one, by default). + If iterations is less than 1, the dilation is repeated until the + result does not change anymore. Only an integer of iterations is + accepted. 
+ mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated (dilated) + in the current iteration; if True all pixels are considered as + candidates for dilation, regardless of what happened in the previous + iteration. False by default. + axes : tuple of int or None + The axes over which to apply the filter. If None, `input` is filtered + along all axes. If an `origin` tuple is provided, its length must match + the number of axes. + + Returns + ------- + binary_dilation : ndarray of bools + Dilation of the input by the structuring element. + + See Also + -------- + grey_dilation, binary_erosion, binary_closing, binary_opening, + generate_binary_structure + + Notes + ----- + Dilation [1]_ is a mathematical morphology operation [2]_ that uses a + structuring element for expanding the shapes in an image. The binary + dilation of an image by a structuring element is the locus of the points + covered by the structuring element, when its center lies within the + non-zero points of the image. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29 + .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((5, 5)) + >>> a[2, 2] = 1 + >>> a + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a) + array([[False, False, False, False, False], + [False, False, True, False, False], + [False, True, True, True, False], + [False, False, True, False, False], + [False, False, False, False, False]], dtype=bool) + >>> ndimage.binary_dilation(a).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> # 3x3 structuring element with connectivity 1, used by default + >>> struct1 = ndimage.generate_binary_structure(2, 1) + >>> struct1 + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> # 3x3 structuring element with connectivity 2 + >>> struct2 = ndimage.generate_binary_structure(2, 2) + >>> struct2 + array([[ True, True, True], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a, structure=struct1,\\ + ... 
iterations=2).astype(a.dtype) + array([[ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 1., 1., 1., 1., 1.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.]]) + + """ + input = np.asarray(input) + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + if structure is None: + structure = generate_binary_structure(num_axes, 1) + origin = _ni_support._normalize_sequence(origin, num_axes) + structure = np.asarray(structure) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + + return _binary_erosion(input, structure, iterations, mask, + output, border_value, origin, 1, brute_force, axes) + + +def binary_opening(input, structure=None, iterations=1, output=None, + origin=0, mask=None, border_value=0, brute_force=False, *, + axes=None): + """ + Multidimensional binary opening with the given structuring element. + + The *opening* of an input image by a structuring element is the + *dilation* of the *erosion* of the image by the structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be opened. Non-zero (True) elements form + the subset to be opened. + structure : array_like, optional + Structuring element used for the opening. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one (i.e., only + nearest neighbors are connected to the center, diagonally-connected + elements are not considered neighbors). + iterations : int, optional + The erosion step of the opening, then the dilation step are each + repeated `iterations` times (one, by default). If `iterations` is + less than 1, each operation is repeated until the result does + not change anymore. Only an integer of iterations is accepted. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. 
+ By default, a new array is created. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + + .. versionadded:: 1.1.0 + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + + .. versionadded:: 1.1.0 + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated in the + current iteration; if true all pixels are considered as candidates for + update, regardless of what happened in the previous iteration. + False by default. + + .. versionadded:: 1.1.0 + axes : tuple of int or None + The axes over which to apply the filter. If None, `input` is filtered + along all axes. If an `origin` tuple is provided, its length must match + the number of axes. + + Returns + ------- + binary_opening : ndarray of bools + Opening of the input by the structuring element. + + See Also + -------- + grey_opening, binary_closing, binary_erosion, binary_dilation, + generate_binary_structure + + Notes + ----- + *Opening* [1]_ is a mathematical morphology operation [2]_ that + consists in the succession of an erosion and a dilation of the + input with the same structuring element. Opening, therefore, removes + objects smaller than the structuring element. + + Together with *closing* (`binary_closing`), opening can be used for + noise removal. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29 + .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((5,5), dtype=int) + >>> a[1:4, 1:4] = 1; a[4, 4] = 1 + >>> a + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + >>> # Opening removes small objects + >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Opening can also smooth corners + >>> ndimage.binary_opening(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + >>> # Opening is the dilation of the erosion of the input + >>> ndimage.binary_erosion(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]) + >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + input = np.asarray(input) + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + if structure is None: + structure = generate_binary_structure(num_axes, 1) + + tmp = binary_erosion(input, structure, iterations, mask, None, + border_value, origin, brute_force, axes=axes) + return binary_dilation(tmp, structure, iterations, mask, output, + border_value, origin, brute_force, axes=axes) + + +def binary_closing(input, structure=None, iterations=1, output=None, + origin=0, mask=None, border_value=0, brute_force=False, *, + axes=None): + """ + Multidimensional binary closing with the given structuring element. + + The *closing* of an input image by a structuring element is the + *erosion* of the *dilation* of the image by the structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be closed. 
Non-zero (True) elements form
+        the subset to be closed.
+    structure : array_like, optional
+        Structuring element used for the closing. Non-zero elements are
+        considered True. If no structuring element is provided an element
+        is generated with a square connectivity equal to one (i.e., only
+        nearest neighbors are connected to the center, diagonally-connected
+        elements are not considered neighbors).
+    iterations : int, optional
+        The dilation step of the closing, then the erosion step are each
+        repeated `iterations` times (one, by default). If iterations is
+        less than 1, each operation is repeated until the result does
+        not change anymore. Only an integer of iterations is accepted.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    origin : int or tuple of ints, optional
+        Placement of the filter, by default 0.
+    mask : array_like, optional
+        If a mask is given, only those elements with a True value at
+        the corresponding mask element are modified at each iteration.
+
+        .. versionadded:: 1.1.0
+    border_value : int (cast to 0 or 1), optional
+        Value at the border in the output array.
+
+        .. versionadded:: 1.1.0
+    brute_force : boolean, optional
+        Memory condition: if False, only the pixels whose value was changed in
+        the last iteration are tracked as candidates to be updated in the
+        current iteration; if true all pixels are considered as candidates for
+        update, regardless of what happened in the previous iteration.
+        False by default.
+
+        .. versionadded:: 1.1.0
+    axes : tuple of int or None
+        The axes over which to apply the filter. If None, `input` is filtered
+        along all axes. If an `origin` tuple is provided, its length must match
+        the number of axes.
+
+    Returns
+    -------
+    binary_closing : ndarray of bools
+        Closing of the input by the structuring element.
+ + See Also + -------- + grey_closing, binary_opening, binary_dilation, binary_erosion, + generate_binary_structure + + Notes + ----- + *Closing* [1]_ is a mathematical morphology operation [2]_ that + consists in the succession of a dilation and an erosion of the + input with the same structuring element. Closing therefore fills + holes smaller than the structuring element. + + Together with *opening* (`binary_opening`), closing can be used for + noise removal. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((5,5), dtype=int) + >>> a[1:-1, 1:-1] = 1; a[2,2] = 0 + >>> a + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 0, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Closing removes small holes + >>> ndimage.binary_closing(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Closing is the erosion of the dilation of the input + >>> ndimage.binary_dilation(a).astype(int) + array([[0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0]]) + >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + + + >>> a = np.zeros((7,7), dtype=int) + >>> a[1:6, 2:5] = 1; a[1:3,3] = 0 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> # In addition to removing holes, closing can also + >>> # coarsen boundaries with fine hollows. 
+ >>> ndimage.binary_closing(a).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + input = np.asarray(input) + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + if structure is None: + structure = generate_binary_structure(num_axes, 1) + + tmp = binary_dilation(input, structure, iterations, mask, None, + border_value, origin, brute_force, axes=axes) + return binary_erosion(tmp, structure, iterations, mask, output, + border_value, origin, brute_force, axes=axes) + + +def binary_hit_or_miss(input, structure1=None, structure2=None, + output=None, origin1=0, origin2=None, *, axes=None): + """ + Multidimensional binary hit-or-miss transform. + + The hit-or-miss transform finds the locations of a given pattern + inside the input image. + + Parameters + ---------- + input : array_like (cast to booleans) + Binary image where a pattern is to be detected. + structure1 : array_like (cast to booleans), optional + Part of the structuring element to be fitted to the foreground + (non-zero elements) of `input`. If no value is provided, a + structure of square connectivity 1 is chosen. + structure2 : array_like (cast to booleans), optional + Second part of the structuring element that has to miss completely + the foreground. If no value is provided, the complementary of + `structure1` is taken. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. 
def binary_hit_or_miss(input, structure1=None, structure2=None,
                       output=None, origin1=0, origin2=None, *, axes=None):
    """
    Multidimensional binary hit-or-miss transform.

    The hit-or-miss transform finds the locations of a given pattern
    inside the input image: points where `structure1` fits the foreground
    and `structure2` fits the background simultaneously.

    Parameters
    ----------
    input : array_like (cast to booleans)
        Binary image where a pattern is to be detected.
    structure1 : array_like (cast to booleans), optional
        Part of the structuring element to be fitted to the foreground
        (non-zero elements) of `input`. Defaults to a structure of square
        connectivity 1.
    structure2 : array_like (cast to booleans), optional
        Second part of the structuring element that has to miss completely
        the foreground. Defaults to the complement of `structure1`.
    output : ndarray, optional
        Array of the same shape as `input` into which the output is placed.
        By default, a new array is created.
    origin1 : int or tuple of ints, optional
        Placement of `structure1`, by default 0 for a centered structure.
    origin2 : int or tuple of ints, optional
        Placement of `structure2`, by default 0 for a centered structure.
        If `origin1` is given and `origin2` is not, `origin2` is set to
        `origin1`.
    axes : tuple of int or None
        The axes over which to apply the filter. If None, `input` is
        filtered along all axes. If `origin1` or `origin2` tuples are
        provided, their length must match the number of axes.

    Returns
    -------
    binary_hit_or_miss : ndarray
        Hit-or-miss transform of `input` with the given structuring
        element (`structure1`, `structure2`).

    See Also
    --------
    binary_erosion

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform
    """
    input = np.asarray(input)
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)

    if structure1 is None:
        structure1 = generate_binary_structure(num_axes, 1)
    else:
        structure1 = np.asarray(structure1)
    if structure2 is None:
        # By default, structure2 is the complement of structure1.
        structure2 = np.logical_not(structure1)

    origin1 = _ni_support._normalize_sequence(origin1, num_axes)
    origin2 = (origin1 if origin2 is None
               else _ni_support._normalize_sequence(origin2, num_axes))

    # Erosion of the foreground by structure1, and of the background by
    # structure2 (invert=1); the transform is their logical AND.
    hits = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
                           0, False, axes)
    writing_in_place = isinstance(output, np.ndarray)
    misses = _binary_erosion(input, structure2, 1, None, output, 0,
                             origin2, 1, False, axes)
    if writing_in_place:
        # NOTE: historical behavior — the in-place branch returns None.
        np.logical_not(output, output)
        np.logical_and(hits, output, output)
    else:
        np.logical_not(misses, misses)
        return np.logical_and(hits, misses)
def binary_propagation(input, structure=None, mask=None,
                       output=None, border_value=0, origin=0, *, axes=None):
    """
    Multidimensional binary propagation with the given structuring element.

    Parameters
    ----------
    input : array_like
        Binary image to be propagated inside `mask`.
    structure : array_like, optional
        Structuring element used in the successive dilations. The output
        may depend on the structuring element, especially if `mask` has
        several connex components. Defaults to an element with squared
        connectivity equal to one.
    mask : array_like, optional
        Binary mask defining the region into which `input` is allowed to
        propagate.
    output : ndarray, optional
        Array of the same shape as `input` into which the output is placed.
        By default, a new array is created.
    border_value : int (cast to 0 or 1), optional
        Value at the border in the output array.
    origin : int or tuple of ints, optional
        Placement of the filter, by default 0.
    axes : tuple of int or None
        The axes over which to apply the filter. If None, `input` is
        filtered along all axes. If an `origin` tuple is provided, its
        length must match the number of axes.

    Returns
    -------
    binary_propagation : ndarray
        Binary propagation of `input` inside `mask`.

    Notes
    -----
    This function is equivalent to calling `binary_dilation` with a number
    of iterations less than one: iterative dilation until the result no
    longer changes.

    The succession of an erosion and a propagation inside the original
    image can replace an *opening* for deleting small objects while keeping
    the contours of larger objects untouched.

    References
    ----------
    .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
    .. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of
           image processing", 1998
           ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf
    """
    # iterations=-1 requests dilation until convergence, i.e. propagation.
    until_stable = -1
    return binary_dilation(input, structure, until_stable, mask, output,
                           border_value, origin, axes=axes)
def binary_fill_holes(input, structure=None, output=None, origin=0, *,
                      axes=None):
    """
    Fill the holes in binary objects.

    Parameters
    ----------
    input : array_like
        N-D binary array with holes to be filled.
    structure : array_like, optional
        Structuring element used in the computation; large-size elements
        make computations faster but may miss holes separated from the
        background by thin regions. The default element (square
        connectivity equal to one) yields the intuitive result where all
        holes in the input have been filled.
    output : ndarray, optional
        Array of the same shape as `input` into which the output is placed.
        By default, a new array is created.
    origin : int, tuple of ints, optional
        Position of the structuring element.
    axes : tuple of int or None
        The axes over which to apply the filter. If None, `input` is
        filtered along all axes. If an `origin` tuple is provided, its
        length must match the number of axes.

    Returns
    -------
    out : ndarray
        Transformation of `input` where holes have been filled.

    See Also
    --------
    binary_dilation, binary_propagation, label

    Notes
    -----
    The algorithm invades the complement of the shapes in `input` from the
    outer boundary of the image using binary dilations. Holes are not
    connected to the boundary, so they are never invaded; the result is the
    complement of the invaded region.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
    """
    input = np.asarray(input)
    # Propagate from the image border (border_value=1) through the
    # background only; holes are unreachable and stay un-invaded.
    background = np.logical_not(input)
    seed = np.zeros(background.shape, bool)
    if isinstance(output, np.ndarray):
        binary_dilation(seed, structure, -1, background, output, 1,
                        origin, axes=axes)
    else:
        output = binary_dilation(seed, structure, -1, background, None,
                                 1, origin, axes=axes)
    # The filled image is the complement of the invaded region.
    np.logical_not(output, output)
    return output
def grey_erosion(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0, *,
                 axes=None):
    """
    Calculate a greyscale erosion, using either a structuring element,
    or a footprint corresponding to a flat structuring element.

    For the simple case of a full and flat structuring element, grayscale
    erosion is a minimum filter over a sliding window.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale erosion is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element. Optional if
        `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element;
        non-zero values give the neighborhood over which the minimum is
        chosen.
    structure : array of ints, optional
        Structuring element, possibly non-flat; applies a subtractive
        offset for each pixel in the neighborhood.
    output : array, optional
        An array for storing the output of the erosion may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        How array borders are handled, where `cval` is the value used when
        mode is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of `input` if `mode` is 'constant'.
        Default is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default 0.
    axes : tuple of int or None
        The axes over which to apply the filter. If None, `input` is
        filtered along all axes. If an `origin` tuple is provided, its
        length must match the number of axes.

    Returns
    -------
    output : ndarray
        Grayscale erosion of `input`.

    See Also
    --------
    binary_erosion, grey_dilation, grey_opening, grey_closing
    generate_binary_structure, minimum_filter

    Notes
    -----
    The grayscale erosion of an image input by a structuring element s
    defined over a domain E is given by:

    (input+s)(x) = min {input(y) - s(x-y), for y in E}

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
    """
    if all(arg is None for arg in (size, footprint, structure)):
        raise ValueError("size, footprint, or structure must be specified")

    # The trailing 1 selects the minimum (erosion) variant of the filter.
    return _filters._min_or_max_filter(input, size, footprint, structure,
                                       output, mode, cval, origin, 1,
                                       axes=axes)
def grey_dilation(input, size=None, footprint=None, structure=None,
                  output=None, mode="reflect", cval=0.0, origin=0, *,
                  axes=None):
    """
    Calculate a greyscale dilation, using either a structuring element,
    or a footprint corresponding to a flat structuring element.

    For the simple case of a full and flat structuring element, grayscale
    dilation is a maximum filter over a sliding window.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale dilation is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element. Optional if
        `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element;
        non-zero values give the neighborhood over which the maximum is
        chosen.
    structure : array of ints, optional
        Structuring element, possibly non-flat; applies an additive offset
        for each pixel in the neighborhood.
    output : array, optional
        An array for storing the output of the dilation may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        How array borders are handled, where `cval` is the value used when
        mode is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of `input` if `mode` is 'constant'.
        Default is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default 0.
    axes : tuple of int or None
        The axes over which to apply the filter. If None, `input` is
        filtered along all axes. If an `origin` tuple is provided, its
        length must match the number of axes.

    Returns
    -------
    grey_dilation : ndarray
        Grayscale dilation of `input`.

    See Also
    --------
    binary_dilation, grey_erosion, grey_closing, grey_opening
    generate_binary_structure, maximum_filter

    Notes
    -----
    The grayscale dilation of an image input by a structuring element s
    defined over a domain E is given by:

    (input+s)(x) = max {input(y) + s(x-y), for y in E}

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
    """
    if all(arg is None for arg in (size, footprint, structure)):
        raise ValueError("size, footprint, or structure must be specified")

    def _reflect(weights):
        # Reverse the array along every axis: dilation is implemented as a
        # max filter with the reflected structuring element.
        return weights[(slice(None, None, -1),) * weights.ndim]

    if structure is not None:
        structure = _reflect(np.asarray(structure))
    if footprint is not None:
        footprint = _reflect(np.asarray(footprint))

    input = np.asarray(input)
    axes = _ni_support._check_axes(axes, input.ndim)
    origin = _ni_support._normalize_sequence(origin, len(axes))
    for ii, offset in enumerate(origin):
        # Negate the origin to compensate for the reflection above; shift
        # by one more on axes with even-sized elements to keep the window
        # centered consistently.
        origin[ii] = -offset
        if footprint is not None:
            extent = footprint.shape[ii]
        elif structure is not None:
            extent = structure.shape[ii]
        else:
            extent = size if np.isscalar(size) else size[ii]
        if extent % 2 == 0:
            origin[ii] -= 1

    # The trailing 0 selects the maximum (dilation) variant of the filter.
    return _filters._min_or_max_filter(input, size, footprint, structure,
                                       output, mode, cval, origin, 0,
                                       axes=axes)
def grey_opening(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0, *,
                 axes=None):
    """
    Multidimensional grayscale opening.

    A grayscale opening is a grayscale erosion followed by a grayscale
    dilation with the same element.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale opening is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element. Optional if
        `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element.
    structure : array of ints, optional
        Structuring element, possibly non-flat; applies offsets to the
        pixels in a neighborhood (additive during dilation, subtractive
        during erosion).
    output : array, optional
        An array for storing the output of the opening may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        How array borders are handled, where `cval` is the value used when
        mode is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of `input` if `mode` is 'constant'.
        Default is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default 0.
    axes : tuple of int or None
        The axes over which to apply the filter. If None, `input` is
        filtered along all axes. If an `origin` tuple is provided, its
        length must match the number of axes.

    Returns
    -------
    grey_opening : ndarray
        Result of the grayscale opening of `input` with `structure`.

    See Also
    --------
    binary_opening, grey_dilation, grey_erosion, grey_closing
    generate_binary_structure

    Notes
    -----
    With a flat structuring element, a grayscale opening smoothens high
    local maxima, whereas binary opening erases small objects.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
    """
    if size is not None and footprint is not None:
        # footprint takes precedence when both are supplied.
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    eroded = grey_erosion(input, size, footprint, structure, None, mode,
                          cval, origin, axes=axes)
    return grey_dilation(eroded, size, footprint, structure, output, mode,
                         cval, origin, axes=axes)
def grey_closing(input, size=None, footprint=None, structure=None,
                 output=None, mode="reflect", cval=0.0, origin=0, *,
                 axes=None):
    """
    Multidimensional grayscale closing.

    A grayscale closing is a grayscale dilation followed by a grayscale
    erosion with the same element.

    Parameters
    ----------
    input : array_like
        Array over which the grayscale closing is to be computed.
    size : tuple of ints
        Shape of a flat and full structuring element. Optional if
        `footprint` or `structure` is provided.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element.
    structure : array of ints, optional
        Structuring element, possibly non-flat; applies offsets to the
        pixels in a neighborhood (additive during dilation, subtractive
        during erosion).
    output : array, optional
        An array for storing the output of the closing may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        How array borders are handled, where `cval` is the value used when
        mode is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of `input` if `mode` is 'constant'.
        Default is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default 0.
    axes : tuple of int or None
        The axes over which to apply the filter. If None, `input` is
        filtered along all axes. If an `origin` tuple is provided, its
        length must match the number of axes.

    Returns
    -------
    grey_closing : ndarray
        Result of the grayscale closing of `input` with `structure`.

    See Also
    --------
    binary_closing, grey_dilation, grey_erosion, grey_opening,
    generate_binary_structure

    Notes
    -----
    With a flat structuring element, a grayscale closing smoothens deep
    local minima, whereas binary closing fills small holes.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
    """
    if size is not None and footprint is not None:
        # footprint takes precedence when both are supplied.
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    dilated = grey_dilation(input, size, footprint, structure, None, mode,
                            cval, origin, axes=axes)
    return grey_erosion(dilated, size, footprint, structure, output, mode,
                        cval, origin, axes=axes)
def morphological_gradient(input, size=None, footprint=None, structure=None,
                           output=None, mode="reflect", cval=0.0, origin=0, *,
                           axes=None):
    """
    Multidimensional morphological gradient.

    The morphological gradient is the difference between a dilation and an
    erosion of the input with a given structuring element.

    Parameters
    ----------
    input : array_like
        Array over which to compute the morphlogical gradient.
    size : tuple of ints
        Shape of a flat and full structuring element used for the
        mathematical morphology operations. Optional if `footprint` or
        `structure` is provided. A larger `size` yields a more blurred
        gradient.
    footprint : array of ints, optional
        Positions of non-infinite elements of a flat structuring element.
        Larger footprints give a more blurred morphological gradient.
    structure : array of ints, optional
        Structuring element, possibly non-flat; applies offsets to the
        pixels in a neighborhood (additive during dilation, subtractive
        during erosion).
    output : array, optional
        An array for storing the output may be provided.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        How array borders are handled, where `cval` is the value used when
        mode is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of `input` if `mode` is 'constant'.
        Default is 0.0.
    origin : scalar, optional
        Controls the placement of the filter. Default 0.
    axes : tuple of int or None
        The axes over which to apply the filter. If None, `input` is
        filtered along all axes. If an `origin` tuple is provided, its
        length must match the number of axes.

    Returns
    -------
    morphological_gradient : ndarray
        Morphological gradient of `input`.

    See Also
    --------
    grey_dilation, grey_erosion, gaussian_gradient_magnitude

    Notes
    -----
    For a flat structuring element, the morphological gradient at a point
    is the maximal difference between input elements covered by the
    structuring element centered on that point.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
    """
    dilated = grey_dilation(input, size, footprint, structure, None, mode,
                            cval, origin, axes=axes)
    if not isinstance(output, np.ndarray):
        eroded = grey_erosion(input, size, footprint, structure, None,
                              mode, cval, origin, axes=axes)
        return dilated - eroded
    # Caller supplied an output array: compute the erosion into it, then
    # subtract in place so no extra allocation is needed.
    grey_erosion(input, size, footprint, structure, output, mode,
                 cval, origin, axes=axes)
    return np.subtract(dilated, output, output)
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the mathematical
+        morphology operations. Optional if `footprint` or `structure` is
+        provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the morphology operations.
+    structure : array of ints, optional
+        Structuring element used for the morphology operations. `structure` may
+        be a non-flat structuring element. The `structure` array applies
+        offsets to the pixels in a neighborhood (the offset is additive during
+        dilation and subtractive during erosion)
+    output : ndarray, optional
+        An output array can optionally be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The mode parameter determines how the array borders are handled.
+        For 'constant' mode, values beyond borders are set to be `cval`.
+        Default is 'reflect'.
+    cval : scalar, optional
+        Value to fill past edges of input if mode is 'constant'.
+        Default is 0.0
+    origin : scalar, optional
+        The origin parameter controls the placement of the filter.
+    axes : tuple of int or None
+        The axes over which to apply the filter. If None, `input` is filtered
+        along all axes. If an `origin` tuple is provided, its length must match
+        the number of axes.
+ + Returns + ------- + morphological_laplace : ndarray + Output + + """ + tmp1 = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin, axes=axes) + if isinstance(output, np.ndarray): + grey_erosion(input, size, footprint, structure, output, mode, + cval, origin, axes=axes) + np.add(tmp1, output, output) + np.subtract(output, input, output) + return np.subtract(output, input, output) + else: + tmp2 = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin, axes=axes) + np.add(tmp1, tmp2, tmp2) + np.subtract(tmp2, input, tmp2) + np.subtract(tmp2, input, tmp2) + return tmp2 + + +def white_tophat(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0, *, + axes=None): + """ + Multidimensional white tophat filter. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints + Shape of a flat and full structuring element used for the filter. + Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of elements of a flat structuring element + used for the white tophat filter. + structure : array of ints, optional + Structuring element used for the filter. `structure` may be a non-flat + structuring element. The `structure` array applies offsets to the + pixels in a neighborhood (the offset is additive during dilation and + subtractive during erosion) + output : array, optional + An array used for storing the output of the filter may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. + Default is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default is 0. 
+ axes : tuple of int or None + The axes over which to apply the filter. If None, `input` is filtered + along all axes. If an `origin` tuple is provided, its length must match + the number of axes. + + Returns + ------- + output : ndarray + Result of the filter of `input` with `structure`. + + See Also + -------- + black_tophat + + Examples + -------- + Subtract gray background from a bright peak. + + >>> from scipy.ndimage import generate_binary_structure, white_tophat + >>> import numpy as np + >>> square = generate_binary_structure(rank=2, connectivity=3) + >>> bright_on_gray = np.array([[2, 3, 3, 3, 2], + ... [3, 4, 5, 4, 3], + ... [3, 5, 9, 5, 3], + ... [3, 4, 5, 4, 3], + ... [2, 3, 3, 3, 2]]) + >>> white_tophat(input=bright_on_gray, structure=square) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 5, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + input = np.asarray(input) + + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin, axes=axes) + tmp = grey_dilation(tmp, size, footprint, structure, output, mode, + cval, origin, axes=axes) + if tmp is None: + tmp = output + + if input.dtype == np.bool_ and tmp.dtype == np.bool_: + np.bitwise_xor(input, tmp, out=tmp) + else: + np.subtract(input, tmp, out=tmp) + return tmp + + +def black_tophat(input, size=None, footprint=None, structure=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """ + Multidimensional black tophat filter. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints, optional + Shape of a flat and full structuring element used for the filter. + Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the black tophat filter. 
+ structure : array of ints, optional + Structuring element used for the filter. `structure` may be a non-flat + structuring element. The `structure` array applies offsets to the + pixels in a neighborhood (the offset is additive during dilation and + subtractive during erosion) + output : array, optional + An array used for storing the output of the filter may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + axes : tuple of int or None + The axes over which to apply the filter. If None, `input` is filtered + along all axes. If an `origin` tuple is provided, its length must match + the number of axes. + + Returns + ------- + black_tophat : ndarray + Result of the filter of `input` with `structure`. + + See Also + -------- + white_tophat, grey_opening, grey_closing + + Examples + -------- + Change dark peak to bright peak and subtract background. + + >>> from scipy.ndimage import generate_binary_structure, black_tophat + >>> import numpy as np + >>> square = generate_binary_structure(rank=2, connectivity=3) + >>> dark_on_gray = np.array([[7, 6, 6, 6, 7], + ... [6, 5, 4, 5, 6], + ... [6, 4, 0, 4, 6], + ... [6, 5, 4, 5, 6], + ... 
[7, 6, 6, 6, 7]]) + >>> black_tophat(input=dark_on_gray, structure=square) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 5, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + input = np.asarray(input) + + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin, axes=axes) + tmp = grey_erosion(tmp, size, footprint, structure, output, mode, + cval, origin, axes=axes) + if tmp is None: + tmp = output + + if input.dtype == np.bool_ and tmp.dtype == np.bool_: + np.bitwise_xor(tmp, input, out=tmp) + else: + np.subtract(tmp, input, out=tmp) + return tmp + + +def distance_transform_bf(input, metric="euclidean", sampling=None, + return_distances=True, return_indices=False, + distances=None, indices=None): + """ + Distance transform function by a brute force algorithm. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element to each foreground element is returned in a separate array. + + Parameters + ---------- + input : array_like + Input + metric : {'euclidean', 'taxicab', 'chessboard'}, optional + 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'. + The default is 'euclidean'. + sampling : float, or sequence of float, optional + This parameter is only used when `metric` is 'euclidean'. + Spacing of elements along each dimension. If a sequence, must be of + length equal to the input rank; if a single number, this is used for + all axes. If not specified, a grid spacing of unity is implied. + return_distances : bool, optional + Whether to calculate the distance transform. + Default is True. 
+    return_indices : bool, optional
+        Whether to calculate the feature transform.
+        Default is False.
+    distances : ndarray, optional
+        An output array to store the calculated distance transform, instead of
+        returning it.
+        `return_distances` must be True.
+        It must be the same shape as `input`, and of type float64 if `metric`
+        is 'euclidean', uint32 otherwise.
+    indices : int32 ndarray, optional
+        An output array to store the calculated feature transform, instead of
+        returning it.
+        `return_indices` must be True.
+        Its shape must be ``(input.ndim,) + input.shape``.
+
+    Returns
+    -------
+    distances : ndarray, optional
+        The calculated distance transform. Returned only when
+        `return_distances` is True and `distances` is not supplied.
+        It will have the same shape as the input array.
+    indices : int32 ndarray, optional
+        The calculated feature transform. It has an input-shaped array for each
+        dimension of the input. See distance_transform_edt documentation for an
+        example.
+        Returned only when `return_indices` is True and `indices` is not
+        supplied.
+
+    See Also
+    --------
+    distance_transform_cdt : Faster distance transform for taxicab and
+        chessboard metrics
+    distance_transform_edt : Faster distance transform for euclidean metric
+
+    Notes
+    -----
+    This function employs a slow brute force algorithm. See also the
+    function `distance_transform_cdt` for more efficient taxicab [1]_ and
+    chessboard algorithms [2]_.
+
+    References
+    ----------
+    .. [1] Taxicab distance. Wikipedia, 2023.
+           https://en.wikipedia.org/wiki/Taxicab_geometry
+    .. [2] Chessboard distance. Wikipedia, 2023.
+           https://en.wikipedia.org/wiki/Chebyshev_distance
+
+    Examples
+    --------
+    Import the necessary modules.
+
+    >>> import numpy as np
+    >>> from scipy.ndimage import distance_transform_bf
+    >>> import matplotlib.pyplot as plt
+    >>> from mpl_toolkits.axes_grid1 import ImageGrid
+
+    First, we create a toy binary image.
+ + >>> def add_circle(center_x, center_y, radius, image, fillvalue=1): + ... # fill circular area with 1 + ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]] + ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2 + ... circle_shape = np.sqrt(circle) < radius + ... image[circle_shape] = fillvalue + ... return image + >>> image = np.zeros((100, 100), dtype=np.uint8) + >>> image[35:65, 20:80] = 1 + >>> image = add_circle(28, 65, 10, image) + >>> image = add_circle(37, 30, 10, image) + >>> image = add_circle(70, 45, 20, image) + >>> image = add_circle(45, 80, 10, image) + + Next, we set up the figure. + + >>> fig = plt.figure(figsize=(8, 8)) # set up the figure structure + >>> grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=(0.4, 0.3), + ... label_mode="1", share_all=True, + ... cbar_location="right", cbar_mode="each", + ... cbar_size="7%", cbar_pad="2%") + >>> for ax in grid: + ... ax.axis('off') # remove axes from images + + The top left image is the original binary image. + + >>> binary_image = grid[0].imshow(image, cmap='gray') + >>> cbar_binary_image = grid.cbar_axes[0].colorbar(binary_image) + >>> cbar_binary_image.set_ticks([0, 1]) + >>> grid[0].set_title("Binary image: foreground in white") + + The distance transform calculates the distance between foreground pixels + and the image background according to a distance metric. Available metrics + in `distance_transform_bf` are: ``euclidean`` (default), ``taxicab`` + and ``chessboard``. The top right image contains the distance transform + based on the ``euclidean`` metric. + + >>> distance_transform_euclidean = distance_transform_bf(image) + >>> euclidean_transform = grid[1].imshow(distance_transform_euclidean, + ... 
cmap='gray') + >>> cbar_euclidean = grid.cbar_axes[1].colorbar(euclidean_transform) + >>> colorbar_ticks = [0, 10, 20] + >>> cbar_euclidean.set_ticks(colorbar_ticks) + >>> grid[1].set_title("Euclidean distance") + + The lower left image contains the distance transform using the ``taxicab`` + metric. + + >>> distance_transform_taxicab = distance_transform_bf(image, + ... metric='taxicab') + >>> taxicab_transformation = grid[2].imshow(distance_transform_taxicab, + ... cmap='gray') + >>> cbar_taxicab = grid.cbar_axes[2].colorbar(taxicab_transformation) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> grid[2].set_title("Taxicab distance") + + Finally, the lower right image contains the distance transform using the + ``chessboard`` metric. + + >>> distance_transform_cb = distance_transform_bf(image, + ... metric='chessboard') + >>> chessboard_transformation = grid[3].imshow(distance_transform_cb, + ... cmap='gray') + >>> cbar_taxicab = grid.cbar_axes[3].colorbar(chessboard_transformation) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> grid[3].set_title("Chessboard distance") + >>> plt.show() + + """ + ft_inplace = isinstance(indices, np.ndarray) + dt_inplace = isinstance(distances, np.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + + tmp1 = np.asarray(input) != 0 + struct = generate_binary_structure(tmp1.ndim, tmp1.ndim) + tmp2 = binary_dilation(tmp1, struct) + tmp2 = np.logical_xor(tmp1, tmp2) + tmp1 = tmp1.astype(np.int8) - tmp2.astype(np.int8) + metric = metric.lower() + if metric == 'euclidean': + metric = 1 + elif metric in ['taxicab', 'cityblock', 'manhattan']: + metric = 2 + elif metric == 'chessboard': + metric = 3 + else: + raise RuntimeError('distance metric not supported') + if sampling is not None: + sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim) + sampling = np.asarray(sampling, dtype=np.float64) + if not sampling.flags.contiguous: + sampling = sampling.copy() + if 
return_indices: + ft = np.zeros(tmp1.shape, dtype=np.int32) + else: + ft = None + if return_distances: + if distances is None: + if metric == 1: + dt = np.zeros(tmp1.shape, dtype=np.float64) + else: + dt = np.zeros(tmp1.shape, dtype=np.uint32) + else: + if distances.shape != tmp1.shape: + raise RuntimeError('distances array has wrong shape') + if metric == 1: + if distances.dtype.type != np.float64: + raise RuntimeError('distances array must be float64') + else: + if distances.dtype.type != np.uint32: + raise RuntimeError('distances array must be uint32') + dt = distances + else: + dt = None + + _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft) + if return_indices: + if isinstance(indices, np.ndarray): + if indices.dtype.type != np.int32: + raise RuntimeError('indices array must be int32') + if indices.shape != (tmp1.ndim,) + tmp1.shape: + raise RuntimeError('indices array has wrong shape') + tmp2 = indices + else: + tmp2 = np.indices(tmp1.shape, dtype=np.int32) + ft = np.ravel(ft) + for ii in range(tmp2.shape[0]): + rtmp = np.ravel(tmp2[ii, ...])[ft] + rtmp.shape = tmp1.shape + tmp2[ii, ...] = rtmp + ft = tmp2 + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def distance_transform_cdt(input, metric='chessboard', return_distances=True, + return_indices=False, distances=None, indices=None): + """ + Distance transform for chamfer type of transforms. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. 
In this case the index of the closest background
+    element to each foreground element is returned in a separate array.
+
+    Parameters
+    ----------
+    input : array_like
+        Input. Values of 0 are treated as background.
+    metric : {'chessboard', 'taxicab'} or array_like, optional
+        The `metric` determines the type of chamfering that is done. If the
+        `metric` is equal to 'taxicab' a structure is generated using
+        `generate_binary_structure` with a squared distance equal to 1. If
+        the `metric` is equal to 'chessboard', a `metric` is generated
+        using `generate_binary_structure` with a squared distance equal to
+        the dimensionality of the array. These choices correspond to the
+        common interpretations of the 'taxicab' and the 'chessboard'
+        distance metrics in two dimensions.
+        A custom metric may be provided, in the form of a matrix where
+        each dimension has a length of three.
+        'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
+        The default is 'chessboard'.
+    return_distances : bool, optional
+        Whether to calculate the distance transform.
+        Default is True.
+    return_indices : bool, optional
+        Whether to calculate the feature transform.
+        Default is False.
+    distances : int32 ndarray, optional
+        An output array to store the calculated distance transform, instead of
+        returning it.
+        `return_distances` must be True.
+        It must be the same shape as `input`.
+    indices : int32 ndarray, optional
+        An output array to store the calculated feature transform, instead of
+        returning it.
+        `return_indices` must be True.
+        Its shape must be ``(input.ndim,) + input.shape``.
+
+    Returns
+    -------
+    distances : int32 ndarray, optional
+        The calculated distance transform. Returned only when
+        `return_distances` is True, and `distances` is not supplied.
+        It will have the same shape as the input array.
+    indices : int32 ndarray, optional
+        The calculated feature transform. It has an input-shaped array for each
+        dimension of the input.
See distance_transform_edt documentation for an + example. + Returned only when `return_indices` is True, and `indices` is not + supplied. + + See Also + -------- + distance_transform_edt : Fast distance transform for euclidean metric + distance_transform_bf : Distance transform for different metrics using + a slower brute force algorithm + + Examples + -------- + Import the necessary modules. + + >>> import numpy as np + >>> from scipy.ndimage import distance_transform_cdt + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1 import ImageGrid + + First, we create a toy binary image. + + >>> def add_circle(center_x, center_y, radius, image, fillvalue=1): + ... # fill circular area with 1 + ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]] + ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2 + ... circle_shape = np.sqrt(circle) < radius + ... image[circle_shape] = fillvalue + ... return image + >>> image = np.zeros((100, 100), dtype=np.uint8) + >>> image[35:65, 20:80] = 1 + >>> image = add_circle(28, 65, 10, image) + >>> image = add_circle(37, 30, 10, image) + >>> image = add_circle(70, 45, 20, image) + >>> image = add_circle(45, 80, 10, image) + + Next, we set up the figure. + + >>> fig = plt.figure(figsize=(5, 15)) + >>> grid = ImageGrid(fig, 111, nrows_ncols=(3, 1), axes_pad=(0.5, 0.3), + ... label_mode="1", share_all=True, + ... cbar_location="right", cbar_mode="each", + ... cbar_size="7%", cbar_pad="2%") + >>> for ax in grid: + ... ax.axis('off') + >>> top, middle, bottom = grid + >>> colorbar_ticks = [0, 10, 20] + + The top image contains the original binary image. + + >>> binary_image = top.imshow(image, cmap='gray') + >>> cbar_binary_image = top.cax.colorbar(binary_image) + >>> cbar_binary_image.set_ticks([0, 1]) + >>> top.set_title("Binary image: foreground in white") + + The middle image contains the distance transform using the ``taxicab`` + metric. 
+ + >>> distance_taxicab = distance_transform_cdt(image, metric="taxicab") + >>> taxicab_transform = middle.imshow(distance_taxicab, cmap='gray') + >>> cbar_taxicab = middle.cax.colorbar(taxicab_transform) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> middle.set_title("Taxicab metric") + + The bottom image contains the distance transform using the ``chessboard`` + metric. + + >>> distance_chessboard = distance_transform_cdt(image, + ... metric="chessboard") + >>> chessboard_transform = bottom.imshow(distance_chessboard, cmap='gray') + >>> cbar_chessboard = bottom.cax.colorbar(chessboard_transform) + >>> cbar_chessboard.set_ticks(colorbar_ticks) + >>> bottom.set_title("Chessboard metric") + >>> plt.tight_layout() + >>> plt.show() + + """ + ft_inplace = isinstance(indices, np.ndarray) + dt_inplace = isinstance(distances, np.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + input = np.asarray(input) + if isinstance(metric, str): + if metric in ['taxicab', 'cityblock', 'manhattan']: + rank = input.ndim + metric = generate_binary_structure(rank, 1) + elif metric == 'chessboard': + rank = input.ndim + metric = generate_binary_structure(rank, rank) + else: + raise ValueError('invalid metric provided') + else: + try: + metric = np.asarray(metric) + except Exception as e: + raise ValueError('invalid metric provided') from e + for s in metric.shape: + if s != 3: + raise ValueError('metric sizes must be equal to 3') + + if not metric.flags.contiguous: + metric = metric.copy() + if dt_inplace: + if distances.dtype.type != np.int32: + raise ValueError('distances must be of int32 type') + if distances.shape != input.shape: + raise ValueError('distances has wrong shape') + dt = distances + dt[...] 
= np.where(input, -1, 0).astype(np.int32) + else: + dt = np.where(input, -1, 0).astype(np.int32) + + rank = dt.ndim + if return_indices: + ft = np.arange(dt.size, dtype=np.int32) + ft.shape = dt.shape + else: + ft = None + + _nd_image.distance_transform_op(metric, dt, ft) + dt = dt[tuple([slice(None, None, -1)] * rank)] + if return_indices: + ft = ft[tuple([slice(None, None, -1)] * rank)] + _nd_image.distance_transform_op(metric, dt, ft) + dt = dt[tuple([slice(None, None, -1)] * rank)] + if return_indices: + ft = ft[tuple([slice(None, None, -1)] * rank)] + ft = np.ravel(ft) + if ft_inplace: + if indices.dtype.type != np.int32: + raise ValueError('indices array must be int32') + if indices.shape != (dt.ndim,) + dt.shape: + raise ValueError('indices array has wrong shape') + tmp = indices + else: + tmp = np.indices(dt.shape, dtype=np.int32) + for ii in range(tmp.shape[0]): + rtmp = np.ravel(tmp[ii, ...])[ft] + rtmp.shape = dt.shape + tmp[ii, ...] = rtmp + ft = tmp + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def distance_transform_edt(input, sampling=None, return_distances=True, + return_indices=False, distances=None, indices=None): + """ + Exact Euclidean distance transform. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element to each foreground element is returned in a separate array. + + Parameters + ---------- + input : array_like + Input data to transform. 
Can be any type but will be converted
+        into binary: 1 wherever input equates to True, 0 elsewhere.
+    sampling : float, or sequence of float, optional
+        Spacing of elements along each dimension. If a sequence, must be of
+        length equal to the input rank; if a single number, this is used for
+        all axes. If not specified, a grid spacing of unity is implied.
+    return_distances : bool, optional
+        Whether to calculate the distance transform.
+        Default is True.
+    return_indices : bool, optional
+        Whether to calculate the feature transform.
+        Default is False.
+    distances : float64 ndarray, optional
+        An output array to store the calculated distance transform, instead of
+        returning it.
+        `return_distances` must be True.
+        It must be the same shape as `input`.
+    indices : int32 ndarray, optional
+        An output array to store the calculated feature transform, instead of
+        returning it.
+        `return_indices` must be True.
+        Its shape must be ``(input.ndim,) + input.shape``.
+
+    Returns
+    -------
+    distances : float64 ndarray, optional
+        The calculated distance transform. Returned only when
+        `return_distances` is True and `distances` is not supplied.
+        It will have the same shape as the input array.
+    indices : int32 ndarray, optional
+        The calculated feature transform. It has an input-shaped array for each
+        dimension of the input. See example below.
+        Returned only when `return_indices` is True and `indices` is not
+        supplied.
+
+    Notes
+    -----
+    The Euclidean distance transform gives values of the Euclidean
+    distance::
+
+                    n
+      y_i = sqrt(sum (x[i]-b[i])**2)
+                    i
+
+    where b[i] is the background point (value 0) with the smallest
+    Euclidean distance to input points x[i], and n is the
+    number of dimensions.
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.array(([0,1,1,1,1],
+    ...               [0,0,1,1,1],
+    ...               [0,1,1,1,1],
+    ...               [0,1,1,1,0],
+    ...               [0,1,1,0,0]))
+    >>> ndimage.distance_transform_edt(a)
+    array([[ 0. , 1. , 1.4142, 2.2361, 3.
], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. , 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + With a sampling of 2 units along x, 1 along y: + + >>> ndimage.distance_transform_edt(a, sampling=[2,1]) + array([[ 0. , 1. , 2. , 2.8284, 3.6056], + [ 0. , 0. , 1. , 2. , 3. ], + [ 0. , 1. , 2. , 2.2361, 2. ], + [ 0. , 1. , 2. , 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + Asking for indices as well: + + >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True) + >>> inds + array([[[0, 0, 1, 1, 3], + [1, 1, 1, 1, 3], + [2, 2, 1, 3, 3], + [3, 3, 4, 4, 3], + [4, 4, 4, 4, 4]], + [[0, 0, 1, 1, 4], + [0, 1, 1, 1, 4], + [0, 0, 1, 4, 4], + [0, 0, 3, 3, 4], + [0, 0, 3, 3, 4]]], dtype=int32) + + With arrays provided for inplace outputs: + + >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32) + >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices) + array([[ 0. , 1. , 1.4142, 2.2361, 3. ], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. , 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. 
]]) + >>> indices + array([[[0, 0, 1, 1, 3], + [1, 1, 1, 1, 3], + [2, 2, 1, 3, 3], + [3, 3, 4, 4, 3], + [4, 4, 4, 4, 4]], + [[0, 0, 1, 1, 4], + [0, 1, 1, 1, 4], + [0, 0, 1, 4, 4], + [0, 0, 3, 3, 4], + [0, 0, 3, 3, 4]]], dtype=int32) + + """ + ft_inplace = isinstance(indices, np.ndarray) + dt_inplace = isinstance(distances, np.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + + # calculate the feature transform + input = np.atleast_1d(np.where(input, 1, 0).astype(np.int8)) + if sampling is not None: + sampling = _ni_support._normalize_sequence(sampling, input.ndim) + sampling = np.asarray(sampling, dtype=np.float64) + if not sampling.flags.contiguous: + sampling = sampling.copy() + + if ft_inplace: + ft = indices + if ft.shape != (input.ndim,) + input.shape: + raise RuntimeError('indices array has wrong shape') + if ft.dtype.type != np.int32: + raise RuntimeError('indices array must be int32') + else: + ft = np.zeros((input.ndim,) + input.shape, dtype=np.int32) + + _nd_image.euclidean_feature_transform(input, sampling, ft) + # if requested, calculate the distance transform + if return_distances: + dt = ft - np.indices(input.shape, dtype=ft.dtype) + dt = dt.astype(np.float64) + if sampling is not None: + for ii in range(len(sampling)): + dt[ii, ...] 
*= sampling[ii] + np.multiply(dt, dt, dt) + if dt_inplace: + dt = np.add.reduce(dt, axis=0) + if distances.shape != dt.shape: + raise RuntimeError('distances array has wrong shape') + if distances.dtype.type != np.float64: + raise RuntimeError('distances array must be float64') + np.sqrt(dt, distances) + else: + dt = np.add.reduce(dt, axis=0) + dt = np.sqrt(dt) + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def _distance_tranform_arg_check(distances_out, indices_out, + return_distances, return_indices): + """Raise a RuntimeError if the arguments are invalid""" + error_msgs = [] + if (not return_distances) and (not return_indices): + error_msgs.append( + 'at least one of return_distances/return_indices must be True') + if distances_out and not return_distances: + error_msgs.append( + 'return_distances must be True if distances is supplied' + ) + if indices_out and not return_indices: + error_msgs.append('return_indices must be True if indices is supplied') + if error_msgs: + raise RuntimeError(', '.join(error_msgs)) diff --git a/llava_video/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py new file mode 100644 index 0000000000000000000000000000000000000000..f8d41d00d9edf8d347c4ffc95598210489fed5e9 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py @@ -0,0 +1,143 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from collections.abc import Iterable
import operator
import warnings
import numpy as np

# Explicit export list so `from ... import *` (and the isolated test
# harness) can see these private helpers; nothing outside this package
# is expected to star-import this module.
__all__ = ['_extend_mode_to_code', '_normalize_sequence', '_get_output',
           '_check_axes', '_skip_if_dtype', '_skip_if_int']

# Mapping of boundary-mode names to the integer codes understood by the
# C extension modules.  'grid-mirror' is an alias for 'reflect'.
_BOUNDARY_MODE_CODES = {
    'nearest': 0,
    'wrap': 1,
    'reflect': 2,
    'grid-mirror': 2,
    'mirror': 3,
    'constant': 4,
    'grid-wrap': 5,
    'grid-constant': 6,
}


def _extend_mode_to_code(mode, is_filter=False):
    """Convert an extension mode name to the corresponding integer code.

    Parameters
    ----------
    mode : str
        One of 'nearest', 'wrap', 'reflect', 'grid-mirror', 'mirror',
        'constant', 'grid-wrap', 'grid-constant'.
    is_filter : bool, optional
        If True, the 'grid-*' modes are mapped onto the codes used by the
        filter kernels ('grid-wrap' -> code 1, 'grid-constant' -> code 4).

    Returns
    -------
    int
        Integer mode code passed to the C extension.

    Raises
    ------
    RuntimeError
        If `mode` is not one of the supported names.
    """
    # The filter kernels reuse the 'wrap'/'constant' code paths for the
    # grid-aligned variants.
    if is_filter and mode == 'grid-wrap':
        return 1
    if is_filter and mode == 'grid-constant':
        return 4
    try:
        return _BOUNDARY_MODE_CODES[mode]
    except (KeyError, TypeError):
        # TypeError covers unhashable `mode` values; both map to the
        # same user-facing error the elif-chain used to raise.
        raise RuntimeError('boundary mode not supported') from None


def _normalize_sequence(input, rank):
    """If input is a scalar, create a sequence of length equal to the
    rank by duplicating the input. If input is a sequence,
    check if its length is equal to the length of array.

    Parameters
    ----------
    input : scalar or sequence
        A string counts as a scalar here (it is iterable but must not be
        split into characters).
    rank : int
        Number of dimensions of the associated array.

    Returns
    -------
    list
        A list of length `rank`.

    Raises
    ------
    RuntimeError
        If `input` is a sequence whose length differs from `rank`.
    """
    is_str = isinstance(input, str)
    if not is_str and np.iterable(input):
        normalized = list(input)
        if len(normalized) != rank:
            err = "sequence argument must have length equal to input rank"
            raise RuntimeError(err)
    else:
        normalized = [input] * rank
    return normalized


def _get_output(output, input, shape=None, complex_output=False):
    """Return an output array for an ndimage operation.

    Parameters
    ----------
    output : None, dtype-like, str, or ndarray
        If None, a zero-filled array matching `input` is allocated.  A
        type/dtype/str selects the dtype of a newly allocated array.  An
        existing array is validated and used as-is.
    input : ndarray
        Array whose shape (and, for ``output=None``, dtype) is used.
    shape : tuple of int, optional
        Overrides ``input.shape`` for the allocated/validated array.
    complex_output : bool, optional
        If True, the output must have (or is promoted to) a complex dtype.

    Returns
    -------
    ndarray
        The allocated or validated output array.

    Raises
    ------
    RuntimeError
        If a supplied array/dtype is incompatible with the request.
    """
    if shape is None:
        shape = input.shape
    if output is None:
        if not complex_output:
            output = np.zeros(shape, dtype=input.dtype.name)
        else:
            complex_type = np.promote_types(input.dtype, np.complex64)
            output = np.zeros(shape, dtype=complex_type)
    elif isinstance(output, (type, np.dtype)):
        # Classes (like `np.float32`) and dtypes are interpreted as dtype
        if complex_output and np.dtype(output).kind != 'c':
            warnings.warn("promoting specified output dtype to complex", stacklevel=3)
            output = np.promote_types(output, np.complex64)
        output = np.zeros(shape, dtype=output)
    elif isinstance(output, str):
        output = np.dtype(output)
        if complex_output and output.kind != 'c':
            raise RuntimeError("output must have complex dtype")
        elif not issubclass(output.type, np.number):
            raise RuntimeError("output must have numeric dtype")
        output = np.zeros(shape, dtype=output)
    else:
        # output was supplied as an array
        output = np.asarray(output)
        if output.shape != shape:
            raise RuntimeError("output shape not correct")
        elif complex_output and output.dtype.kind != 'c':
            raise RuntimeError("output must have complex dtype")
    return output


def _check_axes(axes, ndim):
    """Validate and canonicalize an ``axes`` argument.

    Parameters
    ----------
    axes : None, int, or iterable of ints
        Axes specification.  ``None`` selects all axes.
    ndim : int
        Number of dimensions of the associated array.

    Returns
    -------
    tuple of int
        Unique, non-negative axis indices.

    Raises
    ------
    ValueError
        If an axis is out of range, duplicated, or `axes` has an
        unsupported type.

    Notes
    -----
    Fixes two defects of the previous implementation: the validation
    loop rebuilt ``axes`` from itself on every iteration (quadratic
    work, and it consumed one-shot iterators mid-loop, silently dropping
    the first axis), and scalar input bypassed both the range check and
    the negative-axis normalization applied to iterable input.
    """
    if axes is None:
        return tuple(range(ndim))
    if np.isscalar(axes):
        axes = (operator.index(axes),)
    elif isinstance(axes, Iterable):
        # Materialize exactly once so one-shot iterators are handled
        # correctly, then validate each entry below.
        axes = tuple(operator.index(ax) for ax in axes)
    else:
        message = "axes must be an integer, iterable of integers, or None"
        raise ValueError(message)
    for ax in axes:
        if ax < -ndim or ax > ndim - 1:
            raise ValueError(f"specified axis: {ax} is out of range")
    # Map negative indices onto their non-negative equivalents.
    axes = tuple(ax % ndim if ax < 0 else ax for ax in axes)
    if len(set(axes)) != len(axes):
        raise ValueError("axes must be unique")
    return axes


def _skip_if_dtype(arg):
    """'array or dtype' polymorphism.

    Return None for np.int8, dtype('float32') or 'f' etc
    arg for np.empty(3) etc
    """
    if isinstance(arg, str):
        return None
    if type(arg) is type:
        return None if issubclass(arg, np.generic) else arg
    else:
        return None if isinstance(arg, np.dtype) else arg


def _skip_if_int(arg):
    """Return None when `arg` is None or a plain int, else `arg` unchanged."""
    return None if (arg is None or isinstance(arg, int)) else arg


# ---- scipy/ndimage/fourier.py (deprecation shim; continues below) ----
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'fourier_gaussian', 'fourier_uniform',
    'fourier_ellipsoid', 'fourier_shift'
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to scipy.ndimage._fourier while emitting
    # the standard sub-module deprecation warning.
    return _sub_module_deprecation(sub_package='ndimage', module='fourier',
                                   private_modules=['_fourier'], all=__all__,
                                   attribute=name)


# ---- scipy/ndimage/interpolation.py (deprecation shim) ----
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'spline_filter1d', 'spline_filter',
    'geometric_transform', 'map_coordinates',
    'affine_transform', 'shift', 'zoom', 'rotate',
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to scipy.ndimage._interpolation while
    # emitting the standard sub-module deprecation warning.
    return _sub_module_deprecation(sub_package='ndimage', module='interpolation',
                                   private_modules=['_interpolation'], all=__all__,
                                   attribute=name)


# ---- scipy/ndimage/measurements.py (deprecation shim) ----
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


__all__ = [  # noqa: F822
    'label', 'find_objects', 'labeled_comprehension',
    'sum', 'mean', 'variance', 'standard_deviation',
    'minimum', 'maximum', 'median', 'minimum_position',
    'maximum_position', 'extrema', 'center_of_mass',
    'histogram', 'watershed_ift', 'sum_labels'
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to scipy.ndimage._measurements while
    # emitting the standard sub-module deprecation warning.
    return _sub_module_deprecation(sub_package='ndimage', module='measurements',
                                   private_modules=['_measurements'], all=__all__,
                                   attribute=name)


# ---- scipy/odr/__init__.py ----
"""
=================================================
Orthogonal distance regression (:mod:`scipy.odr`)
=================================================

.. currentmodule:: scipy.odr

Package Content
===============

.. autosummary::
   :toctree: generated/

   Data -- The data to fit.
   RealData -- Data with weights as actual std. dev.s and/or covariances.
   Model -- Stores information about the function to be fit.
   ODR -- Gathers all info & manages the main fitting routine.
   Output -- Result from the fit.
   odr -- Low-level function for ODR.

   OdrWarning -- Warning about potential problems when running ODR.
   OdrError -- Error exception.
   OdrStop -- Stop exception.

   polynomial -- Factory function for a general polynomial model.
   exponential -- Exponential model
   multilinear -- Arbitrary-dimensional linear model
   unilinear -- Univariate linear model
   quadratic -- Quadratic model

Usage information
=================

Introduction
------------

Why Orthogonal Distance Regression (ODR)?
Sometimes one has +measurement errors in the explanatory (a.k.a., "independent") +variable(s), not just the response (a.k.a., "dependent") variable(s). +Ordinary Least Squares (OLS) fitting procedures treat the data for +explanatory variables as fixed, i.e., not subject to error of any kind. +Furthermore, OLS procedures require that the response variables be an +explicit function of the explanatory variables; sometimes making the +equation explicit is impractical and/or introduces errors. ODR can +handle both of these cases with ease, and can even reduce to the OLS +case if that is sufficient for the problem. + +ODRPACK is a FORTRAN-77 library for performing ODR with possibly +non-linear fitting functions. It uses a modified trust-region +Levenberg-Marquardt-type algorithm [1]_ to estimate the function +parameters. The fitting functions are provided by Python functions +operating on NumPy arrays. The required derivatives may be provided +by Python functions as well, or may be estimated numerically. ODRPACK +can do explicit or implicit ODR fits, or it can do OLS. Input and +output variables may be multidimensional. Weights can be provided to +account for different variances of the observations, and even +covariances between dimensions of the variables. + +The `scipy.odr` package offers an object-oriented interface to +ODRPACK, in addition to the low-level `odr` function. + +Additional background information about ODRPACK can be found in the +`ODRPACK User's Guide +`_, reading +which is recommended. + +Basic usage +----------- + +1. Define the function you want to fit against.:: + + def f(B, x): + '''Linear function y = m*x + b''' + # B is a vector of the parameters. + # x is an array of the current x values. + # x is in the same format as the x passed to Data or RealData. + # + # Return an array in the same format as y passed to Data or RealData. + return B[0]*x + B[1] + +2. Create a Model.:: + + linear = Model(f) + +3. 
Create a Data or RealData instance.:: + + mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2)) + + or, when the actual covariances are known:: + + mydata = RealData(x, y, sx=sx, sy=sy) + +4. Instantiate ODR with your data, model and initial parameter estimate.:: + + myodr = ODR(mydata, linear, beta0=[1., 2.]) + +5. Run the fit.:: + + myoutput = myodr.run() + +6. Examine output.:: + + myoutput.pprint() + + +References +---------- +.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression," + in "Statistical analysis of measurement error models and + applications: proceedings of the AMS-IMS-SIAM joint summer research + conference held June 10-16, 1989," Contemporary Mathematics, + vol. 112, pg. 186, 1990. + +""" +# version: 0.7 +# author: Robert Kern +# date: 2006-09-21 + +from ._odrpack import * +from ._models import * +from . import _add_newdocs + +# Deprecated namespaces, to be removed in v2.0.0 +from . import models, odrpack + +__all__ = [s for s in dir() + if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so b/llava_video/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..87a8a9bddfd4cafdf465d6cc08a1bdfdc73f026d --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b72231a3ef9e1995615d4b2cf5925df58ba6f8f10d0b8a8eaa692270fb8c8c4 +size 622553 diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..27ef049d4fceeca567ffbe6d6be17970d9a59fdc Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37d5c9d1860d9ac3d890041539a60d99a1fdced3 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9429431e16a6c40b8a28535bf0fc0614b2f6c402 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..def204ef9f8ba3c715fb42f82148b7666c99924f Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..399bfef692d7482b843a2d0bcc2fb9c6b1bbe37c Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc differ diff --git 
a/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1629d356b740d8465e1fcd20c4e39553fe1a2f01 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/odr/__pycache__/odrpack.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py b/llava_video/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..e09fb6cc8c5f1523dfbeaef466a5b76bd22c01bb --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py @@ -0,0 +1,34 @@ +from numpy.lib import add_newdoc + +add_newdoc('scipy.odr', 'odr', + """ + odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, + ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, + taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, + scld=None, work=None, iwork=None, full_output=0) + + Low-level function for ODR. + + See Also + -------- + ODR : The ODR class gathers all information and coordinates the running of the + main fitting routine. + Model : The Model class stores information about the function you wish to fit. + Data : The data to fit. + RealData : Data with weights as actual std. dev.s and/or covariances. + + Notes + ----- + This is a function performing the same operation as the `ODR`, + `Model`, and `Data` classes together. The parameters of this + function are explained in the class documentation. + + """) + +add_newdoc('scipy.odr.__odrpack', '_set_exceptions', + """ + _set_exceptions(odr_error, odr_stop) + + Internal function: set exception classes. 

    """)


# ---- scipy/odr/_models.py ----
""" Collection of Model instances for use with the odrpack fitting package.
"""
import numpy as np
from scipy.odr._odrpack import Model

__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic',
           'polynomial']


def _lin_fcn(B, x):
    # Multilinear model: y = B[0] + sum_i B[i+1] * x[i].
    a, b = B[0], B[1:]
    b.shape = (b.shape[0], 1)

    return a + (x*b).sum(axis=0)


def _lin_fjb(B, x):
    # Jacobian wrt the parameters B: a row of ones (intercept) stacked
    # on the x values (one row per slope), shaped (len(B), n).
    a = np.ones(x.shape[-1], float)
    res = np.concatenate((a, x.ravel()))
    res.shape = (B.shape[-1], x.shape[-1])
    return res


def _lin_fjd(B, x):
    # Jacobian wrt x: each slope B[i+1] broadcast over its row of x.
    b = B[1:]
    b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0)
    b.shape = x.shape
    return b


def _lin_est(data):
    # Eh. The answer is analytical, so just return all ones.
    # Don't return zeros since that will interfere with
    # ODRPACK's auto-scaling procedures.

    if len(data.x.shape) == 2:
        m = data.x.shape[0]
    else:
        m = 1

    return np.ones((m + 1,), float)


def _poly_fcn(B, x, powers):
    # Polynomial model: y = B[0] + sum_i B[i+1] * x**powers[i].
    a, b = B[0], B[1:]
    b.shape = (b.shape[0], 1)

    return a + np.sum(b * np.power(x, powers), axis=0)


def _poly_fjacb(B, x, powers):
    # Jacobian wrt B: ones (intercept) stacked on the x**powers rows.
    res = np.concatenate((np.ones(x.shape[-1], float),
                          np.power(x, powers).flat))
    res.shape = (B.shape[-1], x.shape[-1])
    return res


def _poly_fjacd(B, x, powers):
    # Jacobian wrt x: sum_i B[i+1] * powers[i] * x**(powers[i]-1).
    b = B[1:]
    b.shape = (b.shape[0], 1)

    b = b * powers

    return np.sum(b * np.power(x, powers-1), axis=0)


def _exp_fcn(B, x):
    # Exponential model: y = B[0] + exp(B[1] * x).
    return B[0] + np.exp(B[1] * x)


def _exp_fjd(B, x):
    # d/dx of the exponential model.
    return B[1] * np.exp(B[1] * x)


def _exp_fjb(B, x):
    # Jacobian wrt B: rows [1, x * exp(B[1] * x)].
    res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
    res.shape = (2, x.shape[-1])
    return res


def _exp_est(data):
    # Eh.
    return np.array([1., 1.])


class _MultilinearModel(Model):
    r"""
    Arbitrary-dimensional linear model

    This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i`

    Examples
    --------
    We can calculate orthogonal distance regression with an arbitrary
    dimensional linear model:

    >>> from scipy import odr
    >>> import numpy as np
    >>> x = np.linspace(0.0, 5.0)
    >>> y = 10.0 + 5.0 * x
    >>> data = odr.Data(x, y)
    >>> odr_obj = odr.ODR(data, odr.multilinear)
    >>> output = odr_obj.run()
    >>> print(output.beta)
    [10. 5.]

    """

    def __init__(self):
        super().__init__(
            _lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est,
            meta={'name': 'Arbitrary-dimensional Linear',
                  'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]',
                  'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'})


# Ready-to-use singleton instance.
multilinear = _MultilinearModel()


def polynomial(order):
    """
    Factory function for a general polynomial model.

    Parameters
    ----------
    order : int or sequence
        If an integer, it becomes the order of the polynomial to fit. If
        a sequence of numbers, then these are the explicit powers in the
        polynomial.
        A constant term (power 0) is always included, so don't include 0.
        Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).

    Returns
    -------
    polynomial : Model instance
        Model instance.

    Examples
    --------
    We can fit an input data using orthogonal distance regression (ODR) with
    a polynomial model:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import odr
    >>> x = np.linspace(0.0, 5.0)
    >>> y = np.sin(x)
    >>> poly_model = odr.polynomial(3)  # using third order polynomial model
    >>> data = odr.Data(x, y)
    >>> odr_obj = odr.ODR(data, poly_model)
    >>> output = odr_obj.run()  # running ODR fitting
    >>> poly = np.poly1d(output.beta[::-1])
    >>> poly_y = poly(x)
    >>> plt.plot(x, y, label="input data")
    >>> plt.plot(x, poly_y, label="polynomial ODR")
    >>> plt.legend()
    >>> plt.show()

    """

    powers = np.asarray(order)
    if powers.shape == ():
        # Scalar.
        powers = np.arange(1, powers + 1)

    # Column vector of powers so the model function can broadcast
    # against the observations.
    powers.shape = (len(powers), 1)
    len_beta = len(powers) + 1

    def _poly_est(data, len_beta=len_beta):
        # Eh. Ignore data and return all ones.

        return np.ones((len_beta,), float)

    return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
                 estimate=_poly_est, extra_args=(powers,),
                 meta={'name': 'Sorta-general Polynomial',
                       'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
                       'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' %
                       (len_beta-1)})


class _ExponentialModel(Model):
    r"""
    Exponential model

    This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}`

    Examples
    --------
    We can calculate orthogonal distance regression with an exponential model:

    >>> from scipy import odr
    >>> import numpy as np
    >>> x = np.linspace(0.0, 5.0)
    >>> y = -10.0 + np.exp(0.5*x)
    >>> data = odr.Data(x, y)
    >>> odr_obj = odr.ODR(data, odr.exponential)
    >>> output = odr_obj.run()
    >>> print(output.beta)
    [-10. 0.5]

    """

    def __init__(self):
        super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
                         estimate=_exp_est,
                         meta={'name': 'Exponential',
                               'equ': 'y= B_0 + exp(B_1 * x)',
                               'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'})


# Ready-to-use singleton instance.
exponential = _ExponentialModel()


def _unilin(B, x):
    # Univariate linear model: y = B[0]*x + B[1].
    return x*B[0] + B[1]


def _unilin_fjd(B, x):
    # d/dx = B[0], broadcast to x's shape.
    return np.ones(x.shape, float) * B[0]


def _unilin_fjb(B, x):
    # Jacobian wrt (B[0], B[1]): rows [x, 1].
    _ret = np.concatenate((x, np.ones(x.shape, float)))
    _ret.shape = (2,) + x.shape

    return _ret


def _unilin_est(data):
    return (1., 1.)


def _quadratic(B, x):
    # y = B[0]*x**2 + B[1]*x + B[2], in Horner form.
    return x*(x*B[0] + B[1]) + B[2]


def _quad_fjd(B, x):
    # d/dx = 2*B[0]*x + B[1].
    return 2*x*B[0] + B[1]


def _quad_fjb(B, x):
    # Jacobian wrt (B[0], B[1], B[2]): rows [x**2, x, 1].
    _ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
    _ret.shape = (3,) + x.shape

    return _ret


def _quad_est(data):
    return (1.,1.,1.)
+ + +class _UnilinearModel(Model): + r""" + Univariate linear model + + This model is defined by :math:`y = \beta_0 x + \beta_1` + + Examples + -------- + We can calculate orthogonal distance regression with an unilinear model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = 1.0 * x + 2.0 + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.unilinear) + >>> output = odr_obj.run() + >>> print(output.beta) + [1. 2.] + + """ + + def __init__(self): + super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, + estimate=_unilin_est, + meta={'name': 'Univariate Linear', + 'equ': 'y = B_0 * x + B_1', + 'TeXequ': '$y = \\beta_0 x + \\beta_1$'}) + + +unilinear = _UnilinearModel() + + +class _QuadraticModel(Model): + r""" + Quadratic model + + This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2` + + Examples + -------- + We can calculate orthogonal distance regression with a quadratic model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = 1.0 * x ** 2 + 2.0 * x + 3.0 + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.quadratic) + >>> output = odr_obj.run() + >>> print(output.beta) + [1. 2. 3.] + + """ + + def __init__(self): + super().__init__( + _quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est, + meta={'name': 'Quadratic', + 'equ': 'y = B_0*x**2 + B_1*x + B_2', + 'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'}) + + +quadratic = _QuadraticModel() diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/_odrpack.py b/llava_video/lib/python3.10/site-packages/scipy/odr/_odrpack.py new file mode 100644 index 0000000000000000000000000000000000000000..30d46aa3f4465f31b32e5f13f0b01b940981d489 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/odr/_odrpack.py @@ -0,0 +1,1154 @@ +""" +Python wrappers for Orthogonal Distance Regression (ODRPACK). 

Notes
=====

* Array formats -- FORTRAN stores its arrays in memory column first, i.e., an
  array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
  NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
  efficiency and convenience, the input and output arrays of the fitting
  function (and its Jacobians) are passed to FORTRAN without transposition.
  Therefore, where the ODRPACK documentation says that the X array is of shape
  (N, M), it will be passed to the Python function as an array of shape (M, N).
  If M==1, the 1-D case, then nothing matters; if M>1, then your
  Python functions will be dealing with arrays that are indexed in reverse of
  the ODRPACK documentation. No real issue, but watch out for your indexing of
  the Jacobians: the i,jth elements (@f_i/@x_j) evaluated at the nth
  observation will be returned as jacd[j, i, n]. Except for the Jacobians, it
  really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,
  you can always use the transpose() function from SciPy explicitly.

* Examples -- See the accompanying file test/test.py for examples of how to set
  up fits of your own. Some are taken from the User's Guide; some are from
  other sources.

* Models -- Some common models are instantiated in the accompanying module
  models.py . Contributions are welcome.

Credits
=======

* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.

Robert Kern
robert.kern@gmail.com

"""
import os
from threading import Lock

import numpy as np
from warnings import warn
from scipy.odr import __odrpack

__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop',
           'Data', 'RealData', 'Model', 'Output', 'ODR',
           'odr_error', 'odr_stop']

# `odr` is the raw C-extension fitting routine, re-exported here.
odr = __odrpack.odr
# NOTE(review): this lock presumably serializes calls into the
# non-reentrant ODRPACK core; its use is not visible in this chunk — confirm.
ODR_LOCK = Lock()


class OdrWarning(UserWarning):
    """
    Warning indicating that the data passed into
    ODR will cause problems when passed into 'odr'
    that the user should be aware of.
    """
    pass


class OdrError(Exception):
    """
    Exception indicating an error in fitting.

    This is raised by `~scipy.odr.odr` if an error occurs during fitting.
    """
    pass


class OdrStop(Exception):
    """
    Exception stopping fitting.

    You can raise this exception in your objective function to tell
    `~scipy.odr.odr` to stop fitting.
    """
    pass


# Backwards compatibility
odr_error = OdrError
odr_stop = OdrStop

# Register the exception classes with the C extension so it can raise them.
__odrpack._set_exceptions(OdrError, OdrStop)


def _conv(obj, dtype=None):
    """ Convert an object to the preferred form for input to the odr routine.
    """

    if obj is None:
        return obj
    else:
        if dtype is None:
            obj = np.asarray(obj)
        else:
            obj = np.asarray(obj, dtype)
        if obj.shape == ():
            # Scalar.
            # Return a NumPy scalar of the array's dtype rather than a
            # 0-d array.
            return obj.dtype.type(obj)
        else:
            return obj


def _report_error(info):
    """ Interprets the return code of the odr routine.

    Parameters
    ----------
    info : int
        The return code of the odr routine.

    Returns
    -------
    problems : list(str)
        A list of messages about why the odr() routine stopped.
    """

    stopreason = ('Blank',
                  'Sum of squares convergence',
                  'Parameter convergence',
                  'Both sum of squares and parameter convergence',
                  'Iteration limit reached')[info % 5]

    if info >= 5:
        # questionable results or fatal error
        # Decode `info` into its five decimal digits; ODRPACK packs
        # several independent flags into one integer return code.

        I = (info//10000 % 10,
             info//1000 % 10,
             info//100 % 10,
             info//10 % 10,
             info % 10)
        problems = []

        if I[0] == 0:
            if I[1] != 0:
                problems.append('Derivatives possibly not correct')
            if I[2] != 0:
                problems.append('Error occurred in callback')
            if I[3] != 0:
                problems.append('Problem is not full rank at solution')
            problems.append(stopreason)
        elif I[0] == 1:
            if I[1] != 0:
                problems.append('N < 1')
            if I[2] != 0:
                problems.append('M < 1')
            if I[3] != 0:
                problems.append('NP < 1 or NP > N')
            if I[4] != 0:
                problems.append('NQ < 1')
        elif I[0] == 2:
            if I[1] != 0:
                problems.append('LDY and/or LDX incorrect')
            if I[2] != 0:
                problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
            if I[3] != 0:
                problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
            if I[4] != 0:
                problems.append('LWORK and/or LIWORK too small')
        elif I[0] == 3:
            if I[1] != 0:
                problems.append('STPB and/or STPD incorrect')
            if I[2] != 0:
                problems.append('SCLB and/or SCLD incorrect')
            if I[3] != 0:
                problems.append('WE incorrect')
            if I[4] != 0:
                problems.append('WD incorrect')
        elif I[0] == 4:
            problems.append('Error in derivatives')
        elif I[0] == 5:
            problems.append('Error occurred in callback')
        elif I[0] == 6:
            problems.append('Numerical error detected')

        return problems

    else:
        return [stopreason]


class Data:
    """
    The data to fit.

    Parameters
    ----------
    x : array_like
        Observed data for the independent variable of the regression
    y : array_like, optional
        If array-like, observed data for the dependent variable of the
        regression. A scalar input implies that the model to be used on
        the data is implicit.
    we : array_like, optional
        If `we` is a scalar, then that value is used for all data points (and
        all dimensions of the response variable).
        If `we` is a rank-1 array of length q (the dimensionality of the
        response variable), then this vector is the diagonal of the covariant
        weighting matrix for all data points.
        If `we` is a rank-1 array of length n (the number of data points), then
        the i'th element is the weight for the i'th response variable
        observation (single-dimensional only).
        If `we` is a rank-2 array of shape (q, q), then this is the full
        covariant weighting matrix broadcast to each observation.
        If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
        diagonal of the covariant weighting matrix for the i'th observation.
        If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
        full specification of the covariant weighting matrix for each
        observation.
        If the fit is implicit, then only a positive scalar value is used.
    wd : array_like, optional
        If `wd` is a scalar, then that value is used for all data points
        (and all dimensions of the input variable). If `wd` = 0, then the
        covariant weighting matrix for each observation is set to the identity
        matrix (so each dimension of each observation has the same weight).
        If `wd` is a rank-1 array of length m (the dimensionality of the input
        variable), then this vector is the diagonal of the covariant weighting
        matrix for all data points.
        If `wd` is a rank-1 array of length n (the number of data points), then
        the i'th element is the weight for the ith input variable observation
        (single-dimensional only).
        If `wd` is a rank-2 array of shape (m, m), then this is the full
        covariant weighting matrix broadcast to each observation.
        If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
        diagonal of the covariant weighting matrix for the ith observation.
        If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
        full specification of the covariant weighting matrix for each
        observation.
    fix : array_like of ints, optional
        The `fix` argument is the same as ifixx in the class ODR. It is an
        array of integers with the same shape as data.x that determines which
        input observations are treated as fixed. One can use a sequence of
        length m (the dimensionality of the input observations) to fix some
        dimensions for all observations. A value of 0 fixes the observation,
        a value > 0 makes it free.
    meta : dict, optional
        Free-form dictionary for metadata.

    Notes
    -----
    Each argument is attached to the member of the instance of the same name.
    The structures of `x` and `y` are described in the Model class docstring.
    If `y` is an integer, then the Data instance can only be used to fit with
    implicit models where the dimensionality of the response is equal to the
    specified value of `y`.

    The `we` argument weights the effect a deviation in the response variable
    has on the fit. The `wd` argument weights the effect a deviation in the
    input variable has on the fit. To handle multidimensional inputs and
    responses easily, the structure of these arguments has the n'th
    dimensional axis first. These arguments heavily use the structured
    arguments feature of ODRPACK to conveniently and flexibly support all
    options. See the ODRPACK User's Guide for a full explanation of how these
    weights are used in the algorithm. Basically, a higher value of the weight
    for a particular data point makes a deviation at that point more
    detrimental to the fit.

    """

    def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None):
        self.x = _conv(x)

        # _conv leaves scalars as scalars, so this rejects scalar `x`
        # (only `y` may be scalar, for implicit models).
        if not isinstance(self.x, np.ndarray):
            raise ValueError("Expected an 'ndarray' of data for 'x', "
                             f"but instead got data of type '{type(self.x).__name__}'")

        self.y = _conv(y)
        self.we = _conv(we)
        self.wd = _conv(wd)
        self.fix = _conv(fix)
        self.meta = {} if meta is None else meta

    def set_meta(self, **kwds):
        """ Update the metadata dictionary with the keywords and data provided
        by keywords.

        Examples
        --------
        ::

            data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay")
        """

        self.meta.update(kwds)

    def __getattr__(self, attr):
        """ Dispatch attribute access to the metadata dictionary.
        """
        # Only invoked when normal attribute lookup fails, so real
        # attributes always shadow metadata entries.
        if attr != "meta" and attr in self.meta:
            return self.meta[attr]
        else:
            raise AttributeError(f"'{attr}' not in metadata")


class RealData(Data):
    """
    The data, with weightings as actual standard deviations and/or
    covariances.

    Parameters
    ----------
    x : array_like
        Observed data for the independent variable of the regression
    y : array_like, optional
        If array-like, observed data for the dependent variable of the
        regression. A scalar input implies that the model to be used on
        the data is implicit.
    sx : array_like, optional
        Standard deviations of `x`.
        `sx` are standard deviations of `x` and are converted to weights by
        dividing 1.0 by their squares.
    sy : array_like, optional
        Standard deviations of `y`.
        `sy` are standard deviations of `y` and are converted to weights by
        dividing 1.0 by their squares.
    covx : array_like, optional
        Covariance of `x`
        `covx` is an array of covariance matrices of `x` and are converted to
        weights by performing a matrix inversion on each observation's
        covariance matrix.
    covy : array_like, optional
        Covariance of `y`
        `covy` is an array of covariance matrices and are converted to
        weights by performing a matrix inversion on each observation's
        covariance matrix.
    fix : array_like, optional
        The argument and member fix is the same as Data.fix and ODR.ifixx:
        It is an array of integers with the same shape as `x` that
        determines which input observations are treated as fixed. One can
        use a sequence of length m (the dimensionality of the input
        observations) to fix some dimensions for all observations. A value
        of 0 fixes the observation, a value > 0 makes it free.
    meta : dict, optional
        Free-form dictionary for metadata.

    Notes
    -----
    The weights `wd` and `we` are computed from provided values as follows:

    `sx` and `sy` are converted to weights by dividing 1.0 by their squares.
    For example, ``wd = 1./np.power(`sx`, 2)``.

    `covx` and `covy` are arrays of covariance matrices and are converted to
    weights by performing a matrix inversion on each observation's covariance
    matrix. For example, ``we[i] = np.linalg.inv(covy[i])``.

    These arguments follow the same structured argument conventions as wd and
    we only restricted by their natures: `sx` and `sy` can't be rank-3, but
    `covx` and `covy` can be.

    Only set *either* `sx` or `covx` (not both). Setting both will raise an
    exception. Same with `sy` and `covy`.

    """

    def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None,
                 fix=None, meta=None):
        if (sx is not None) and (covx is not None):
            raise ValueError("cannot set both sx and covx")
        if (sy is not None) and (covy is not None):
            raise ValueError("cannot set both sy and covy")

        # Set flags for __getattr__
        # Record which source (std. dev. or covariance) each derived
        # weight attribute should be computed from on access.
        self._ga_flags = {}
        if sx is not None:
            self._ga_flags['wd'] = 'sx'
        else:
            self._ga_flags['wd'] = 'covx'
        if sy is not None:
            self._ga_flags['we'] = 'sy'
        else:
            self._ga_flags['we'] = 'covy'

        self.x = _conv(x)

        if not isinstance(self.x, np.ndarray):
            raise ValueError("Expected an 'ndarray' of data for 'x', "
                             f"but instead got data of type '{type(self.x).__name__}'")

        self.y = _conv(y)
        self.sx = _conv(sx)
        self.sy = _conv(sy)
        self.covx = _conv(covx)
        self.covy = _conv(covy)
        self.fix = _conv(fix)
        self.meta = {} if meta is None else meta

    def _sd2wt(self, sd):
        """ Convert standard deviation to weights.
        """

        return 1./np.power(sd, 2)

    def _cov2wt(self, cov):
        """ Convert covariance matrix(-ices) to weights.
        """

        from scipy.linalg import inv

        if len(cov.shape) == 2:
            return inv(cov)
        else:
            weights = np.zeros(cov.shape, float)

            # Invert one (m, m) covariance matrix per observation.
            for i in range(cov.shape[-1]):  # n
                weights[:,:,i] = inv(cov[:,:,i])

            return weights

    def __getattr__(self, attr):
        # 'wd' and 'we' are never stored; they are derived on demand from
        # whichever of sx/covx (sy/covy) the constructor recorded in
        # _ga_flags.  Everything else falls back to metadata dispatch.
        if attr not in ('wd', 'we'):
            if attr != "meta" and attr in self.meta:
                return self.meta[attr]
            else:
                raise AttributeError(f"'{attr}' not in metadata")
        else:
            lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx),
                          ('wd', 'covx'): (self._cov2wt, self.covx),
                          ('we', 'sy'): (self._sd2wt, self.sy),
                          ('we', 'covy'): (self._cov2wt, self.covy)}

            func, arg = lookup_tbl[(attr, self._ga_flags[attr])]

            if arg is not None:
                return func(*(arg,))
            else:
                return None


class Model:
    """
    The Model class stores information about the function you wish to fit.
+ + It stores the function itself, at the least, and optionally stores + functions which compute the Jacobians used during fitting. Also, one + can provide a function that will provide reasonable starting values + for the fit parameters possibly given the set of data. + + Parameters + ---------- + fcn : function + fcn(beta, x) --> y + fjacb : function + Jacobian of fcn wrt the fit parameters beta. + + fjacb(beta, x) --> @f_i(x,B)/@B_j + fjacd : function + Jacobian of fcn wrt the (possibly multidimensional) input + variable. + + fjacd(beta, x) --> @f_i(x,B)/@x_j + extra_args : tuple, optional + If specified, `extra_args` should be a tuple of extra + arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called + by `apply(fcn, (beta, x) + extra_args)` + estimate : array_like of rank-1 + Provides estimates of the fit parameters from the data + + estimate(data) --> estbeta + implicit : boolean + If TRUE, specifies that the model + is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit + against + meta : dict, optional + freeform dictionary of metadata for the model + + Notes + ----- + Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and + return a NumPy array. The `estimate` object takes an instance of the + Data class. + + Here are the rules for the shapes of the argument and return + arrays of the callback functions: + + `x` + if the input data is single-dimensional, then `x` is rank-1 + array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)`` + If the input data is multi-dimensional, then `x` is a rank-2 array; + i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``. + In all cases, it has the same shape as the input data array passed to + `~scipy.odr.odr`. `m` is the dimensionality of the input data, + `n` is the number of observations. + `y` + if the response variable is single-dimensional, then `y` is a + rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``. 
+ If the response variable is multi-dimensional, then `y` is a rank-2 + array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape = + (q, n)`` where `q` is the dimensionality of the response variable. + `beta` + rank-1 array of length `p` where `p` is the number of parameters; + i.e. ``beta = array([B_1, B_2, ..., B_p])`` + `fjacb` + if the response variable is multi-dimensional, then the + return array's shape is ``(q, p, n)`` such that ``fjacb(x,beta)[l,k,i] = + d f_l(X,B)/d B_k`` evaluated at the ith data point. If ``q == 1``, then + the return array is only rank-2 and with shape ``(p, n)``. + `fjacd` + as with fjacb, only the return array's shape is ``(q, m, n)`` + such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data + point. If ``q == 1``, then the return array's shape is ``(m, n)``. If + ``m == 1``, the shape is (q, n). If `m == q == 1`, the shape is ``(n,)``. + + """ + + def __init__(self, fcn, fjacb=None, fjacd=None, + extra_args=None, estimate=None, implicit=0, meta=None): + + self.fcn = fcn + self.fjacb = fjacb + self.fjacd = fjacd + + if extra_args is not None: + extra_args = tuple(extra_args) + + self.extra_args = extra_args + self.estimate = estimate + self.implicit = implicit + self.meta = meta if meta is not None else {} + + def set_meta(self, **kwds): + """ Update the metadata dictionary with the keywords and data provided + here. + + Examples + -------- + set_meta(name="Exponential", equation="y = a exp(b x) + c") + """ + + self.meta.update(kwds) + + def __getattr__(self, attr): + """ Dispatch attribute access to the metadata. + """ + + if attr != "meta" and attr in self.meta: + return self.meta[attr] + else: + raise AttributeError(f"'{attr}' not in metadata") + + +class Output: + """ + The Output class stores the output of an ODR run. + + Attributes + ---------- + beta : ndarray + Estimated parameter values, of shape (q,). + sd_beta : ndarray + Standard deviations of the estimated parameters, of shape (p,). 
+ cov_beta : ndarray + Covariance matrix of the estimated parameters, of shape (p,p). + Note that this `cov_beta` is not scaled by the residual variance + `res_var`, whereas `sd_beta` is. This means + ``np.sqrt(np.diag(output.cov_beta * output.res_var))`` is the same + result as `output.sd_beta`. + delta : ndarray, optional + Array of estimated errors in input variables, of same shape as `x`. + eps : ndarray, optional + Array of estimated errors in response variables, of same shape as `y`. + xplus : ndarray, optional + Array of ``x + delta``. + y : ndarray, optional + Array ``y = fcn(x + delta)``. + res_var : float, optional + Residual variance. + sum_square : float, optional + Sum of squares error. + sum_square_delta : float, optional + Sum of squares of delta error. + sum_square_eps : float, optional + Sum of squares of eps error. + inv_condnum : float, optional + Inverse condition number (cf. ODRPACK UG p. 77). + rel_error : float, optional + Relative error in function values computed within fcn. + work : ndarray, optional + Final work array. + work_ind : dict, optional + Indices into work for drawing out values (cf. ODRPACK UG p. 83). + info : int, optional + Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38). + stopreason : list of str, optional + `info` interpreted into English. + + Notes + ----- + Takes one argument for initialization, the return value from the + function `~scipy.odr.odr`. The attributes listed as "optional" above are + only present if `~scipy.odr.odr` was run with ``full_output=1``. + + """ + + def __init__(self, output): + self.beta = output[0] + self.sd_beta = output[1] + self.cov_beta = output[2] + + if len(output) == 4: + # full output + self.__dict__.update(output[3]) + self.stopreason = _report_error(self.info) + + def pprint(self): + """ Pretty-print important results. 
+ """ + + print('Beta:', self.beta) + print('Beta Std Error:', self.sd_beta) + print('Beta Covariance:', self.cov_beta) + if hasattr(self, 'info'): + print('Residual Variance:',self.res_var) + print('Inverse Condition #:', self.inv_condnum) + print('Reason(s) for Halting:') + for r in self.stopreason: + print(f' {r}') + + +class ODR: + """ + The ODR class gathers all information and coordinates the running of the + main fitting routine. + + Members of instances of the ODR class have the same names as the arguments + to the initialization routine. + + Parameters + ---------- + data : Data class instance + instance of the Data class + model : Model class instance + instance of the Model class + + Other Parameters + ---------------- + beta0 : array_like of rank-1 + a rank-1 sequence of initial parameter values. Optional if + model provides an "estimate" function to estimate these values. + delta0 : array_like of floats of rank-1, optional + a (double-precision) float array to hold the initial values of + the errors in the input variables. Must be same shape as data.x + ifixb : array_like of ints of rank-1, optional + sequence of integers with the same length as beta0 that determines + which parameters are held fixed. A value of 0 fixes the parameter, + a value > 0 makes the parameter free. + ifixx : array_like of ints with same shape as data.x, optional + an array of integers with the same shape as data.x that determines + which input observations are treated as fixed. One can use a sequence + of length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + job : int, optional + an integer telling ODRPACK what tasks to perform. See p. 31 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_job post-initialization for a more readable interface. + iprint : int, optional + an integer telling ODRPACK what to print. See pp. 
33-34 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_iprint post-initialization for a more readable interface. + errfile : str, optional + string with the filename to print ODRPACK errors to. If the file already + exists, an error will be thrown. The `overwrite` argument can be used to + prevent this. *Do Not Open This File Yourself!* + rptfile : str, optional + string with the filename to print ODRPACK summaries to. If the file + already exists, an error will be thrown. The `overwrite` argument can be + used to prevent this. *Do Not Open This File Yourself!* + ndigit : int, optional + integer specifying the number of reliable digits in the computation + of the function. + taufac : float, optional + float specifying the initial trust region. The default value is 1. + The initial trust region is equal to taufac times the length of the + first computed Gauss-Newton step. taufac must be less than 1. + sstol : float, optional + float specifying the tolerance for convergence based on the relative + change in the sum-of-squares. The default value is eps**(1/2) where eps + is the smallest value such that 1 + eps > 1 for double precision + computation on the machine. sstol must be less than 1. + partol : float, optional + float specifying the tolerance for convergence based on the relative + change in the estimated parameters. The default value is eps**(2/3) for + explicit models and ``eps**(1/3)`` for implicit models. partol must be less + than 1. + maxit : int, optional + integer specifying the maximum number of iterations to perform. For + first runs, maxit is the total number of iterations performed and + defaults to 50. For restarts, maxit is the number of additional + iterations to perform and defaults to 10. + stpb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute + finite difference derivatives wrt the parameters. 
+ stpd : optional + array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative + step sizes to compute finite difference derivatives wrt the input + variable errors. If stpd is a rank-1 array with length m (the + dimensionality of the input variable), then the values are broadcast to + all observations. + sclb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of scaling factors for the + parameters. The purpose of these scaling factors are to scale all of + the parameters to around unity. Normally appropriate scaling factors + are computed if this argument is not specified. Specify them yourself + if the automatic procedure goes awry. + scld : array_like, optional + array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling + factors for the *errors* in the input variables. Again, these factors + are automatically computed if you do not provide them. If scld.shape == + (m,), then the scaling factors are broadcast to all observations. + work : ndarray, optional + array to hold the double-valued working data for ODRPACK. When + restarting, takes the value of self.output.work. + iwork : ndarray, optional + array to hold the integer-valued working data for ODRPACK. When + restarting, takes the value of self.output.iwork. + overwrite : bool, optional + If it is True, output files defined by `errfile` and `rptfile` are + overwritten. The default is False. 
+ + Attributes + ---------- + data : Data + The data for this fit + model : Model + The model used in fit + output : Output + An instance if the Output class containing all of the returned + data from an invocation of ODR.run() or ODR.restart() + + """ + + def __init__(self, data, model, beta0=None, delta0=None, ifixb=None, + ifixx=None, job=None, iprint=None, errfile=None, rptfile=None, + ndigit=None, taufac=None, sstol=None, partol=None, maxit=None, + stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None, + overwrite=False): + + self.data = data + self.model = model + + if beta0 is None: + if self.model.estimate is not None: + self.beta0 = _conv(self.model.estimate(self.data)) + else: + raise ValueError( + "must specify beta0 or provide an estimator with the model" + ) + else: + self.beta0 = _conv(beta0) + + if ifixx is None and data.fix is not None: + ifixx = data.fix + + if overwrite: + # remove output files for overwriting. + if rptfile is not None and os.path.exists(rptfile): + os.remove(rptfile) + if errfile is not None and os.path.exists(errfile): + os.remove(errfile) + + self.delta0 = _conv(delta0) + # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit + # platforms. + # XXX: some other FORTRAN compilers may not agree. + self.ifixx = _conv(ifixx, dtype=np.int32) + self.ifixb = _conv(ifixb, dtype=np.int32) + self.job = job + self.iprint = iprint + self.errfile = errfile + self.rptfile = rptfile + self.ndigit = ndigit + self.taufac = taufac + self.sstol = sstol + self.partol = partol + self.maxit = maxit + self.stpb = _conv(stpb) + self.stpd = _conv(stpd) + self.sclb = _conv(sclb) + self.scld = _conv(scld) + self.work = _conv(work) + self.iwork = _conv(iwork) + + self.output = None + + self._check() + + def _check(self): + """ Check the inputs for consistency, but don't bother checking things + that the builtin function odr will check. 
+ """ + + x_s = list(self.data.x.shape) + + if isinstance(self.data.y, np.ndarray): + y_s = list(self.data.y.shape) + if self.model.implicit: + raise OdrError("an implicit model cannot use response data") + else: + # implicit model with q == self.data.y + y_s = [self.data.y, x_s[-1]] + if not self.model.implicit: + raise OdrError("an explicit model needs response data") + self.set_job(fit_type=1) + + if x_s[-1] != y_s[-1]: + raise OdrError("number of observations do not match") + + n = x_s[-1] + + if len(x_s) == 2: + m = x_s[0] + else: + m = 1 + if len(y_s) == 2: + q = y_s[0] + else: + q = 1 + + p = len(self.beta0) + + # permissible output array shapes + + fcn_perms = [(q, n)] + fjacd_perms = [(q, m, n)] + fjacb_perms = [(q, p, n)] + + if q == 1: + fcn_perms.append((n,)) + fjacd_perms.append((m, n)) + fjacb_perms.append((p, n)) + if m == 1: + fjacd_perms.append((q, n)) + if p == 1: + fjacb_perms.append((q, n)) + if m == q == 1: + fjacd_perms.append((n,)) + if p == q == 1: + fjacb_perms.append((n,)) + + # try evaluating the supplied functions to make sure they provide + # sensible outputs + + arglist = (self.beta0, self.data.x) + if self.model.extra_args is not None: + arglist = arglist + self.model.extra_args + res = self.model.fcn(*arglist) + + if res.shape not in fcn_perms: + print(res.shape) + print(fcn_perms) + raise OdrError(f"fcn does not output {y_s}-shaped array") + + if self.model.fjacd is not None: + res = self.model.fjacd(*arglist) + if res.shape not in fjacd_perms: + raise OdrError( + f"fjacd does not output {repr((q, m, n))}-shaped array") + if self.model.fjacb is not None: + res = self.model.fjacb(*arglist) + if res.shape not in fjacb_perms: + raise OdrError( + f"fjacb does not output {repr((q, p, n))}-shaped array") + + # check shape of delta0 + + if self.delta0 is not None and self.delta0.shape != self.data.x.shape: + raise OdrError( + f"delta0 is not a {repr(self.data.x.shape)}-shaped array") + + if self.data.x.size == 0: + warn("Empty data 
detected for ODR instance. " + "Do not expect any fitting to occur", + OdrWarning, stacklevel=3) + + def _gen_work(self): + """ Generate a suitable work array if one does not already exist. + """ + + n = self.data.x.shape[-1] + p = self.beta0.shape[0] + + if len(self.data.x.shape) == 2: + m = self.data.x.shape[0] + else: + m = 1 + + if self.model.implicit: + q = self.data.y + elif len(self.data.y.shape) == 2: + q = self.data.y.shape[0] + else: + q = 1 + + if self.data.we is None: + ldwe = ld2we = 1 + elif len(self.data.we.shape) == 3: + ld2we, ldwe = self.data.we.shape[1:] + else: + we = self.data.we + ldwe = 1 + ld2we = 1 + if we.ndim == 1 and q == 1: + ldwe = n + elif we.ndim == 2: + if we.shape == (q, q): + ld2we = q + elif we.shape == (q, n): + ldwe = n + + if self.job % 10 < 2: + # ODR not OLS + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p + + 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q) + else: + # OLS not ODR + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p + + 5*q + q*(p+m) + ldwe*ld2we*q) + + if isinstance(self.work, np.ndarray) and self.work.shape == (lwork,)\ + and self.work.dtype.str.endswith('f8'): + # the existing array is fine + return + else: + self.work = np.zeros((lwork,), float) + + def set_job(self, fit_type=None, deriv=None, var_calc=None, + del_init=None, restart=None): + """ + Sets the "job" parameter is a hopefully comprehensible way. + + If an argument is not specified, then the value is left as is. The + default value from class initialization is for all of these options set + to 0. 
+ + Parameters + ---------- + fit_type : {0, 1, 2} int + 0 -> explicit ODR + + 1 -> implicit ODR + + 2 -> ordinary least-squares + deriv : {0, 1, 2, 3} int + 0 -> forward finite differences + + 1 -> central finite differences + + 2 -> user-supplied derivatives (Jacobians) with results + checked by ODRPACK + + 3 -> user-supplied derivatives, no checking + var_calc : {0, 1, 2} int + 0 -> calculate asymptotic covariance matrix and fit + parameter uncertainties (V_B, s_B) using derivatives + recomputed at the final solution + + 1 -> calculate V_B and s_B using derivatives from last iteration + + 2 -> do not calculate V_B and s_B + del_init : {0, 1} int + 0 -> initial input variable offsets set to 0 + + 1 -> initial offsets provided by user in variable "work" + restart : {0, 1} int + 0 -> fit is not a restart + + 1 -> fit is a restart + + Notes + ----- + The permissible values are different from those given on pg. 31 of the + ODRPACK User's Guide only in that one cannot specify numbers greater than + the last value for each variable. + + If one does not supply functions to compute the Jacobians, the fitting + procedure will change deriv to 0, finite differences, as a default. To + initialize the input variable offsets by yourself, set del_init to 1 and + put the offsets into the "work" variable correctly. 
+ + """ + + if self.job is None: + job_l = [0, 0, 0, 0, 0] + else: + job_l = [self.job // 10000 % 10, + self.job // 1000 % 10, + self.job // 100 % 10, + self.job // 10 % 10, + self.job % 10] + + if fit_type in (0, 1, 2): + job_l[4] = fit_type + if deriv in (0, 1, 2, 3): + job_l[3] = deriv + if var_calc in (0, 1, 2): + job_l[2] = var_calc + if del_init in (0, 1): + job_l[1] = del_init + if restart in (0, 1): + job_l[0] = restart + + self.job = (job_l[0]*10000 + job_l[1]*1000 + + job_l[2]*100 + job_l[3]*10 + job_l[4]) + + def set_iprint(self, init=None, so_init=None, + iter=None, so_iter=None, iter_step=None, final=None, so_final=None): + """ Set the iprint parameter for the printing of computation reports. + + If any of the arguments are specified here, then they are set in the + iprint member. If iprint is not set manually or with this method, then + ODRPACK defaults to no printing. If no filename is specified with the + member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to + print to stdout in addition to the specified filename by setting the + so_* arguments to this function, but one cannot specify to print to + stdout but not a file since one can do that by not specifying a rptfile + filename. + + There are three reports: initialization, iteration, and final reports. + They are represented by the arguments init, iter, and final + respectively. The permissible values are 0, 1, and 2 representing "no + report", "short report", and "long report" respectively. + + The argument iter_step (0 <= iter_step <= 9) specifies how often to make + the iteration report; the report will be made for every iter_step'th + iteration starting with iteration one. If iter_step == 0, then no + iteration report is made, regardless of the other arguments. + + If the rptfile is None, then any so_* arguments supplied will raise an + exception. 
+ """ + if self.iprint is None: + self.iprint = 0 + + ip = [self.iprint // 1000 % 10, + self.iprint // 100 % 10, + self.iprint // 10 % 10, + self.iprint % 10] + + # make a list to convert iprint digits to/from argument inputs + # rptfile, stdout + ip2arg = [[0, 0], # none, none + [1, 0], # short, none + [2, 0], # long, none + [1, 1], # short, short + [2, 1], # long, short + [1, 2], # short, long + [2, 2]] # long, long + + if (self.rptfile is None and + (so_init is not None or + so_iter is not None or + so_final is not None)): + raise OdrError( + "no rptfile specified, cannot output to stdout twice") + + iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]] + + if init is not None: + iprint_l[0] = init + if so_init is not None: + iprint_l[1] = so_init + if iter is not None: + iprint_l[2] = iter + if so_iter is not None: + iprint_l[3] = so_iter + if final is not None: + iprint_l[4] = final + if so_final is not None: + iprint_l[5] = so_final + + if iter_step in range(10): + # 0..9 + ip[2] = iter_step + + ip[0] = ip2arg.index(iprint_l[0:2]) + ip[1] = ip2arg.index(iprint_l[2:4]) + ip[3] = ip2arg.index(iprint_l[4:6]) + + self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3] + + def run(self): + """ Run the fitting routine with all of the information given and with ``full_output=1``. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . 
+ """ # noqa: E501 + + args = (self.model.fcn, self.beta0, self.data.y, self.data.x) + kwds = {'full_output': 1} + kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile', + 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb', + 'stpd', 'sclb', 'scld', 'work', 'iwork'] + + if self.delta0 is not None and (self.job // 10000) % 10 == 0: + # delta0 provided and fit is not a restart + self._gen_work() + + d0 = np.ravel(self.delta0) + + self.work[:len(d0)] = d0 + + # set the kwds from other objects explicitly + if self.model.fjacb is not None: + kwds['fjacb'] = self.model.fjacb + if self.model.fjacd is not None: + kwds['fjacd'] = self.model.fjacd + if self.data.we is not None: + kwds['we'] = self.data.we + if self.data.wd is not None: + kwds['wd'] = self.data.wd + if self.model.extra_args is not None: + kwds['extra_args'] = self.model.extra_args + + # implicitly set kwds from self's members + for attr in kwd_l: + obj = getattr(self, attr) + if obj is not None: + kwds[attr] = obj + + with ODR_LOCK: + self.output = Output(odr(*args, **kwds)) + + return self.output + + def restart(self, iter=None): + """ Restarts the run with iter more iterations. + + Parameters + ---------- + iter : int, optional + ODRPACK's default for the number of new iterations is 10. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . 
+ """ + + if self.output is None: + raise OdrError("cannot restart: run() has not been called before") + + self.set_job(restart=1) + self.work = self.output.work + self.iwork = self.output.iwork + + self.maxit = iter + + return self.run() diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/models.py b/llava_video/lib/python3.10/site-packages/scipy/odr/models.py new file mode 100644 index 0000000000000000000000000000000000000000..0289b59747bb68a4954e58732ac69d7df144f5f6 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/odr/models.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.odr` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'Model', 'exponential', 'multilinear', 'unilinear', + 'quadratic', 'polynomial' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="odr", module="models", + private_modules=["_models"], all=__all__, + attribute=name) diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/odrpack.py b/llava_video/lib/python3.10/site-packages/scipy/odr/odrpack.py new file mode 100644 index 0000000000000000000000000000000000000000..192fb3342b7957703996957c882d44656706e41b --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/odr/odrpack.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.odr` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'odr', 'OdrWarning', 'OdrError', 'OdrStop', + 'Data', 'RealData', 'Model', 'Output', 'ODR', + 'odr_error', 'odr_stop' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="odr", module="odrpack", + private_modules=["_odrpack"], all=__all__, + attribute=name) diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/tests/__init__.py b/llava_video/lib/python3.10/site-packages/scipy/odr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7692044e9299ce676da5660c1499ce974bf8dee8 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f06d900d49a83b2eab39c9319b291923a99bc16 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py b/llava_video/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py new file mode 100644 index 0000000000000000000000000000000000000000..971cce6c55a84e08a182e3b25bf9a7e362937e01 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py @@ -0,0 +1,607 @@ +import pickle +import tempfile +import shutil +import os + +import 
numpy as np +from numpy import pi +from numpy.testing import (assert_array_almost_equal, + assert_equal, assert_warns, + assert_allclose) +import pytest +from pytest import raises as assert_raises + +from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning, + multilinear, exponential, unilinear, quadratic, + polynomial) + + +class TestODR: + + # Bad Data for 'x' + + def test_bad_data(self): + assert_raises(ValueError, Data, 2, 1) + assert_raises(ValueError, RealData, 2, 1) + + # Empty Data for 'x' + def empty_data_func(self, B, x): + return B[0]*x + B[1] + + @pytest.mark.thread_unsafe + def test_empty_data(self): + beta0 = [0.02, 0.0] + linear = Model(self.empty_data_func) + + empty_dat = Data([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + empty_dat = RealData([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + # Explicit Example + + def explicit_fcn(self, B, x): + ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) + return ret + + def explicit_fjd(self, B, x): + eBx = np.exp(B[2]*x) + ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx + return ret + + def explicit_fjb(self, B, x): + eBx = np.exp(B[2]*x) + res = np.vstack([np.ones(x.shape[-1]), + np.power(eBx-1.0, 2), + B[1]*2.0*(eBx-1.0)*eBx*x]) + return res + + def test_explicit(self): + explicit_mod = Model( + self.explicit_fcn, + fjacb=self.explicit_fjb, + fjacd=self.explicit_fjd, + meta=dict(name='Sample Explicit Model', + ref='ODRPACK UG, pg. 
39'), + ) + explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], + [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, + 1213.8,1215.5,1212.]) + explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], + ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) + explicit_odr.set_job(deriv=2) + explicit_odr.set_iprint(init=0, iter=0, final=0) + + out = explicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.2646548050648876e+03, -5.4018409956678255e+01, + -8.7849712165253724e-02]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[4.4949592379003039e-01, -3.7421976890364739e-01, + -8.0978217468468912e-04], + [-3.7421976890364739e-01, 1.0529686462751804e+00, + -1.9453521827942002e-03], + [-8.0978217468468912e-04, -1.9453521827942002e-03, + 1.6827336938454476e-05]]), + ) + + # Implicit Example + + def implicit_fcn(self, B, x): + return (B[2]*np.power(x[0]-B[0], 2) + + 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + + B[4]*np.power(x[1]-B[1], 2) - 1.0) + + def test_implicit(self): + implicit_mod = Model( + self.implicit_fcn, + implicit=1, + meta=dict(name='Sample Implicit Model', + ref='ODRPACK UG, pg. 
49'), + ) + implicit_dat = Data([ + [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, + -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], + [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, + -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], + 1, + ) + implicit_odr = ODR(implicit_dat, implicit_mod, + beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) + + out = implicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, + 0.0162299708984738, 0.0797537982976416]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314, + 0.0027500347539902, 0.0034962501532468]), + ) + assert_allclose( + out.cov_beta, + np.array([[2.1089274602333052e+00, -1.9437686411979040e+00, + 7.0263550868344446e-02, -4.7175267373474862e-02, + 5.2515575927380355e-02], + [-1.9437686411979040e+00, 2.0481509222414456e+00, + -6.1600515853057307e-02, 4.6268827806232933e-02, + -5.8822307501391467e-02], + [7.0263550868344446e-02, -6.1600515853057307e-02, + 2.8659542561579308e-03, -1.4628662260014491e-03, + 1.4528860663055824e-03], + [-4.7175267373474862e-02, 4.6268827806232933e-02, + -1.4628662260014491e-03, 1.2855592885514335e-03, + -1.2692942951415293e-03], + [5.2515575927380355e-02, -5.8822307501391467e-02, + 1.4528860663055824e-03, -1.2692942951415293e-03, + 2.0778813389755596e-03]]), + rtol=1e-6, atol=2e-6, + ) + + # Multi-variable Example + + def multi_fcn(self, B, x): + if (x < 0.0).any(): + raise OdrStop + theta = pi*B[3]/2. 
+ ctheta = np.cos(theta) + stheta = np.sin(theta) + omega = np.power(2.*pi*x*np.exp(-B[2]), B[3]) + phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta)) + r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) + + np.power(omega*stheta, 2)), -B[4]) + ret = np.vstack([B[1] + r*np.cos(B[4]*phi), + r*np.sin(B[4]*phi)]) + return ret + + def test_multi(self): + multi_mod = Model( + self.multi_fcn, + meta=dict(name='Sample Multi-Response Model', + ref='ODRPACK UG, pg. 56'), + ) + + multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0, + 700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0, + 15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0]) + multi_y = np.array([ + [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713, + 3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984, + 2.934, 2.876, 2.838, 2.798, 2.759], + [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309, + 0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218, + 0.202, 0.182, 0.168, 0.153, 0.139], + ]) + n = len(multi_x) + multi_we = np.zeros((2, 2, n), dtype=float) + multi_ifixx = np.ones(n, dtype=int) + multi_delta = np.zeros(n, dtype=float) + + multi_we[0,0,:] = 559.6 + multi_we[1,0,:] = multi_we[0,1,:] = -1634.0 + multi_we[1,1,:] = 8397.0 + + for i in range(n): + if multi_x[i] < 100.0: + multi_ifixx[i] = 0 + elif multi_x[i] <= 150.0: + pass # defaults are fine + elif multi_x[i] <= 1000.0: + multi_delta[i] = 25.0 + elif multi_x[i] <= 10000.0: + multi_delta[i] = 560.0 + elif multi_x[i] <= 100000.0: + multi_delta[i] = 9500.0 + else: + multi_delta[i] = 144000.0 + if multi_x[i] == 100.0 or multi_x[i] == 150.0: + multi_we[:,:,i] = 0.0 + + multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2), + we=multi_we) + multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5], + delta0=multi_delta, ifixx=multi_ifixx) + multi_odr.set_job(deriv=1, del_init=1) + + out = multi_odr.run() + assert_array_almost_equal( + 
out.beta, + np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978, + 0.5101147161764654, 0.5173902330489161]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757, + 0.0132642749596149, 0.0288529201353984]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406, + -0.0058700836512467, 0.011281212888768], + [0.0036159705923791, 0.0064793789429006, 0.0517610978353126, + -0.0051181304940204, 0.0130726943624117], + [0.0438637051470406, 0.0517610978353126, 0.5182263323095322, + -0.0563083340093696, 0.1269490939468611], + [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696, + 0.0066939246261263, -0.0140184391377962], + [0.011281212888768, 0.0130726943624117, 0.1269490939468611, + -0.0140184391377962, 0.0316733013820852]]), + ) + + # Pearson's Data + # K. Pearson, Philosophical Magazine, 2, 559 (1901) + + def pearson_fcn(self, B, x): + return B[0] + B[1]*x + + def test_pearson(self): + p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]) + p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]) + p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.]) + p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04]) + + p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy) + + # Reverse the data to test invariance of results + pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx) + + p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit')) + + p_odr = ODR(p_dat, p_mod, beta0=[1.,1.]) + pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.]) + + out = p_odr.run() + assert_array_almost_equal( + out.beta, + np.array([5.4767400299231674, -0.4796082367610305]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.3590121690702467, 0.0706291186037444]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0854275622946333, -0.0161807025443155], + [-0.0161807025443155, 0.003306337993922]]), + ) + + rout = pr_odr.run() + 
assert_array_almost_equal( + rout.beta, + np.array([11.4192022410781231, -2.0850374506165474]), + ) + assert_array_almost_equal( + rout.sd_beta, + np.array([0.9820231665657161, 0.3070515616198911]), + ) + assert_array_almost_equal( + rout.cov_beta, + np.array([[0.6391799462548782, -0.1955657291119177], + [-0.1955657291119177, 0.0624888159223392]]), + ) + + # Lorentz Peak + # The data is taken from one of the undergraduate physics labs I performed. + + def lorentz(self, beta, x): + return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x - + beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0))) + + def test_lorentz(self): + l_sy = np.array([.29]*18) + l_sx = np.array([.000972971,.000948268,.000707632,.000706679, + .000706074, .000703918,.000698955,.000456856, + .000455207,.000662717,.000654619,.000652694, + .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839]) + + l_dat = RealData( + [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608, + 3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982, + 3.6562, 3.62498, 3.55525, 3.41886], + [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122, + 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5], + sx=l_sx, + sy=l_sy, + ) + l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak')) + l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8)) + + out = l_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.4306780846149925e+03, 1.3390509034538309e-01, + 3.7798193600109009e+00]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([7.3621186811330963e-01, 3.5068899941471650e-04, + 2.4451209281408992e-04]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[2.4714409064597873e-01, -6.9067261911110836e-05, + -3.1236953270424990e-05], + [-6.9067261911110836e-05, 5.6077531517333009e-08, + 3.6133261832722601e-08], + [-3.1236953270424990e-05, 3.6133261832722601e-08, + 2.7261220025171730e-08]]), + ) + + def test_ticket_1253(self): + def linear(c, x): + return c[0]*x+c[1] + + 
c = [2.0, 3.0] + x = np.linspace(0, 10) + y = linear(c, x) + + model = Model(linear) + data = Data(x, y, wd=1.0, we=1.0) + job = ODR(data, model, beta0=[1.0, 1.0]) + result = job.run() + assert_equal(result.info, 2) + + # Verify fix for gh-9140 + + def test_ifixx(self): + x1 = [-2.01, -0.99, -0.001, 1.02, 1.98] + x2 = [3.98, 1.01, 0.001, 0.998, 4.01] + fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int))) + data = Data(np.vstack((x1, x2)), y=1, fix=fix) + model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True) + + odr1 = ODR(data, model, beta0=np.array([1.])) + sol1 = odr1.run() + odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix) + sol2 = odr2.run() + assert_equal(sol1.beta, sol2.beta) + + # verify bugfix for #11800 in #11802 + def test_ticket_11800(self): + # parameters + beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5]) + nr_measurements = 10 + + std_dev_x = 0.01 + x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866, + -0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301], + [-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829, + 0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]]) + + std_dev_y = 0.05 + y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642, + 0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929], + [0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536, + -0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]]) + + beta_solution = np.array([ + 2.62920235756665876536e+00, -1.26608484996299608838e+02, + 1.29703572775403074502e+02, -1.88560985401185465804e+00, + 7.83834160771274923718e+01, -7.64124076838087091801e+01]) + + # model's function and Jacobians + def func(beta, x): + y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :] + y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :] + + return np.vstack((y0, y1)) + + def df_dbeta_odr(beta, x): + nr_meas = np.shape(x)[1] + zeros = 
np.zeros(nr_meas) + ones = np.ones(nr_meas) + + dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros]) + dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]]) + + return np.stack((dy0, dy1)) + + def df_dx_odr(beta, x): + nr_meas = np.shape(x)[1] + ones = np.ones(nr_meas) + + dy0 = np.array([beta[1] * ones, beta[2] * ones]) + dy1 = np.array([beta[4] * ones, beta[5] * ones]) + return np.stack((dy0, dy1)) + + # do measurements with errors in independent and dependent variables + x0_true = np.linspace(1, 10, nr_measurements) + x1_true = np.linspace(1, 10, nr_measurements) + x_true = np.array([x0_true, x1_true]) + + y_true = func(beta_true, x_true) + + x_meas = x_true + x_error + y_meas = y_true + y_error + + # estimate model's parameters + model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr) + + data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y) + + odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100) + #odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1) + odr_obj.set_job(deriv=3) + + odr_out = odr_obj.run() + + # check results + assert_equal(odr_out.info, 1) + assert_array_almost_equal(odr_out.beta, beta_solution) + + def test_multilinear_model(self): + x = np.linspace(0.0, 5.0) + y = 10.0 + 5.0 * x + data = Data(x, y) + odr_obj = ODR(data, multilinear) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [10.0, 5.0]) + + def test_exponential_model(self): + x = np.linspace(0.0, 5.0) + y = -10.0 + np.exp(0.5*x) + data = Data(x, y) + odr_obj = ODR(data, exponential) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [-10.0, 0.5]) + + def test_polynomial_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3 + poly_model = polynomial(3) + data = Data(x, y) + odr_obj = ODR(data, poly_model) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0]) + + def test_unilinear_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + 
data = Data(x, y) + odr_obj = ODR(data, unilinear) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0]) + + def test_quadratic_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x ** 2 + 2.0 * x + 3.0 + data = Data(x, y) + odr_obj = ODR(data, quadratic) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0]) + + def test_work_ind(self): + + def func(par, x): + b0, b1 = par + return b0 + b1 * x + + # generate some data + n_data = 4 + x = np.arange(n_data) + y = np.where(x % 2, x + 0.1, x - 0.1) + x_err = np.full(n_data, 0.1) + y_err = np.full(n_data, 0.1) + + # do the fitting + linear_model = Model(func) + real_data = RealData(x, y, sx=x_err, sy=y_err) + odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4]) + odr_obj.set_job(fit_type=0) + out = odr_obj.run() + + sd_ind = out.work_ind['sd'] + assert_array_almost_equal(out.sd_beta, + out.work[sd_ind:sd_ind + len(out.sd_beta)]) + + @pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better " + "not to run this test, see gh-13127") + def test_output_file_overwrite(self): + """ + Verify fix for gh-1892 + """ + def func(b, x): + return b[0] + b[1] * x + + p = Model(func) + data = Data(np.arange(10), 12 * np.arange(10)) + tmp_dir = tempfile.mkdtemp() + error_file_path = os.path.join(tmp_dir, "error.dat") + report_file_path = os.path.join(tmp_dir, "report.dat") + try: + ODR(data, p, beta0=[0.1, 13], errfile=error_file_path, + rptfile=report_file_path).run() + ODR(data, p, beta0=[0.1, 13], errfile=error_file_path, + rptfile=report_file_path, overwrite=True).run() + finally: + # remove output files for clean up + shutil.rmtree(tmp_dir) + + def test_odr_model_default_meta(self): + def func(b, x): + return b[0] + b[1] * x + + p = Model(func) + p.set_meta(name='Sample Model Meta', ref='ODRPACK') + assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'}) + + def test_work_array_del_init(self): + """ + Verify fix for gh-18739 where del_init=1 
fails. + """ + def func(b, x): + return b[0] + b[1] * x + + # generate some data + n_data = 4 + x = np.arange(n_data) + y = np.where(x % 2, x + 0.1, x - 0.1) + x_err = np.full(n_data, 0.1) + y_err = np.full(n_data, 0.1) + + linear_model = Model(func) + # Try various shapes of the `we` array from various `sy` and `covy` + rd0 = RealData(x, y, sx=x_err, sy=y_err) + rd1 = RealData(x, y, sx=x_err, sy=0.1) + rd2 = RealData(x, y, sx=x_err, sy=[0.1]) + rd3 = RealData(x, y, sx=x_err, sy=np.full((1, n_data), 0.1)) + rd4 = RealData(x, y, sx=x_err, covy=[[0.01]]) + rd5 = RealData(x, y, sx=x_err, covy=np.full((1, 1, n_data), 0.01)) + for rd in [rd0, rd1, rd2, rd3, rd4, rd5]: + odr_obj = ODR(rd, linear_model, beta0=[0.4, 0.4], + delta0=np.full(n_data, -0.1)) + odr_obj.set_job(fit_type=0, del_init=1) + # Just make sure that it runs without raising an exception. + odr_obj.run() + + def test_pickling_data(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + data = Data(x, y) + + obj_pickle = pickle.dumps(data) + del data + pickle.loads(obj_pickle) + + def test_pickling_real_data(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + data = RealData(x, y) + + obj_pickle = pickle.dumps(data) + del data + pickle.loads(obj_pickle) + + def test_pickling_model(self): + obj_pickle = pickle.dumps(unilinear) + pickle.loads(obj_pickle) + + def test_pickling_odr(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + odr_obj = ODR(Data(x, y), unilinear) + + obj_pickle = pickle.dumps(odr_obj) + del odr_obj + pickle.loads(obj_pickle) + + def test_pickling_output(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + output = ODR(Data(x, y), unilinear).run + + obj_pickle = pickle.dumps(output) + del output + pickle.loads(obj_pickle) diff --git a/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..e98c38b1f972075e61f5a1adb23887eef9240b31 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..568892f7af20dddc5c31fd6278acb3390b3d27e1 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa5a43c02f6455a472feab2d977ff609981fe53 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d31a2b40e8d75d6de11489e8e74f972118f280ee Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55378c2f0c4ceabc0e99cd3d36a5ab20edd6013a Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc differ diff --git 
a/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..335aa9822739a302a2a1282f5643e016c8e24cfc Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbc4b0eb5121ab5a0dbe439a78b2d37007294849 Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13cf906132e08d7053dada1136b6096f51d3f5fa Binary files /dev/null and b/llava_video/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc differ diff --git a/llava_video/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so b/llava_video/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..18658162c669c2f3ae246e89d977ede959d30944 --- /dev/null +++ b/llava_video/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc73ebd84452ea03f301703d6b1ef46f442c3ddbb157f3a7e59984fda345165e +size 566033