ZTWHHH committed on
Commit
3c3d98b
·
verified ·
1 Parent(s): 98137ab

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. llava_video/lib/python3.10/site-packages/scipy/datasets/__init__.py +90 -0
  3. llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc +0 -0
  5. llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc +0 -0
  6. llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc +0 -0
  7. llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc +0 -0
  8. llava_video/lib/python3.10/site-packages/scipy/datasets/_download_all.py +57 -0
  9. llava_video/lib/python3.10/site-packages/scipy/datasets/_fetchers.py +219 -0
  10. llava_video/lib/python3.10/site-packages/scipy/datasets/_registry.py +26 -0
  11. llava_video/lib/python3.10/site-packages/scipy/datasets/_utils.py +81 -0
  12. llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py +0 -0
  13. llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  14. llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc +0 -0
  15. llava_video/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py +128 -0
  16. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc +0 -0
  17. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc +0 -0
  18. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_cubature.cpython-310.pyc +0 -0
  19. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc +0 -0
  20. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc +0 -0
  21. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc +0 -0
  22. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc +0 -0
  23. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc +0 -0
  24. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc +0 -0
  25. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc +0 -0
  26. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc +0 -0
  27. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc +0 -0
  28. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc +0 -0
  29. llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc +0 -0
  30. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  31. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc +0 -0
  32. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc +0 -0
  33. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc +0 -0
  34. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_cubature.cpython-310.pyc +0 -0
  35. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc +0 -0
  36. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc +0 -0
  37. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc +0 -0
  38. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc +0 -0
  39. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc +0 -0
  40. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py +220 -0
  41. llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py +840 -0
  42. llava_video/lib/python3.10/site-packages/scipy/ndimage/__init__.py +173 -0
  43. llava_video/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so +0 -0
  44. llava_video/lib/python3.10/site-packages/scipy/ndimage/_delegators.py +297 -0
  45. llava_video/lib/python3.10/site-packages/scipy/ndimage/_filters.py +1965 -0
  46. llava_video/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py +1003 -0
  47. llava_video/lib/python3.10/site-packages/scipy/ndimage/_morphology.py +0 -0
  48. llava_video/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py +143 -0
  49. llava_video/lib/python3.10/site-packages/scipy/ndimage/_rank_filter_1d.cpython-310-x86_64-linux-gnu.so +0 -0
  50. llava_video/lib/python3.10/site-packages/scipy/ndimage/fourier.py +21 -0
.gitattributes CHANGED
@@ -659,3 +659,5 @@ llava_video/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_6
659
  llava_video/lib/python3.10/site-packages/scipy/special/__pycache__/_add_newdocs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
660
  llava_video/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
661
  llava_video/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
659
  llava_video/lib/python3.10/site-packages/scipy/special/__pycache__/_add_newdocs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
660
  llava_video/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
661
  llava_video/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
662
+ llava_video/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
663
+ llava_video/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
llava_video/lib/python3.10/site-packages/scipy/datasets/__init__.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ================================
3
+ Datasets (:mod:`scipy.datasets`)
4
+ ================================
5
+
6
+ .. currentmodule:: scipy.datasets
7
+
8
+ Dataset Methods
9
+ ===============
10
+
11
+ .. autosummary::
12
+ :toctree: generated/
13
+
14
+ ascent
15
+ face
16
+ electrocardiogram
17
+
18
+ Utility Methods
19
+ ===============
20
+
21
+ .. autosummary::
22
+ :toctree: generated/
23
+
24
+ download_all -- Download all the dataset files to specified path.
25
+ clear_cache -- Clear cached dataset directory.
26
+
27
+
28
+ Usage of Datasets
29
+ =================
30
+
31
+ SciPy dataset methods can be simply called as follows: ``'<dataset-name>()'``
32
+ This downloads the dataset files over the network once, and saves the cache,
33
+ before returning a `numpy.ndarray` object representing the dataset.
34
+
35
+ Note that the return data structure and data type might be different for
36
+ different dataset methods. For a more detailed example on usage, please look
37
+ into the particular dataset method documentation above.
38
+
39
+
40
+ How dataset retrieval and storage works
41
+ =======================================
42
+
43
+ SciPy dataset files are stored within individual GitHub repositories under the
44
+ SciPy GitHub organization, following a naming convention as
45
+ ``'dataset-<name>'``, for example `scipy.datasets.face` files live at
46
+ https://github.com/scipy/dataset-face. The `scipy.datasets` submodule utilizes
47
+ and depends on `Pooch <https://www.fatiando.org/pooch/latest/>`_, a Python
48
+ package built to simplify fetching data files. Pooch uses these repos to
49
+ retrieve the respective dataset files when calling the dataset function.
50
+
51
+ A registry of all the datasets, essentially a mapping of filenames with their
52
+ SHA256 hash and repo urls are maintained, which Pooch uses to handle and verify
53
+ the downloads on function call. After downloading the dataset once, the files
54
+ are saved in the system cache directory under ``'scipy-data'``.
55
+
56
+ Dataset cache locations may vary on different platforms.
57
+
58
+ For macOS::
59
+
60
+ '~/Library/Caches/scipy-data'
61
+
62
+ For Linux and other Unix-like platforms::
63
+
64
+ '~/.cache/scipy-data' # or the value of the XDG_CACHE_HOME env var, if defined
65
+
66
+ For Windows::
67
+
68
+ 'C:\\Users\\<user>\\AppData\\Local\\<AppAuthor>\\scipy-data\\Cache'
69
+
70
+
71
+ In environments with constrained network connectivity for various security
72
+ reasons or on systems without continuous internet connections, one may manually
73
+ load the cache of the datasets by placing the contents of the dataset repo in
74
+ the above mentioned cache directory to avoid fetching dataset errors without
75
+ the internet connectivity.
76
+
77
+ """
78
+
79
+
80
+ from ._fetchers import face, ascent, electrocardiogram
81
+ from ._download_all import download_all
82
+ from ._utils import clear_cache
83
+
84
+ __all__ = ['ascent', 'electrocardiogram', 'face',
85
+ 'download_all', 'clear_cache']
86
+
87
+
88
+ from scipy._lib._testutils import PytestTester
89
+ test = PytestTester(__name__)
90
+ del PytestTester
llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.98 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc ADDED
Binary file (6.28 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc ADDED
Binary file (763 Bytes). View file
 
llava_video/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/datasets/_download_all.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Platform independent script to download all the
3
+ `scipy.datasets` module data files.
4
+ This doesn't require a full scipy build.
5
+
6
+ Run: python _download_all.py <download_dir>
7
+ """
8
+
9
+ import argparse
10
+ try:
11
+ import pooch
12
+ except ImportError:
13
+ pooch = None
14
+
15
+
16
+ if __package__ is None or __package__ == '':
17
+ # Running as python script, use absolute import
18
+ import _registry # type: ignore
19
+ else:
20
+ # Running as python module, use relative import
21
+ from . import _registry
22
+
23
+
24
def download_all(path=None):
    """Download every `scipy.datasets` data file into *path*.

    Parameters
    ----------
    path : str, optional
        Directory in which to store the dataset files.  When ``None``,
        the platform-specific pooch cache directory is used.
    """
    if pooch is None:
        raise ImportError("Missing optional dependency 'pooch' required "
                          "for scipy.datasets module. Please use pip or "
                          "conda to install 'pooch'.")
    target_dir = pooch.os_cache('scipy-data') if path is None else path
    # Fetch each registered file from its per-dataset URL, verifying the
    # download against the known SHA256 hash.
    for fname, sha256 in _registry.registry.items():
        pooch.retrieve(url=_registry.registry_urls[fname],
                       known_hash=sha256,
                       fname=fname, path=target_dir)
45
+
46
+
47
def main():
    """Command-line entry point: download all SciPy dataset files.

    The destination directory may be given as an optional positional
    argument; it defaults to the pooch cache directory.
    """
    parser = argparse.ArgumentParser(description='Download SciPy data files.')
    # Default to None and let download_all() resolve the cache directory.
    # Evaluating pooch.os_cache() eagerly here would raise AttributeError
    # when pooch is not installed, hiding download_all()'s clear
    # ImportError message.
    parser.add_argument("path", nargs='?', type=str,
                        default=None,
                        help="Directory path to download all the data files.")
    args = parser.parse_args()
    download_all(args.path)


if __name__ == "__main__":
    main()
llava_video/lib/python3.10/site-packages/scipy/datasets/_fetchers.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from numpy import array, frombuffer, load
from ._registry import registry, registry_urls

try:
    import pooch
except ImportError:
    pooch = None
    data_fetcher = None
else:
    # Pooch downloader configured for the SciPy dataset repositories.
    # `path` is the per-platform cache directory (selected via appdirs).
    # `base_url` is a mandatory pooch parameter, but every file is
    # resolved through an explicit URL from `registry_urls` instead.
    data_fetcher = pooch.create(
        path=pooch.os_cache("scipy-data"),
        base_url="https://github.com/scipy/",
        registry=registry,
        urls=registry_urls,
    )
23
+
24
+
25
def fetch_data(dataset_name, data_fetcher=data_fetcher):
    """Return the local path of *dataset_name*, downloading it if needed."""
    if data_fetcher is None:
        raise ImportError("Missing optional dependency 'pooch' required "
                          "for scipy.datasets module. Please use pip or "
                          "conda to install 'pooch'.")
    # Pooch downloads on first use and serves the cached copy afterwards;
    # `fetch` returns the full path of the local file.
    return data_fetcher.fetch(dataset_name)
32
+
33
+
34
def ascent():
    """
    Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy
    use in demos.

    The image is derived from
    https://pixnio.com/people/accent-to-the-top

    Returns
    -------
    ascent : ndarray
        convenient image to use for testing and demonstration

    Examples
    --------
    >>> import scipy.datasets
    >>> ascent = scipy.datasets.ascent()
    >>> ascent.shape
    (512, 512)
    >>> ascent.max()
    np.uint8(255)

    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(ascent)
    >>> plt.show()

    """
    import pickle

    # Downloaded on first use, then served from the local pooch cache.
    file_path = fetch_data("ascent.dat")
    # The dataset is a pickled nested list; convert it to an ndarray.
    with open(file_path, 'rb') as fh:
        return array(pickle.load(fh))
76
+
77
+
78
def electrocardiogram():
    """
    Load an electrocardiogram as an example for a 1-D signal.

    The returned signal is a 5 minute long electrocardiogram (ECG), a
    medical recording of the heart's electrical activity, sampled at
    360 Hz.

    Returns
    -------
    ecg : ndarray
        The electrocardiogram in millivolt (mV) sampled at 360 Hz.

    Notes
    -----
    The provided signal is an excerpt (19:35 to 24:35) from the
    `record 208`_ (lead MLII) provided by the MIT-BIH Arrhythmia
    Database [1]_ on PhysioNet [2]_.  The excerpt includes noise induced
    artifacts, typical heartbeats as well as pathological changes.

    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208

    .. versionadded:: 1.1.0

    References
    ----------
    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
           (PMID: 11446209); :doi:`10.13026/C2F305`
    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
           PhysioToolkit, and PhysioNet: Components of a New Research
           Resource for Complex Physiologic Signals.
           Circulation 101(23):e215-e220;
           :doi:`10.1161/01.CIR.101.23.e215`

    Examples
    --------
    >>> from scipy.datasets import electrocardiogram
    >>> ecg = electrocardiogram()
    >>> ecg.shape, ecg.mean(), ecg.std()
    ((108000,), -0.16510875, 0.5992473991177294)

    Plotting short windows of the signal reveals normal sinus rhythm at
    the start, premature ventricular contractions after second 16, and
    occasional large recording artifacts; a Welch power spectrum shows
    mostly low-frequency content plus 50/60 Hz mains noise.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fs = 360
    >>> time = np.arange(ecg.size) / fs
    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(9, 10.2)
    >>> plt.ylim(-1, 1.5)
    >>> plt.show()
    """
    file_path = fetch_data("ecg.dat")
    with load(file_path) as npz:
        # Stored as np.uint16 raw ADC counts; widen to int before scaling.
        raw = npz["ecg"].astype(int)
    # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain.
    return (raw - 1024) / 200.0
175
+
176
+
177
def face(gray=False):
    """
    Get a 1024 x 768, color image of a raccoon face.

    The image is derived from
    https://pixnio.com/fauna-animals/raccoons/raccoon-procyon-lotor

    Parameters
    ----------
    gray : bool, optional
        If True return 8-bit grey-scale image, otherwise return a color image

    Returns
    -------
    face : ndarray
        image of a raccoon face

    Examples
    --------
    >>> import scipy.datasets
    >>> face = scipy.datasets.face()
    >>> face.shape
    (768, 1024, 3)
    >>> face.max()
    np.uint8(255)

    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(face)
    >>> plt.show()

    """
    import bz2

    file_path = fetch_data("face.dat")
    with open(file_path, 'rb') as fh:
        compressed = fh.read()
    # The dataset file is bz2-compressed raw 8-bit RGB bytes (row-major).
    pixels = frombuffer(bz2.decompress(compressed), dtype='uint8')
    pixels.shape = (768, 1024, 3)
    if gray is True:
        # Luma-style weighted channel mix, truncated back to uint8.
        pixels = (0.21 * pixels[:, :, 0] + 0.71 * pixels[:, :, 1] +
                  0.07 * pixels[:, :, 2]).astype('uint8')
    return pixels
llava_video/lib/python3.10/site-packages/scipy/datasets/_registry.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dataset registry for the SciPy Datasets submodule.
#
# The SHA256 hashes below can be regenerated with:
#     openssl sha256 <filename>

# filename -> SHA256 checksum used by pooch to verify each download
registry = {
    "ascent.dat": "03ce124c1afc880f87b55f6b061110e2e1e939679184f5614e38dacc6c1957e2",
    "ecg.dat": "f20ad3365fb9b7f845d0e5c48b6fe67081377ee466c3a220b7f69f35c8958baf",
    "face.dat": "9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886"
}

# filename -> direct download URL (overrides pooch's base_url)
registry_urls = {
    "ascent.dat": "https://raw.githubusercontent.com/scipy/dataset-ascent/main/ascent.dat",
    "ecg.dat": "https://raw.githubusercontent.com/scipy/dataset-ecg/main/ecg.dat",
    "face.dat": "https://raw.githubusercontent.com/scipy/dataset-face/main/face.dat"
}

# dataset method name -> list of data files that method needs
method_files_map = {
    "ascent": ["ascent.dat"],
    "electrocardiogram": ["ecg.dat"],
    "face": ["face.dat"]
}
llava_video/lib/python3.10/site-packages/scipy/datasets/_utils.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ from ._registry import method_files_map
4
+
5
+ try:
6
+ import platformdirs
7
+ except ImportError:
8
+ platformdirs = None # type: ignore[assignment]
9
+
10
+
11
def _clear_cache(datasets, cache_dir=None, method_map=None):
    """Remove cached files for the given dataset methods (or everything).

    Parameters
    ----------
    datasets : callable or list/tuple of callable or None
        Dataset methods whose files should be removed; ``None`` removes
        the whole cache directory.
    cache_dir : str, optional
        Cache location; defaults to the platform ``scipy-data`` directory.
    method_map : dict, optional
        Mapping of method name -> list of filenames; defaults to the
        SciPy Datasets registry map.
    """
    if method_map is None:
        # Fall back to the SciPy Datasets registry mapping.
        method_map = method_files_map
    if cache_dir is None:
        if platformdirs is None:
            # platformdirs ships as a pooch dependency.
            raise ImportError("Missing optional dependency 'pooch' required "
                              "for scipy.datasets module. Please use pip or "
                              "conda to install 'pooch'.")
        cache_dir = platformdirs.user_cache_dir("scipy-data")

    if not os.path.exists(cache_dir):
        print(f"Cache Directory {cache_dir} doesn't exist. Nothing to clear.")
        return

    if datasets is None:
        # No selection given: remove the entire cache directory.
        print(f"Cleaning the cache directory {cache_dir}!")
        shutil.rmtree(cache_dir)
        return

    if not isinstance(datasets, (list, tuple)):
        # A single dataset method was passed; normalize to a list.
        datasets = [datasets, ]
    for method in datasets:
        assert callable(method)
        method_name = method.__name__  # Name of the dataset method
        if method_name not in method_map:
            raise ValueError(f"Dataset method {method_name} doesn't "
                             "exist. Please check if the passed dataset "
                             "is a subset of the following dataset "
                             f"methods: {list(method_map.keys())}")

        # Remove each file the method owns, if present in the cache.
        for filename in method_map[method_name]:
            target = os.path.join(cache_dir, filename)
            if os.path.exists(target):
                print("Cleaning the file "
                      f"{os.path.split(target)[1]} "
                      f"for dataset {method_name}")
                os.remove(target)
            else:
                print(f"Path {target} doesn't exist. "
                      "Nothing to clear.")
+
57
+
58
+ def clear_cache(datasets=None):
59
+ """
60
+ Cleans the scipy datasets cache directory.
61
+
62
+ If a scipy.datasets method or a list/tuple of the same is
63
+ provided, then clear_cache removes all the data files
64
+ associated to the passed dataset method callable(s).
65
+
66
+ By default, it removes all the cached data files.
67
+
68
+ Parameters
69
+ ----------
70
+ datasets : callable or list/tuple of callable or None
71
+
72
+ Examples
73
+ --------
74
+ >>> from scipy import datasets
75
+ >>> ascent_array = datasets.ascent()
76
+ >>> ascent_array.shape
77
+ (512, 512)
78
+ >>> datasets.clear_cache([datasets.ascent])
79
+ Cleaning the file ascent.dat for dataset ascent
80
+ """
81
+ _clear_cache(datasets)
llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py ADDED
File without changes
llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (177 Bytes). View file
 
llava_video/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc ADDED
Binary file (3.94 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from threading import get_ident

import pytest
from numpy.testing import assert_equal, assert_almost_equal

from scipy.datasets import ascent, face, electrocardiogram, download_all
from scipy.datasets._fetchers import data_fetcher
from scipy.datasets._registry import registry
from scipy.datasets._utils import _clear_cache

try:
    import pooch
except ImportError:
    raise ImportError("Missing optional dependency 'pooch' required "
                      "for scipy.datasets module. Please use pip or "
                      "conda to install 'pooch'.")


# Cache directory used by the module-level pooch fetcher.
data_dir = data_fetcher.path  # type: ignore
19
+
20
+
21
def _has_hash(path, expected_hash):
    """Check if the provided path has the expected hash."""
    # A missing file can never match its expected hash.
    return os.path.exists(path) and pooch.file_hash(path) == expected_hash
26
+
27
+
28
class TestDatasets:

    @pytest.fixture(scope='module', autouse=True)
    def test_download_all(self):
        # Module-scoped setup: fetch every dataset file once.
        # NOTE: this fixture requires an INTERNET CONNECTION.
        download_all()
        yield

    @pytest.mark.fail_slow(10)
    def test_existence_all(self):
        # At least every registered file should now be cached.
        assert len(os.listdir(data_dir)) >= len(registry)

    def test_ascent(self):
        assert_equal(ascent().shape, (512, 512))
        # Cached file must be bit-exact against the registry hash.
        assert _has_hash(os.path.join(data_dir, "ascent.dat"),
                         registry["ascent.dat"])

    def test_face(self):
        assert_equal(face().shape, (768, 1024, 3))
        # Cached file must be bit-exact against the registry hash.
        assert _has_hash(os.path.join(data_dir, "face.dat"),
                         registry["face.dat"])

    def test_electrocardiogram(self):
        # Shape, dtype and summary statistics of the signal.
        ecg = electrocardiogram()
        assert_equal(ecg.dtype, float)
        assert_equal(ecg.shape, (108000,))
        assert_almost_equal(ecg.mean(), -0.16510875)
        assert_almost_equal(ecg.std(), 0.5992473991177294)
        # Cached file must be bit-exact against the registry hash.
        assert _has_hash(os.path.join(data_dir, "ecg.dat"),
                         registry["ecg.dat"])
68
+
69
+
70
def test_clear_cache(tmp_path):
    # `tmp_path` is a pytest fixture; it handles cleanup automatically.
    base = tmp_path / str(get_ident())
    base.mkdir()

    cache = base / "dummy_cache_dir"
    cache.mkdir()

    # Dummy dataset files data0.dat .. data3.dat, one per dummy method.
    method_map = {}
    for idx in range(4):
        method_map[f"data{idx}"] = [f"data{idx}.dat"]
        (cache / f"data{idx}.dat").write_text("")

    # A single callable (not wrapped in a list) clears its one file.
    def data0():
        pass
    _clear_cache(datasets=data0, cache_dir=cache, method_map=method_map)
    assert not os.path.exists(cache / "data0.dat")

    # A list of callables clears each method's files.
    def data1():
        pass

    def data2():
        pass
    _clear_cache(datasets=[data1, data2], cache_dir=cache,
                 method_map=method_map)
    assert not os.path.exists(cache / "data1.dat")
    assert not os.path.exists(cache / "data2.dat")

    # One method may own several files; all of them get removed.
    def data4():
        pass
    (cache / "data4_0.dat").write_text("")
    (cache / "data4_1.dat").write_text("")
    method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
    _clear_cache(datasets=[data4], cache_dir=cache, method_map=method_map)
    assert not os.path.exists(cache / "data4_0.dat")
    assert not os.path.exists(cache / "data4_1.dat")

    # Unknown method names must raise ValueError.
    def data5():
        pass
    with pytest.raises(ValueError):
        _clear_cache(datasets=[data5], cache_dir=cache,
                     method_map=method_map)

    # datasets=None wipes the whole cache directory.
    _clear_cache(datasets=None, cache_dir=cache)
    assert not os.path.exists(cache)
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.76 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc ADDED
Binary file (35.7 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_cubature.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc ADDED
Binary file (38.4 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc ADDED
Binary file (16.2 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc ADDED
Binary file (49 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc ADDED
Binary file (39.2 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc ADDED
Binary file (41.1 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc ADDED
Binary file (619 Bytes). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/lsoda.cpython-310.pyc ADDED
Binary file (585 Bytes). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/odepack.cpython-310.pyc ADDED
Binary file (614 Bytes). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/quadpack.cpython-310.pyc ADDED
Binary file (647 Bytes). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc ADDED
Binary file (622 Bytes). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc ADDED
Binary file (7 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc ADDED
Binary file (5.31 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc ADDED
Binary file (19.7 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_cubature.cpython-310.pyc ADDED
Binary file (28.5 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc ADDED
Binary file (24.7 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc ADDED
Binary file (2.09 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc ADDED
Binary file (25.9 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc ADDED
Binary file (26.8 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc ADDED
Binary file (37.8 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import pytest
3
+ import numpy as np
4
+ from numpy.testing import assert_allclose
5
+ from scipy.integrate import ode
6
+
7
+
8
+ def _band_count(a):
9
+ """Returns ml and mu, the lower and upper band sizes of a."""
10
+ nrows, ncols = a.shape
11
+ ml = 0
12
+ for k in range(-nrows+1, 0):
13
+ if np.diag(a, k).any():
14
+ ml = -k
15
+ break
16
+ mu = 0
17
+ for k in range(nrows-1, 0, -1):
18
+ if np.diag(a, k).any():
19
+ mu = k
20
+ break
21
+ return ml, mu
22
+
23
+
24
+ def _linear_func(t, y, a):
25
+ """Linear system dy/dt = a * y"""
26
+ return a.dot(y)
27
+
28
+
29
+ def _linear_jac(t, y, a):
30
+ """Jacobian of a * y is a."""
31
+ return a
32
+
33
+
34
+ def _linear_banded_jac(t, y, a):
35
+ """Banded Jacobian."""
36
+ ml, mu = _band_count(a)
37
+ bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)]
38
+ bjac.append(np.diag(a))
39
+ for k in range(-1, -ml-1, -1):
40
+ bjac.append(np.r_[np.diag(a, k), [0] * (-k)])
41
+ return bjac
42
+
43
+
44
+ def _solve_linear_sys(a, y0, tend=1, dt=0.1,
45
+ solver=None, method='bdf', use_jac=True,
46
+ with_jacobian=False, banded=False):
47
+ """Use scipy.integrate.ode to solve a linear system of ODEs.
48
+
49
+ a : square ndarray
50
+ Matrix of the linear system to be solved.
51
+ y0 : ndarray
52
+ Initial condition
53
+ tend : float
54
+ Stop time.
55
+ dt : float
56
+ Step size of the output.
57
+ solver : str
58
+ If not None, this must be "vode", "lsoda" or "zvode".
59
+ method : str
60
+ Either "bdf" or "adams".
61
+ use_jac : bool
62
+ Determines if the jacobian function is passed to ode().
63
+ with_jacobian : bool
64
+ Passed to ode.set_integrator().
65
+ banded : bool
66
+ Determines whether a banded or full jacobian is used.
67
+ If `banded` is True, `lband` and `uband` are determined by the
68
+ values in `a`.
69
+ """
70
+ if banded:
71
+ lband, uband = _band_count(a)
72
+ else:
73
+ lband = None
74
+ uband = None
75
+
76
+ if use_jac:
77
+ if banded:
78
+ r = ode(_linear_func, _linear_banded_jac)
79
+ else:
80
+ r = ode(_linear_func, _linear_jac)
81
+ else:
82
+ r = ode(_linear_func)
83
+
84
+ if solver is None:
85
+ if np.iscomplexobj(a):
86
+ solver = "zvode"
87
+ else:
88
+ solver = "vode"
89
+
90
+ r.set_integrator(solver,
91
+ with_jacobian=with_jacobian,
92
+ method=method,
93
+ lband=lband, uband=uband,
94
+ rtol=1e-9, atol=1e-10,
95
+ )
96
+ t0 = 0
97
+ r.set_initial_value(y0, t0)
98
+ r.set_f_params(a)
99
+ r.set_jac_params(a)
100
+
101
+ t = [t0]
102
+ y = [y0]
103
+ while r.successful() and r.t < tend:
104
+ r.integrate(r.t + dt)
105
+ t.append(r.t)
106
+ y.append(r.y)
107
+
108
+ t = np.array(t)
109
+ y = np.array(y)
110
+ return t, y
111
+
112
+
113
+ def _analytical_solution(a, y0, t):
114
+ """
115
+ Analytical solution to the linear differential equations dy/dt = a*y.
116
+
117
+ The solution is only valid if `a` is diagonalizable.
118
+
119
+ Returns a 2-D array with shape (len(t), len(y0)).
120
+ """
121
+ lam, v = np.linalg.eig(a)
122
+ c = np.linalg.solve(v, y0)
123
+ e = c * np.exp(lam * t.reshape(-1, 1))
124
+ sol = e.dot(v.T)
125
+ return sol
126
+
127
+
128
+ @pytest.mark.thread_unsafe
129
+ def test_banded_ode_solvers():
130
+ # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
131
+ # with a system that has a banded Jacobian matrix.
132
+
133
+ t_exact = np.linspace(0, 1.0, 5)
134
+
135
+ # --- Real arrays for testing the "lsoda" and "vode" solvers ---
136
+
137
+ # lband = 2, uband = 1:
138
+ a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
139
+ [0.2, -0.5, 0.9, 0.0, 0.0],
140
+ [0.1, 0.1, -0.4, 0.1, 0.0],
141
+ [0.0, 0.3, -0.1, -0.9, -0.3],
142
+ [0.0, 0.0, 0.1, 0.1, -0.7]])
143
+
144
+ # lband = 0, uband = 1:
145
+ a_real_upper = np.triu(a_real)
146
+
147
+ # lband = 2, uband = 0:
148
+ a_real_lower = np.tril(a_real)
149
+
150
+ # lband = 0, uband = 0:
151
+ a_real_diag = np.triu(a_real_lower)
152
+
153
+ real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
154
+ real_solutions = []
155
+
156
+ for a in real_matrices:
157
+ y0 = np.arange(1, a.shape[0] + 1)
158
+ y_exact = _analytical_solution(a, y0, t_exact)
159
+ real_solutions.append((y0, t_exact, y_exact))
160
+
161
+ def check_real(idx, solver, meth, use_jac, with_jac, banded):
162
+ a = real_matrices[idx]
163
+ y0, t_exact, y_exact = real_solutions[idx]
164
+ t, y = _solve_linear_sys(a, y0,
165
+ tend=t_exact[-1],
166
+ dt=t_exact[1] - t_exact[0],
167
+ solver=solver,
168
+ method=meth,
169
+ use_jac=use_jac,
170
+ with_jacobian=with_jac,
171
+ banded=banded)
172
+ assert_allclose(t, t_exact)
173
+ assert_allclose(y, y_exact)
174
+
175
+ for idx in range(len(real_matrices)):
176
+ p = [['vode', 'lsoda'], # solver
177
+ ['bdf', 'adams'], # method
178
+ [False, True], # use_jac
179
+ [False, True], # with_jacobian
180
+ [False, True]] # banded
181
+ for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
182
+ check_real(idx, solver, meth, use_jac, with_jac, banded)
183
+
184
+ # --- Complex arrays for testing the "zvode" solver ---
185
+
186
+ # complex, lband = 2, uband = 1:
187
+ a_complex = a_real - 0.5j * a_real
188
+
189
+ # complex, lband = 0, uband = 0:
190
+ a_complex_diag = np.diag(np.diag(a_complex))
191
+
192
+ complex_matrices = [a_complex, a_complex_diag]
193
+ complex_solutions = []
194
+
195
+ for a in complex_matrices:
196
+ y0 = np.arange(1, a.shape[0] + 1) + 1j
197
+ y_exact = _analytical_solution(a, y0, t_exact)
198
+ complex_solutions.append((y0, t_exact, y_exact))
199
+
200
+ def check_complex(idx, solver, meth, use_jac, with_jac, banded):
201
+ a = complex_matrices[idx]
202
+ y0, t_exact, y_exact = complex_solutions[idx]
203
+ t, y = _solve_linear_sys(a, y0,
204
+ tend=t_exact[-1],
205
+ dt=t_exact[1] - t_exact[0],
206
+ solver=solver,
207
+ method=meth,
208
+ use_jac=use_jac,
209
+ with_jacobian=with_jac,
210
+ banded=banded)
211
+ assert_allclose(t, t_exact)
212
+ assert_allclose(y, y_exact)
213
+
214
+ for idx in range(len(complex_matrices)):
215
+ p = [['bdf', 'adams'], # method
216
+ [False, True], # use_jac
217
+ [False, True], # with_jacobian
218
+ [False, True]] # banded
219
+ for meth, use_jac, with_jac, banded in itertools.product(*p):
220
+ check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
llava_video/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py ADDED
@@ -0,0 +1,840 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
2
+ """
3
+ Tests for numerical integration.
4
+ """
5
+ import numpy as np
6
+ from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
7
+ allclose)
8
+
9
+ from numpy.testing import (
10
+ assert_, assert_array_almost_equal,
11
+ assert_allclose, assert_array_equal, assert_equal, assert_warns)
12
+ import pytest
13
+ from pytest import raises as assert_raises
14
+ from scipy.integrate import odeint, ode, complex_ode
15
+
16
+ #------------------------------------------------------------------------------
17
+ # Test ODE integrators
18
+ #------------------------------------------------------------------------------
19
+
20
+
21
+ class TestOdeint:
22
+ # Check integrate.odeint
23
+
24
+ def _do_problem(self, problem):
25
+ t = arange(0.0, problem.stop_t, 0.05)
26
+
27
+ # Basic case
28
+ z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
29
+ assert_(problem.verify(z, t))
30
+
31
+ # Use tfirst=True
32
+ z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
33
+ full_output=True, tfirst=True)
34
+ assert_(problem.verify(z, t))
35
+
36
+ if hasattr(problem, 'jac'):
37
+ # Use Dfun
38
+ z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
39
+ full_output=True)
40
+ assert_(problem.verify(z, t))
41
+
42
+ # Use Dfun and tfirst=True
43
+ z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
44
+ Dfun=lambda t, y: problem.jac(y, t),
45
+ full_output=True, tfirst=True)
46
+ assert_(problem.verify(z, t))
47
+
48
+ def test_odeint(self):
49
+ for problem_cls in PROBLEMS:
50
+ problem = problem_cls()
51
+ if problem.cmplx:
52
+ continue
53
+ self._do_problem(problem)
54
+
55
+
56
+ class TestODEClass:
57
+
58
+ ode_class = None # Set in subclass.
59
+
60
+ def _do_problem(self, problem, integrator, method='adams'):
61
+
62
+ # ode has callback arguments in different order than odeint
63
+ def f(t, z):
64
+ return problem.f(z, t)
65
+ jac = None
66
+ if hasattr(problem, 'jac'):
67
+ def jac(t, z):
68
+ return problem.jac(z, t)
69
+
70
+ integrator_params = {}
71
+ if problem.lband is not None or problem.uband is not None:
72
+ integrator_params['uband'] = problem.uband
73
+ integrator_params['lband'] = problem.lband
74
+
75
+ ig = self.ode_class(f, jac)
76
+ ig.set_integrator(integrator,
77
+ atol=problem.atol/10,
78
+ rtol=problem.rtol/10,
79
+ method=method,
80
+ **integrator_params)
81
+
82
+ ig.set_initial_value(problem.z0, t=0.0)
83
+ z = ig.integrate(problem.stop_t)
84
+
85
+ assert_array_equal(z, ig.y)
86
+ assert_(ig.successful(), (problem, method))
87
+ assert_(ig.get_return_code() > 0, (problem, method))
88
+ assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
89
+
90
+
91
+ class TestOde(TestODEClass):
92
+
93
+ ode_class = ode
94
+
95
+ def test_vode(self):
96
+ # Check the vode solver
97
+ for problem_cls in PROBLEMS:
98
+ problem = problem_cls()
99
+ if problem.cmplx:
100
+ continue
101
+ if not problem.stiff:
102
+ self._do_problem(problem, 'vode', 'adams')
103
+ self._do_problem(problem, 'vode', 'bdf')
104
+
105
+ def test_zvode(self):
106
+ # Check the zvode solver
107
+ for problem_cls in PROBLEMS:
108
+ problem = problem_cls()
109
+ if not problem.stiff:
110
+ self._do_problem(problem, 'zvode', 'adams')
111
+ self._do_problem(problem, 'zvode', 'bdf')
112
+
113
+ def test_lsoda(self):
114
+ # Check the lsoda solver
115
+ for problem_cls in PROBLEMS:
116
+ problem = problem_cls()
117
+ if problem.cmplx:
118
+ continue
119
+ self._do_problem(problem, 'lsoda')
120
+
121
+ def test_dopri5(self):
122
+ # Check the dopri5 solver
123
+ for problem_cls in PROBLEMS:
124
+ problem = problem_cls()
125
+ if problem.cmplx:
126
+ continue
127
+ if problem.stiff:
128
+ continue
129
+ if hasattr(problem, 'jac'):
130
+ continue
131
+ self._do_problem(problem, 'dopri5')
132
+
133
+ def test_dop853(self):
134
+ # Check the dop853 solver
135
+ for problem_cls in PROBLEMS:
136
+ problem = problem_cls()
137
+ if problem.cmplx:
138
+ continue
139
+ if problem.stiff:
140
+ continue
141
+ if hasattr(problem, 'jac'):
142
+ continue
143
+ self._do_problem(problem, 'dop853')
144
+
145
+ @pytest.mark.thread_unsafe
146
+ def test_concurrent_fail(self):
147
+ for sol in ('vode', 'zvode', 'lsoda'):
148
+ def f(t, y):
149
+ return 1.0
150
+
151
+ r = ode(f).set_integrator(sol)
152
+ r.set_initial_value(0, 0)
153
+
154
+ r2 = ode(f).set_integrator(sol)
155
+ r2.set_initial_value(0, 0)
156
+
157
+ r.integrate(r.t + 0.1)
158
+ r2.integrate(r2.t + 0.1)
159
+
160
+ assert_raises(RuntimeError, r.integrate, r.t + 0.1)
161
+
162
+ def test_concurrent_ok(self, num_parallel_threads):
163
+ def f(t, y):
164
+ return 1.0
165
+
166
+ for k in range(3):
167
+ for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
168
+ if sol in {'vode', 'zvode', 'lsoda'} and num_parallel_threads > 1:
169
+ continue
170
+ r = ode(f).set_integrator(sol)
171
+ r.set_initial_value(0, 0)
172
+
173
+ r2 = ode(f).set_integrator(sol)
174
+ r2.set_initial_value(0, 0)
175
+
176
+ r.integrate(r.t + 0.1)
177
+ r2.integrate(r2.t + 0.1)
178
+ r2.integrate(r2.t + 0.1)
179
+
180
+ assert_allclose(r.y, 0.1)
181
+ assert_allclose(r2.y, 0.2)
182
+
183
+ for sol in ('dopri5', 'dop853'):
184
+ r = ode(f).set_integrator(sol)
185
+ r.set_initial_value(0, 0)
186
+
187
+ r2 = ode(f).set_integrator(sol)
188
+ r2.set_initial_value(0, 0)
189
+
190
+ r.integrate(r.t + 0.1)
191
+ r.integrate(r.t + 0.1)
192
+ r2.integrate(r2.t + 0.1)
193
+ r.integrate(r.t + 0.1)
194
+ r2.integrate(r2.t + 0.1)
195
+
196
+ assert_allclose(r.y, 0.3)
197
+ assert_allclose(r2.y, 0.2)
198
+
199
+
200
+ class TestComplexOde(TestODEClass):
201
+
202
+ ode_class = complex_ode
203
+
204
+ def test_vode(self):
205
+ # Check the vode solver
206
+ for problem_cls in PROBLEMS:
207
+ problem = problem_cls()
208
+ if not problem.stiff:
209
+ self._do_problem(problem, 'vode', 'adams')
210
+ else:
211
+ self._do_problem(problem, 'vode', 'bdf')
212
+
213
+ def test_lsoda(self):
214
+
215
+ # Check the lsoda solver
216
+ for problem_cls in PROBLEMS:
217
+ problem = problem_cls()
218
+ self._do_problem(problem, 'lsoda')
219
+
220
+ def test_dopri5(self):
221
+ # Check the dopri5 solver
222
+ for problem_cls in PROBLEMS:
223
+ problem = problem_cls()
224
+ if problem.stiff:
225
+ continue
226
+ if hasattr(problem, 'jac'):
227
+ continue
228
+ self._do_problem(problem, 'dopri5')
229
+
230
+ def test_dop853(self):
231
+ # Check the dop853 solver
232
+ for problem_cls in PROBLEMS:
233
+ problem = problem_cls()
234
+ if problem.stiff:
235
+ continue
236
+ if hasattr(problem, 'jac'):
237
+ continue
238
+ self._do_problem(problem, 'dop853')
239
+
240
+
241
+ class TestSolout:
242
+ # Check integrate.ode correctly handles solout for dopri5 and dop853
243
+ def _run_solout_test(self, integrator):
244
+ # Check correct usage of solout
245
+ ts = []
246
+ ys = []
247
+ t0 = 0.0
248
+ tend = 10.0
249
+ y0 = [1.0, 2.0]
250
+
251
+ def solout(t, y):
252
+ ts.append(t)
253
+ ys.append(y.copy())
254
+
255
+ def rhs(t, y):
256
+ return [y[0] + y[1], -y[1]**2]
257
+
258
+ ig = ode(rhs).set_integrator(integrator)
259
+ ig.set_solout(solout)
260
+ ig.set_initial_value(y0, t0)
261
+ ret = ig.integrate(tend)
262
+ assert_array_equal(ys[0], y0)
263
+ assert_array_equal(ys[-1], ret)
264
+ assert_equal(ts[0], t0)
265
+ assert_equal(ts[-1], tend)
266
+
267
+ def test_solout(self):
268
+ for integrator in ('dopri5', 'dop853'):
269
+ self._run_solout_test(integrator)
270
+
271
+ def _run_solout_after_initial_test(self, integrator):
272
+ # Check if solout works even if it is set after the initial value.
273
+ ts = []
274
+ ys = []
275
+ t0 = 0.0
276
+ tend = 10.0
277
+ y0 = [1.0, 2.0]
278
+
279
+ def solout(t, y):
280
+ ts.append(t)
281
+ ys.append(y.copy())
282
+
283
+ def rhs(t, y):
284
+ return [y[0] + y[1], -y[1]**2]
285
+
286
+ ig = ode(rhs).set_integrator(integrator)
287
+ ig.set_initial_value(y0, t0)
288
+ ig.set_solout(solout)
289
+ ret = ig.integrate(tend)
290
+ assert_array_equal(ys[0], y0)
291
+ assert_array_equal(ys[-1], ret)
292
+ assert_equal(ts[0], t0)
293
+ assert_equal(ts[-1], tend)
294
+
295
+ def test_solout_after_initial(self):
296
+ for integrator in ('dopri5', 'dop853'):
297
+ self._run_solout_after_initial_test(integrator)
298
+
299
+ def _run_solout_break_test(self, integrator):
300
+ # Check correct usage of stopping via solout
301
+ ts = []
302
+ ys = []
303
+ t0 = 0.0
304
+ tend = 10.0
305
+ y0 = [1.0, 2.0]
306
+
307
+ def solout(t, y):
308
+ ts.append(t)
309
+ ys.append(y.copy())
310
+ if t > tend/2.0:
311
+ return -1
312
+
313
+ def rhs(t, y):
314
+ return [y[0] + y[1], -y[1]**2]
315
+
316
+ ig = ode(rhs).set_integrator(integrator)
317
+ ig.set_solout(solout)
318
+ ig.set_initial_value(y0, t0)
319
+ ret = ig.integrate(tend)
320
+ assert_array_equal(ys[0], y0)
321
+ assert_array_equal(ys[-1], ret)
322
+ assert_equal(ts[0], t0)
323
+ assert_(ts[-1] > tend/2.0)
324
+ assert_(ts[-1] < tend)
325
+
326
+ def test_solout_break(self):
327
+ for integrator in ('dopri5', 'dop853'):
328
+ self._run_solout_break_test(integrator)
329
+
330
+
331
+ class TestComplexSolout:
332
+ # Check integrate.ode correctly handles solout for dopri5 and dop853
333
+ def _run_solout_test(self, integrator):
334
+ # Check correct usage of solout
335
+ ts = []
336
+ ys = []
337
+ t0 = 0.0
338
+ tend = 20.0
339
+ y0 = [0.0]
340
+
341
+ def solout(t, y):
342
+ ts.append(t)
343
+ ys.append(y.copy())
344
+
345
+ def rhs(t, y):
346
+ return [1.0/(t - 10.0 - 1j)]
347
+
348
+ ig = complex_ode(rhs).set_integrator(integrator)
349
+ ig.set_solout(solout)
350
+ ig.set_initial_value(y0, t0)
351
+ ret = ig.integrate(tend)
352
+ assert_array_equal(ys[0], y0)
353
+ assert_array_equal(ys[-1], ret)
354
+ assert_equal(ts[0], t0)
355
+ assert_equal(ts[-1], tend)
356
+
357
+ def test_solout(self):
358
+ for integrator in ('dopri5', 'dop853'):
359
+ self._run_solout_test(integrator)
360
+
361
+ def _run_solout_break_test(self, integrator):
362
+ # Check correct usage of stopping via solout
363
+ ts = []
364
+ ys = []
365
+ t0 = 0.0
366
+ tend = 20.0
367
+ y0 = [0.0]
368
+
369
+ def solout(t, y):
370
+ ts.append(t)
371
+ ys.append(y.copy())
372
+ if t > tend/2.0:
373
+ return -1
374
+
375
+ def rhs(t, y):
376
+ return [1.0/(t - 10.0 - 1j)]
377
+
378
+ ig = complex_ode(rhs).set_integrator(integrator)
379
+ ig.set_solout(solout)
380
+ ig.set_initial_value(y0, t0)
381
+ ret = ig.integrate(tend)
382
+ assert_array_equal(ys[0], y0)
383
+ assert_array_equal(ys[-1], ret)
384
+ assert_equal(ts[0], t0)
385
+ assert_(ts[-1] > tend/2.0)
386
+ assert_(ts[-1] < tend)
387
+
388
+ def test_solout_break(self):
389
+ for integrator in ('dopri5', 'dop853'):
390
+ self._run_solout_break_test(integrator)
391
+
392
+
393
+ #------------------------------------------------------------------------------
394
+ # Test problems
395
+ #------------------------------------------------------------------------------
396
+
397
+
398
+ class ODE:
399
+ """
400
+ ODE problem
401
+ """
402
+ stiff = False
403
+ cmplx = False
404
+ stop_t = 1
405
+ z0 = []
406
+
407
+ lband = None
408
+ uband = None
409
+
410
+ atol = 1e-6
411
+ rtol = 1e-5
412
+
413
+
414
+ class SimpleOscillator(ODE):
415
+ r"""
416
+ Free vibration of a simple oscillator::
417
+ m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0
418
+ Solution::
419
+ u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
420
+ """
421
+ stop_t = 1 + 0.09
422
+ z0 = array([1.0, 0.1], float)
423
+
424
+ k = 4.0
425
+ m = 1.0
426
+
427
+ def f(self, z, t):
428
+ tmp = zeros((2, 2), float)
429
+ tmp[0, 1] = 1.0
430
+ tmp[1, 0] = -self.k / self.m
431
+ return dot(tmp, z)
432
+
433
+ def verify(self, zs, t):
434
+ omega = sqrt(self.k / self.m)
435
+ u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
436
+ return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
437
+
438
+
439
+ class ComplexExp(ODE):
440
+ r"""The equation :lm:`\dot u = i u`"""
441
+ stop_t = 1.23*pi
442
+ z0 = exp([1j, 2j, 3j, 4j, 5j])
443
+ cmplx = True
444
+
445
+ def f(self, z, t):
446
+ return 1j*z
447
+
448
+ def jac(self, z, t):
449
+ return 1j*eye(5)
450
+
451
+ def verify(self, zs, t):
452
+ u = self.z0 * exp(1j*t)
453
+ return allclose(u, zs, atol=self.atol, rtol=self.rtol)
454
+
455
+
456
+ class Pi(ODE):
457
+ r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
458
+ stop_t = 20
459
+ z0 = [0]
460
+ cmplx = True
461
+
462
+ def f(self, z, t):
463
+ return array([1./(t - 10 + 1j)])
464
+
465
+ def verify(self, zs, t):
466
+ u = -2j * np.arctan(10)
467
+ return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
468
+
469
+
470
+ class CoupledDecay(ODE):
471
+ r"""
472
+ 3 coupled decays suited for banded treatment
473
+ (banded mode makes it necessary when N>>3)
474
+ """
475
+
476
+ stiff = True
477
+ stop_t = 0.5
478
+ z0 = [5.0, 7.0, 13.0]
479
+ lband = 1
480
+ uband = 0
481
+
482
+ lmbd = [0.17, 0.23, 0.29] # fictitious decay constants
483
+
484
+ def f(self, z, t):
485
+ lmbd = self.lmbd
486
+ return np.array([-lmbd[0]*z[0],
487
+ -lmbd[1]*z[1] + lmbd[0]*z[0],
488
+ -lmbd[2]*z[2] + lmbd[1]*z[1]])
489
+
490
+ def jac(self, z, t):
491
+ # The full Jacobian is
492
+ #
493
+ # [-lmbd[0] 0 0 ]
494
+ # [ lmbd[0] -lmbd[1] 0 ]
495
+ # [ 0 lmbd[1] -lmbd[2]]
496
+ #
497
+ # The lower and upper bandwidths are lband=1 and uband=0, resp.
498
+ # The representation of this array in packed format is
499
+ #
500
+ # [-lmbd[0] -lmbd[1] -lmbd[2]]
501
+ # [ lmbd[0] lmbd[1] 0 ]
502
+
503
+ lmbd = self.lmbd
504
+ j = np.zeros((self.lband + self.uband + 1, 3), order='F')
505
+
506
+ def set_j(ri, ci, val):
507
+ j[self.uband + ri - ci, ci] = val
508
+ set_j(0, 0, -lmbd[0])
509
+ set_j(1, 0, lmbd[0])
510
+ set_j(1, 1, -lmbd[1])
511
+ set_j(2, 1, lmbd[1])
512
+ set_j(2, 2, -lmbd[2])
513
+ return j
514
+
515
+ def verify(self, zs, t):
516
+ # Formulae derived by hand
517
+ lmbd = np.array(self.lmbd)
518
+ d10 = lmbd[1] - lmbd[0]
519
+ d21 = lmbd[2] - lmbd[1]
520
+ d20 = lmbd[2] - lmbd[0]
521
+ e0 = np.exp(-lmbd[0] * t)
522
+ e1 = np.exp(-lmbd[1] * t)
523
+ e2 = np.exp(-lmbd[2] * t)
524
+ u = np.vstack((
525
+ self.z0[0] * e0,
526
+ self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
527
+ self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
528
+ lmbd[1] * lmbd[0] * self.z0[0] / d10 *
529
+ (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
530
+ return allclose(u, zs, atol=self.atol, rtol=self.rtol)
531
+
532
+
533
+ PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
534
+
535
+ #------------------------------------------------------------------------------
536
+
537
+
538
+ def f(t, x):
539
+ dxdt = [x[1], -x[0]]
540
+ return dxdt
541
+
542
+
543
+ def jac(t, x):
544
+ j = array([[0.0, 1.0],
545
+ [-1.0, 0.0]])
546
+ return j
547
+
548
+
549
+ def f1(t, x, omega):
550
+ dxdt = [omega*x[1], -omega*x[0]]
551
+ return dxdt
552
+
553
+
554
+ def jac1(t, x, omega):
555
+ j = array([[0.0, omega],
556
+ [-omega, 0.0]])
557
+ return j
558
+
559
+
560
+ def f2(t, x, omega1, omega2):
561
+ dxdt = [omega1*x[1], -omega2*x[0]]
562
+ return dxdt
563
+
564
+
565
+ def jac2(t, x, omega1, omega2):
566
+ j = array([[0.0, omega1],
567
+ [-omega2, 0.0]])
568
+ return j
569
+
570
+
571
+ def fv(t, x, omega):
572
+ dxdt = [omega[0]*x[1], -omega[1]*x[0]]
573
+ return dxdt
574
+
575
+
576
+ def jacv(t, x, omega):
577
+ j = array([[0.0, omega[0]],
578
+ [-omega[1], 0.0]])
579
+ return j
580
+
581
+
582
+ class ODECheckParameterUse:
583
+ """Call an ode-class solver with several cases of parameter use."""
584
+
585
+ # solver_name must be set before tests can be run with this class.
586
+
587
+ # Set these in subclasses.
588
+ solver_name = ''
589
+ solver_uses_jac = False
590
+
591
+ def _get_solver(self, f, jac):
592
+ solver = ode(f, jac)
593
+ if self.solver_uses_jac:
594
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
595
+ with_jacobian=self.solver_uses_jac)
596
+ else:
597
+ # XXX Shouldn't set_integrator *always* accept the keyword arg
598
+ # 'with_jacobian', and perhaps raise an exception if it is set
599
+ # to True if the solver can't actually use it?
600
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
601
+ return solver
602
+
603
+ def _check_solver(self, solver):
604
+ ic = [1.0, 0.0]
605
+ solver.set_initial_value(ic, 0.0)
606
+ solver.integrate(pi)
607
+ assert_array_almost_equal(solver.y, [-1.0, 0.0])
608
+
609
+ def test_no_params(self):
610
+ solver = self._get_solver(f, jac)
611
+ self._check_solver(solver)
612
+
613
+ def test_one_scalar_param(self):
614
+ solver = self._get_solver(f1, jac1)
615
+ omega = 1.0
616
+ solver.set_f_params(omega)
617
+ if self.solver_uses_jac:
618
+ solver.set_jac_params(omega)
619
+ self._check_solver(solver)
620
+
621
+ def test_two_scalar_params(self):
622
+ solver = self._get_solver(f2, jac2)
623
+ omega1 = 1.0
624
+ omega2 = 1.0
625
+ solver.set_f_params(omega1, omega2)
626
+ if self.solver_uses_jac:
627
+ solver.set_jac_params(omega1, omega2)
628
+ self._check_solver(solver)
629
+
630
+ def test_vector_param(self):
631
+ solver = self._get_solver(fv, jacv)
632
+ omega = [1.0, 1.0]
633
+ solver.set_f_params(omega)
634
+ if self.solver_uses_jac:
635
+ solver.set_jac_params(omega)
636
+ self._check_solver(solver)
637
+
638
+ @pytest.mark.thread_unsafe
639
+ def test_warns_on_failure(self):
640
+ # Set nsteps small to ensure failure
641
+ solver = self._get_solver(f, jac)
642
+ solver.set_integrator(self.solver_name, nsteps=1)
643
+ ic = [1.0, 0.0]
644
+ solver.set_initial_value(ic, 0.0)
645
+ assert_warns(UserWarning, solver.integrate, pi)
646
+
647
+
648
+ class TestDOPRI5CheckParameterUse(ODECheckParameterUse):
649
+ solver_name = 'dopri5'
650
+ solver_uses_jac = False
651
+
652
+
653
+ class TestDOP853CheckParameterUse(ODECheckParameterUse):
654
+ solver_name = 'dop853'
655
+ solver_uses_jac = False
656
+
657
+
658
+ class TestVODECheckParameterUse(ODECheckParameterUse):
659
+ solver_name = 'vode'
660
+ solver_uses_jac = True
661
+
662
+
663
+ class TestZVODECheckParameterUse(ODECheckParameterUse):
664
+ solver_name = 'zvode'
665
+ solver_uses_jac = True
666
+
667
+
668
+ class TestLSODACheckParameterUse(ODECheckParameterUse):
669
+ solver_name = 'lsoda'
670
+ solver_uses_jac = True
671
+
672
+
673
+ def test_odeint_trivial_time():
674
+ # Test that odeint succeeds when given a single time point
675
+ # and full_output=True. This is a regression test for gh-4282.
676
+ y0 = 1
677
+ t = [0]
678
+ y, info = odeint(lambda y, t: -y, y0, t, full_output=True)
679
+ assert_array_equal(y, np.array([[y0]]))
680
+
681
+
682
+ def test_odeint_banded_jacobian():
683
+ # Test the use of the `Dfun`, `ml` and `mu` options of odeint.
684
+
685
+ def func(y, t, c):
686
+ return c.dot(y)
687
+
688
+ def jac(y, t, c):
689
+ return c
690
+
691
+ def jac_transpose(y, t, c):
692
+ return c.T.copy(order='C')
693
+
694
+ def bjac_rows(y, t, c):
695
+ jac = np.vstack((np.r_[0, np.diag(c, 1)],
696
+ np.diag(c),
697
+ np.r_[np.diag(c, -1), 0],
698
+ np.r_[np.diag(c, -2), 0, 0]))
699
+ return jac
700
+
701
+ def bjac_cols(y, t, c):
702
+ return bjac_rows(y, t, c).T.copy(order='C')
703
+
704
+ c = array([[-205, 0.01, 0.00, 0.0],
705
+ [0.1, -2.50, 0.02, 0.0],
706
+ [1e-3, 0.01, -2.0, 0.01],
707
+ [0.00, 0.00, 0.1, -1.0]])
708
+
709
+ y0 = np.ones(4)
710
+ t = np.array([0, 5, 10, 100])
711
+
712
+ # Use the full Jacobian.
713
+ sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
714
+ atol=1e-13, rtol=1e-11, mxstep=10000,
715
+ Dfun=jac)
716
+
717
+ # Use the transposed full Jacobian, with col_deriv=True.
718
+ sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
719
+ atol=1e-13, rtol=1e-11, mxstep=10000,
720
+ Dfun=jac_transpose, col_deriv=True)
721
+
722
+ # Use the banded Jacobian.
723
+ sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,
724
+ atol=1e-13, rtol=1e-11, mxstep=10000,
725
+ Dfun=bjac_rows, ml=2, mu=1)
726
+
727
+ # Use the transposed banded Jacobian, with col_deriv=True.
728
+ sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,
729
+ atol=1e-13, rtol=1e-11, mxstep=10000,
730
+ Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)
731
+
732
+ assert_allclose(sol1, sol2, err_msg="sol1 != sol2")
733
+ assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3")
734
+ assert_allclose(sol3, sol4, err_msg="sol3 != sol4")
735
+
736
+ # Verify that the number of jacobian evaluations was the same for the
737
+ # calls of odeint with a full jacobian and with a banded jacobian. This is
738
+ # a regression test--there was a bug in the handling of banded jacobians
739
+ # that resulted in an incorrect jacobian matrix being passed to the LSODA
740
+ # code. That would cause errors or excessive jacobian evaluations.
741
+ assert_array_equal(info1['nje'], info2['nje'])
742
+ assert_array_equal(info3['nje'], info4['nje'])
743
+
744
+ # Test the use of tfirst
745
+ sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),
746
+ full_output=True, atol=1e-13, rtol=1e-11,
747
+ mxstep=10000,
748
+ Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)
749
+ # The code should execute the exact same sequence of floating point
750
+ # calculations, so these should be exactly equal. We'll be safe and use
751
+ # a small tolerance.
752
+ assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty")
753
+
754
+
755
+ def test_odeint_errors():
756
+ def sys1d(x, t):
757
+ return -100*x
758
+
759
+ def bad1(x, t):
760
+ return 1.0/0
761
+
762
+ def bad2(x, t):
763
+ return "foo"
764
+
765
+ def bad_jac1(x, t):
766
+ return 1.0/0
767
+
768
+ def bad_jac2(x, t):
769
+ return [["foo"]]
770
+
771
+ def sys2d(x, t):
772
+ return [-100*x[0], -0.1*x[1]]
773
+
774
+ def sys2d_bad_jac(x, t):
775
+ return [[1.0/0, 0], [0, -0.1]]
776
+
777
+ assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1])
778
+ assert_raises(ValueError, odeint, bad2, 1.0, [0, 1])
779
+
780
+ assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1)
781
+ assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2)
782
+
783
+ assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1],
784
+ Dfun=sys2d_bad_jac)
785
+
786
+
787
+ def test_odeint_bad_shapes():
788
+ # Tests of some errors that can occur with odeint.
789
+
790
+ def badrhs(x, t):
791
+ return [1, -1]
792
+
793
+ def sys1(x, t):
794
+ return -100*x
795
+
796
+ def badjac(x, t):
797
+ return [[0, 0, 0]]
798
+
799
+ # y0 must be at most 1-d.
800
+ bad_y0 = [[0, 0], [0, 0]]
801
+ assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1])
802
+
803
+ # t must be at most 1-d.
804
+ bad_t = [[0, 1], [2, 3]]
805
+ assert_raises(ValueError, odeint, sys1, [10.0], bad_t)
806
+
807
+ # y0 is 10, but badrhs(x, t) returns [1, -1].
808
+ assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1])
809
+
810
+ # shape of array returned by badjac(x, t) is not correct.
811
+ assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac)
812
+
813
+
814
+ def test_repeated_t_values():
815
+ """Regression test for gh-8217."""
816
+
817
+ def func(x, t):
818
+ return -0.25*x
819
+
820
+ t = np.zeros(10)
821
+ sol = odeint(func, [1.], t)
822
+ assert_array_equal(sol, np.ones((len(t), 1)))
823
+
824
+ tau = 4*np.log(2)
825
+ t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau]
826
+ sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12)
827
+ expected_sol = np.array([[1.0, 2.0]]*9 +
828
+ [[0.5, 1.0],
829
+ [0.25, 0.5],
830
+ [0.25, 0.5],
831
+ [0.125, 0.25]])
832
+ assert_allclose(sol, expected_sol)
833
+
834
+ # Edge case: empty t sequence.
835
+ sol = odeint(func, [1.], [])
836
+ assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1)))
837
+
838
+ # t values are not monotonic.
839
+ assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0])
840
+ assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3])
llava_video/lib/python3.10/site-packages/scipy/ndimage/__init__.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =========================================================
3
+ Multidimensional image processing (:mod:`scipy.ndimage`)
4
+ =========================================================
5
+
6
+ .. currentmodule:: scipy.ndimage
7
+
8
+ This package contains various functions for multidimensional image
9
+ processing.
10
+
11
+
12
+ Filters
13
+ =======
14
+
15
+ .. autosummary::
16
+ :toctree: generated/
17
+
18
+ convolve - Multidimensional convolution
19
+ convolve1d - 1-D convolution along the given axis
20
+ correlate - Multidimensional correlation
21
+ correlate1d - 1-D correlation along the given axis
22
+ gaussian_filter
23
+ gaussian_filter1d
24
+ gaussian_gradient_magnitude
25
+ gaussian_laplace
26
+ generic_filter - Multidimensional filter using a given function
27
+ generic_filter1d - 1-D generic filter along the given axis
28
+ generic_gradient_magnitude
29
+ generic_laplace
30
+ laplace - N-D Laplace filter based on approximate second derivatives
31
+ maximum_filter
32
+ maximum_filter1d
33
+ median_filter - Calculates a multidimensional median filter
34
+ minimum_filter
35
+ minimum_filter1d
36
+ percentile_filter - Calculates a multidimensional percentile filter
37
+ prewitt
38
+ rank_filter - Calculates a multidimensional rank filter
39
+ sobel
40
+ uniform_filter - Multidimensional uniform filter
41
+ uniform_filter1d - 1-D uniform filter along the given axis
42
+
43
+ Fourier filters
44
+ ===============
45
+
46
+ .. autosummary::
47
+ :toctree: generated/
48
+
49
+ fourier_ellipsoid
50
+ fourier_gaussian
51
+ fourier_shift
52
+ fourier_uniform
53
+
54
+ Interpolation
55
+ =============
56
+
57
+ .. autosummary::
58
+ :toctree: generated/
59
+
60
+ affine_transform - Apply an affine transformation
61
+ geometric_transform - Apply an arbitrary geometric transform
62
+ map_coordinates - Map input array to new coordinates by interpolation
63
+ rotate - Rotate an array
64
+ shift - Shift an array
65
+ spline_filter
66
+ spline_filter1d
67
+ zoom - Zoom an array
68
+
69
+ Measurements
70
+ ============
71
+
72
+ .. autosummary::
73
+ :toctree: generated/
74
+
75
+ center_of_mass - The center of mass of the values of an array at labels
76
+ extrema - Min's and max's of an array at labels, with their positions
77
+ find_objects - Find objects in a labeled array
78
+ histogram - Histogram of the values of an array, optionally at labels
79
+ label - Label features in an array
80
+ labeled_comprehension
81
+ maximum
82
+ maximum_position
83
+ mean - Mean of the values of an array at labels
84
+ median
85
+ minimum
86
+ minimum_position
87
+ standard_deviation - Standard deviation of an N-D image array
88
+ sum_labels - Sum of the values of the array
89
+ value_indices - Find indices of each distinct value in given array
90
+ variance - Variance of the values of an N-D image array
91
+ watershed_ift
92
+
93
+ Morphology
94
+ ==========
95
+
96
+ .. autosummary::
97
+ :toctree: generated/
98
+
99
+ binary_closing
100
+ binary_dilation
101
+ binary_erosion
102
+ binary_fill_holes
103
+ binary_hit_or_miss
104
+ binary_opening
105
+ binary_propagation
106
+ black_tophat
107
+ distance_transform_bf
108
+ distance_transform_cdt
109
+ distance_transform_edt
110
+ generate_binary_structure
111
+ grey_closing
112
+ grey_dilation
113
+ grey_erosion
114
+ grey_opening
115
+ iterate_structure
116
+ morphological_gradient
117
+ morphological_laplace
118
+ white_tophat
119
+
120
+ """
121
+
122
+ # Copyright (C) 2003-2005 Peter J. Verveer
123
+ #
124
+ # Redistribution and use in source and binary forms, with or without
125
+ # modification, are permitted provided that the following conditions
126
+ # are met:
127
+ #
128
+ # 1. Redistributions of source code must retain the above copyright
129
+ # notice, this list of conditions and the following disclaimer.
130
+ #
131
+ # 2. Redistributions in binary form must reproduce the above
132
+ # copyright notice, this list of conditions and the following
133
+ # disclaimer in the documentation and/or other materials provided
134
+ # with the distribution.
135
+ #
136
+ # 3. The name of the author may not be used to endorse or promote
137
+ # products derived from this software without specific prior
138
+ # written permission.
139
+ #
140
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
141
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
142
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
143
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
144
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
145
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
146
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
147
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
148
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
149
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
150
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
151
+
152
+ # bring in the public functionality from private namespaces
153
+
154
+ # mypy: ignore-errors
155
+
156
+ from ._support_alternative_backends import *
157
+
158
+ # adjust __all__ and do not leak implementation details
159
+ from . import _support_alternative_backends
160
+ __all__ = _support_alternative_backends.__all__
161
+ del _support_alternative_backends, _ndimage_api, _delegators # noqa: F821
162
+
163
+
164
+ # Deprecated namespaces, to be removed in v2.0.0
165
+ from . import filters
166
+ from . import fourier
167
+ from . import interpolation
168
+ from . import measurements
169
+ from . import morphology
170
+
171
+ from scipy._lib._testutils import PytestTester
172
+ test = PytestTester(__name__)
173
+ del PytestTester
llava_video/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (17 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/ndimage/_delegators.py ADDED
@@ -0,0 +1,297 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Delegators for alternative backends in scipy.ndimage.
2
+
3
+ The signature of `func_signature` must match the signature of ndimage.func.
4
+ The job of a `func_signature` is to know which arguments of `ndimage.func`
5
+ are arrays.
6
+
7
+ * signatures are generated by
8
+
9
+ --------------
10
+ import inspect
11
+ from scipy import ndimage
12
+
13
+ names = [x for x in dir(ndimage) if not x.startswith('_')]
14
+ objs = [getattr(ndimage, name) for name in names]
15
+ funcs = [obj for obj in objs if inspect.isroutine(obj)]
16
+
17
+ for func in funcs:
18
+ sig = inspect.signature(func)
19
+ print(f"def {func.__name__}_signature{sig}:\n\tpass\n\n")
20
+ ---------------
21
+
22
+ * which arguments to delegate on: manually trawled the documentation for
23
+ array-like and array arguments
24
+
25
+ """
26
+ import numpy as np
27
+ from scipy._lib._array_api import array_namespace
28
+ from scipy.ndimage._ni_support import _skip_if_dtype, _skip_if_int
29
+
30
+
31
+ def affine_transform_signature(
32
+ input, matrix, offset=0.0, output_shape=None, output=None, *args, **kwds
33
+ ):
34
+ return array_namespace(input, matrix, _skip_if_dtype(output))
35
+
36
+
37
+ def binary_closing_signature(
38
+ input, structure=None, iterations=1, output=None, *args, **kwds
39
+ ):
40
+ return array_namespace(input, structure, _skip_if_dtype(output))
41
+
42
+ binary_opening_signature = binary_closing_signature
43
+
44
+
45
+ def binary_dilation_signature(
46
+ input, structure=None, iterations=1, mask=None, output=None, *args, **kwds
47
+ ):
48
+ return array_namespace(input, structure, _skip_if_dtype(output), mask)
49
+
50
+ binary_erosion_signature = binary_dilation_signature
51
+
52
+
53
+ def binary_fill_holes_signature(
54
+ input, structure=None, output=None, origin=0, *args, **kwargs
55
+ ):
56
+ return array_namespace(input, structure, _skip_if_dtype(output))
57
+
58
+
59
+ def label_signature(input, structure=None, output=None, origin=0):
60
+ return array_namespace(input, structure, _skip_if_dtype(output))
61
+
62
+
63
+ def binary_hit_or_miss_signature(
64
+ input, structure1=None, structure2=None, output=None, *args, **kwds
65
+ ):
66
+ return array_namespace(input, structure1, structure2, _skip_if_dtype(output))
67
+
68
+
69
+ def binary_propagation_signature(
70
+ input, structure=None, mask=None, output=None, *args, **kwds
71
+ ):
72
+ return array_namespace(input, structure, mask, _skip_if_dtype(output))
73
+
74
+
75
+ def convolve_signature(input, weights, output=None, *args, **kwds):
76
+ return array_namespace(input, weights, _skip_if_dtype(output))
77
+
78
+ correlate_signature = convolve_signature
79
+
80
+
81
+ def convolve1d_signature(input, weights, axis=-1, output=None, *args, **kwds):
82
+ return array_namespace(input, weights, _skip_if_dtype(output))
83
+
84
+ correlate1d_signature = convolve1d_signature
85
+
86
+
87
+ def distance_transform_bf_signature(
88
+ input, metric='euclidean', sampling=None, return_distances=True,
89
+ return_indices=False, distances=None, indices=None
90
+ ):
91
+ return array_namespace(input, distances, indices)
92
+
93
+
94
+ def distance_transform_cdt_signature(
95
+ input, metric='chessboard', return_distances=True, return_indices=False,
96
+ distances=None, indices=None
97
+ ):
98
+ return array_namespace(input, distances, indices)
99
+
100
+
101
+ def distance_transform_edt_signature(
102
+ input, sampling=None, return_distances=True, return_indices=False,
103
+ distances=None, indices=None
104
+ ):
105
+ return array_namespace(input, distances, indices)
106
+
107
+
108
+ def find_objects_signature(input, max_label=0):
109
+ return array_namespace(input)
110
+
111
+
112
+ def fourier_ellipsoid_signature(input, size, n=-1, axis=-1, output=None):
113
+ return array_namespace(input, _skip_if_dtype(output))
114
+
115
+ fourier_uniform_signature = fourier_ellipsoid_signature
116
+
117
+
118
+ def fourier_gaussian_signature(input, sigma, n=-1, axis=-1, output=None):
119
+ return array_namespace(input, _skip_if_dtype(output))
120
+
121
+ def fourier_shift_signature(input, shift, n=-1, axis=-1, output=None):
122
+ return array_namespace(input, _skip_if_dtype(output))
123
+
124
+
125
+ def gaussian_filter_signature(input, sigma, order=0, output=None, *args, **kwds):
126
+ return array_namespace(input, _skip_if_dtype(output))
127
+
128
+
129
+ def gaussian_filter1d_signature(
130
+ input, sigma, axis=-1, order=0, output=None, *args, **kwds
131
+ ):
132
+ return array_namespace(input, _skip_if_dtype(output))
133
+
134
+
135
+ def gaussian_gradient_magnitude_signature(input, sigma, output=None, *args, **kwds):
136
+ return array_namespace(input, _skip_if_dtype(output))
137
+
138
+ gaussian_laplace_signature = gaussian_gradient_magnitude_signature
139
+
140
+
141
+ def generate_binary_structure_signature(rank, connectivity):
142
+ # XXX: no input arrays; always return numpy
143
+ return np
144
+
145
+
146
+ def generic_filter_signature(
147
+ input, function, size=None, footprint=None, output=None, *args, **kwds
148
+ ):
149
+ # XXX: function LowLevelCallable w/backends
150
+ return array_namespace(input, footprint, _skip_if_dtype(output))
151
+
152
+
153
+ def generic_filter1d_signature(
154
+ input, function, filter_size, axis=-1, output=None, *args, **kwds
155
+ ):
156
+ return array_namespace(input, _skip_if_dtype(output))
157
+
158
+
159
+ def generic_gradient_magnitude_signature(
160
+ input, derivative, output=None, *args, **kwds
161
+ ):
162
+ # XXX: function LowLevelCallable w/backends
163
+ return array_namespace(input, _skip_if_dtype(output))
164
+
165
+
166
+ def generic_laplace_signature(input, derivative2, output=None, *args, **kwds):
167
+ # XXX: function LowLevelCallable w/backends
168
+ return array_namespace(input, _skip_if_dtype(output))
169
+
170
+
171
+ def geometric_transform_signature(
172
+ input, mapping, output_shape=None, output=None, *args, **kwds
173
+ ):
174
+ return array_namespace(input, _skip_if_dtype(output))
175
+
176
+
177
+ def histogram_signature(input, min, max, bins, labels=None, index=None):
178
+ return array_namespace(input, labels)
179
+
180
+
181
+ def iterate_structure_signature(structure, iterations, origin=None):
182
+ return array_namespace(structure)
183
+
184
+
185
+ def labeled_comprehension_signature(input, labels, *args, **kwds):
186
+ return array_namespace(input, labels)
187
+
188
+
189
+ def laplace_signature(input, output=None, *args, **kwds):
190
+ return array_namespace(input, _skip_if_dtype(output))
191
+
192
+
193
+ def map_coordinates_signature(input, coordinates, output=None, *args, **kwds):
194
+ return array_namespace(input, coordinates, _skip_if_dtype(output))
195
+
196
+
197
+ def maximum_filter1d_signature(input, size, axis=-1, output=None, *args, **kwds):
198
+ return array_namespace(input, _skip_if_dtype(output))
199
+
200
+ minimum_filter1d_signature = maximum_filter1d_signature
201
+ uniform_filter1d_signature = maximum_filter1d_signature
202
+
203
+
204
+ def maximum_signature(input, labels=None, index=None):
205
+ return array_namespace(input, labels, _skip_if_int(index))
206
+
207
+ minimum_signature = maximum_signature
208
+ median_signature = maximum_signature
209
+ mean_signature = maximum_signature
210
+ variance_signature = maximum_signature
211
+ standard_deviation_signature = maximum_signature
212
+ sum_labels_signature = maximum_signature
213
+ sum_signature = maximum_signature # ndimage.sum is sum_labels
214
+
215
+ maximum_position_signature = maximum_signature
216
+ minimum_position_signature = maximum_signature
217
+
218
+ extrema_signature = maximum_signature
219
+ center_of_mass_signature = extrema_signature
220
+
221
+
222
+ def median_filter_signature(
223
+ input, size=None, footprint=None, output=None, *args, **kwds
224
+ ):
225
+ return array_namespace(input, footprint, _skip_if_dtype(output))
226
+
227
+ minimum_filter_signature = median_filter_signature
228
+ maximum_filter_signature = median_filter_signature
229
+
230
+
231
+ def morphological_gradient_signature(
232
+ input, size=None, footprint=None, structure=None, output=None, *args, **kwds
233
+ ):
234
+ return array_namespace(input, footprint, structure, _skip_if_dtype(output))
235
+
236
+ morphological_laplace_signature = morphological_gradient_signature
237
+ white_tophat_signature = morphological_gradient_signature
238
+ black_tophat_signature = morphological_gradient_signature
239
+ grey_closing_signature = morphological_gradient_signature
240
+ grey_dilation_signature = morphological_gradient_signature
241
+ grey_erosion_signature = morphological_gradient_signature
242
+ grey_opening_signature = morphological_gradient_signature
243
+
244
+
245
+ def percentile_filter_signature(
246
+ input, percentile, size=None, footprint=None, output=None, *args, **kwds
247
+ ):
248
+ return array_namespace(input, footprint, _skip_if_dtype(output))
249
+
250
+
251
+ def prewitt_signature(input, axis=-1, output=None, *args, **kwds):
252
+ return array_namespace(input, _skip_if_dtype(output))
253
+
254
+ sobel_signature = prewitt_signature
255
+
256
+
257
+ def rank_filter_signature(
258
+ input, rank, size=None, footprint=None, output=None, *args, **kwds
259
+ ):
260
+ return array_namespace(input, footprint, _skip_if_dtype(output))
261
+
262
+
263
+ def rotate_signature(
264
+ input, angle, axes=(1, 0), reshape=True, output=None , *args, **kwds
265
+ ):
266
+ return array_namespace(input, _skip_if_dtype(output))
267
+
268
+
269
+ def shift_signature(input, shift, output=None, *args, **kwds):
270
+ return array_namespace(input, _skip_if_dtype(output))
271
+
272
+
273
+ def spline_filter_signature(input, order=3, output=np.float64, *args, **kwds):
274
+ return array_namespace(input, _skip_if_dtype(output))
275
+
276
+
277
+ def spline_filter1d_signature(
278
+ input, order=3, axis=-1, output=np.float64, *args, **kwds
279
+ ):
280
+ return array_namespace(input, _skip_if_dtype(output))
281
+
282
+
283
+ def uniform_filter_signature(input, size=3, output=None, *args, **kwds):
284
+ return array_namespace(input, _skip_if_dtype(output))
285
+
286
+
287
+ def value_indices_signature(arr, *args, **kwds):
288
+ return array_namespace(arr)
289
+
290
+
291
+ def watershed_ift_signature(input, markers, structure=None, output=None):
292
+ return array_namespace(input, markers, structure, _skip_if_dtype(output))
293
+
294
+
295
+ def zoom_signature(input, zoom, output=None, *args, **kwds):
296
+ return array_namespace(input, _skip_if_dtype(output))
297
+
llava_video/lib/python3.10/site-packages/scipy/ndimage/_filters.py ADDED
@@ -0,0 +1,1965 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ from collections.abc import Iterable
32
+ import numbers
33
+ import warnings
34
+ import numpy as np
35
+ import operator
36
+
37
+ from scipy._lib._util import normalize_axis_index
38
+ from . import _ni_support
39
+ from . import _nd_image
40
+ from . import _ni_docstrings
41
+ from . import _rank_filter_1d
42
+
43
+ __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
44
+ 'prewitt', 'sobel', 'generic_laplace', 'laplace',
45
+ 'gaussian_laplace', 'generic_gradient_magnitude',
46
+ 'gaussian_gradient_magnitude', 'correlate', 'convolve',
47
+ 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
48
+ 'maximum_filter1d', 'minimum_filter', 'maximum_filter',
49
+ 'rank_filter', 'median_filter', 'percentile_filter',
50
+ 'generic_filter1d', 'generic_filter']
51
+
52
+
53
+ def _invalid_origin(origin, lenw):
54
+ return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
55
+
56
+
57
+ def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
58
+ """Complex convolution via a linear combination of real convolutions."""
59
+ complex_input = input.dtype.kind == 'c'
60
+ complex_weights = weights.dtype.kind == 'c'
61
+ if complex_input and complex_weights:
62
+ # real component of the output
63
+ func(input.real, weights.real, output=output.real,
64
+ cval=np.real(cval), **kwargs)
65
+ output.real -= func(input.imag, weights.imag, output=None,
66
+ cval=np.imag(cval), **kwargs)
67
+ # imaginary component of the output
68
+ func(input.real, weights.imag, output=output.imag,
69
+ cval=np.real(cval), **kwargs)
70
+ output.imag += func(input.imag, weights.real, output=None,
71
+ cval=np.imag(cval), **kwargs)
72
+ elif complex_input:
73
+ func(input.real, weights, output=output.real, cval=np.real(cval),
74
+ **kwargs)
75
+ func(input.imag, weights, output=output.imag, cval=np.imag(cval),
76
+ **kwargs)
77
+ else:
78
+ if np.iscomplexobj(cval):
79
+ raise ValueError("Cannot provide a complex-valued cval when the "
80
+ "input is real.")
81
+ func(input, weights.real, output=output.real, cval=cval, **kwargs)
82
+ func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
83
+ return output
84
+
85
+
86
+ def _expand_origin(ndim_image, axes, origin):
87
+ num_axes = len(axes)
88
+ origins = _ni_support._normalize_sequence(origin, num_axes)
89
+ if num_axes < ndim_image:
90
+ # set origin = 0 for any axes not being filtered
91
+ origins_temp = [0,] * ndim_image
92
+ for o, ax in zip(origins, axes):
93
+ origins_temp[ax] = o
94
+ origins = origins_temp
95
+ return origins
96
+
97
+
98
+ def _expand_footprint(ndim_image, axes, footprint,
99
+ footprint_name="footprint"):
100
+ num_axes = len(axes)
101
+ if num_axes < ndim_image:
102
+ if footprint.ndim != num_axes:
103
+ raise RuntimeError(f"{footprint_name}.ndim ({footprint.ndim}) "
104
+ f"must match len(axes) ({num_axes})")
105
+
106
+ footprint = np.expand_dims(
107
+ footprint,
108
+ tuple(ax for ax in range(ndim_image) if ax not in axes)
109
+ )
110
+ return footprint
111
+
112
+
113
+ def _expand_mode(ndim_image, axes, mode):
114
+ num_axes = len(axes)
115
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
116
+ # set mode = 'constant' for any axes not being filtered
117
+ modes = _ni_support._normalize_sequence(mode, num_axes)
118
+ modes_temp = ['constant'] * ndim_image
119
+ for m, ax in zip(modes, axes):
120
+ modes_temp[ax] = m
121
+ mode = modes_temp
122
+ return mode
123
+
124
+
125
+ @_ni_docstrings.docfiller
126
+ def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
127
+ cval=0.0, origin=0):
128
+ """Calculate a 1-D correlation along the given axis.
129
+
130
+ The lines of the array along the given axis are correlated with the
131
+ given weights.
132
+
133
+ Parameters
134
+ ----------
135
+ %(input)s
136
+ weights : array
137
+ 1-D sequence of numbers.
138
+ %(axis)s
139
+ %(output)s
140
+ %(mode_reflect)s
141
+ %(cval)s
142
+ %(origin)s
143
+
144
+ Returns
145
+ -------
146
+ result : ndarray
147
+ Correlation result. Has the same shape as `input`.
148
+
149
+ Examples
150
+ --------
151
+ >>> from scipy.ndimage import correlate1d
152
+ >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
153
+ array([ 8, 26, 8, 12, 7, 28, 36, 9])
154
+ """
155
+ input = np.asarray(input)
156
+ weights = np.asarray(weights)
157
+ complex_input = input.dtype.kind == 'c'
158
+ complex_weights = weights.dtype.kind == 'c'
159
+ if complex_input or complex_weights:
160
+ if complex_weights:
161
+ weights = weights.conj()
162
+ weights = weights.astype(np.complex128, copy=False)
163
+ kwargs = dict(axis=axis, mode=mode, origin=origin)
164
+ output = _ni_support._get_output(output, input, complex_output=True)
165
+ return _complex_via_real_components(correlate1d, input, weights,
166
+ output, cval, **kwargs)
167
+
168
+ output = _ni_support._get_output(output, input)
169
+ weights = np.asarray(weights, dtype=np.float64)
170
+ if weights.ndim != 1 or weights.shape[0] < 1:
171
+ raise RuntimeError('no filter weights given')
172
+ if not weights.flags.contiguous:
173
+ weights = weights.copy()
174
+ axis = normalize_axis_index(axis, input.ndim)
175
+ if _invalid_origin(origin, len(weights)):
176
+ raise ValueError('Invalid origin; origin must satisfy '
177
+ '-(len(weights) // 2) <= origin <= '
178
+ '(len(weights)-1) // 2')
179
+ mode = _ni_support._extend_mode_to_code(mode)
180
+ _nd_image.correlate1d(input, weights, axis, output, mode, cval,
181
+ origin)
182
+ return output
183
+
184
+
185
+ @_ni_docstrings.docfiller
186
+ def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
187
+ cval=0.0, origin=0):
188
+ """Calculate a 1-D convolution along the given axis.
189
+
190
+ The lines of the array along the given axis are convolved with the
191
+ given weights.
192
+
193
+ Parameters
194
+ ----------
195
+ %(input)s
196
+ weights : ndarray
197
+ 1-D sequence of numbers.
198
+ %(axis)s
199
+ %(output)s
200
+ %(mode_reflect)s
201
+ %(cval)s
202
+ %(origin)s
203
+
204
+ Returns
205
+ -------
206
+ convolve1d : ndarray
207
+ Convolved array with same shape as input
208
+
209
+ Examples
210
+ --------
211
+ >>> from scipy.ndimage import convolve1d
212
+ >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
213
+ array([14, 24, 4, 13, 12, 36, 27, 0])
214
+ """
215
+ weights = np.asarray(weights)
216
+ weights = weights[::-1]
217
+ origin = -origin
218
+ if not weights.shape[0] & 1:
219
+ origin -= 1
220
+ if weights.dtype.kind == 'c':
221
+ # pre-conjugate here to counteract the conjugation in correlate1d
222
+ weights = weights.conj()
223
+ return correlate1d(input, weights, axis, output, mode, cval, origin)
224
+
225
+
226
+ def _gaussian_kernel1d(sigma, order, radius):
227
+ """
228
+ Computes a 1-D Gaussian convolution kernel.
229
+ """
230
+ if order < 0:
231
+ raise ValueError('order must be non-negative')
232
+ exponent_range = np.arange(order + 1)
233
+ sigma2 = sigma * sigma
234
+ x = np.arange(-radius, radius+1)
235
+ phi_x = np.exp(-0.5 / sigma2 * x ** 2)
236
+ phi_x = phi_x / phi_x.sum()
237
+
238
+ if order == 0:
239
+ return phi_x
240
+ else:
241
+ # f(x) = q(x) * phi(x) = q(x) * exp(p(x))
242
+ # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
243
+ # p'(x) = -1 / sigma ** 2
244
+ # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
245
+ # coefficients of q(x)
246
+ q = np.zeros(order + 1)
247
+ q[0] = 1
248
+ D = np.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
249
+ P = np.diag(np.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
250
+ Q_deriv = D + P
251
+ for _ in range(order):
252
+ q = Q_deriv.dot(q)
253
+ q = (x[:, None] ** exponent_range).dot(q)
254
+ return q * phi_x
255
+
256
+
257
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
                      mode="reflect", cval=0.0, truncate=4.0, *, radius=None):
    """1-D Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar
        standard deviation for Gaussian kernel
    %(axis)s
    order : int, optional
        An order of 0 corresponds to convolution with a Gaussian
        kernel. A positive order corresponds to convolution with
        that derivative of a Gaussian.
    %(output)s
    %(mode_reflect)s
    %(cval)s
    truncate : float, optional
        Truncate the filter at this many standard deviations.
        Default is 4.0.
    radius : None or int, optional
        Radius of the Gaussian kernel. If specified, the size of
        the kernel will be ``2*radius + 1``, and `truncate` is ignored.
        Default is None.

    Returns
    -------
    gaussian_filter1d : ndarray

    Notes
    -----
    The Gaussian kernel will have size ``2*radius + 1``. If `radius` is
    None, a default ``radius = round(truncate * sigma)`` is used.

    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter1d
    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
    array([ 1.42704095,  2.06782203,  3.        ,  3.93217797,  4.57295905])
    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
    array([ 2.91948343,  2.95023502,  3.        ,  3.04976498,  3.08051657])
    """
    # Default kernel half-width: `truncate` standard deviations, rounded.
    half_width = int(truncate * float(sigma) + 0.5)
    if radius is not None:
        half_width = radius
    if not isinstance(half_width, numbers.Integral) or half_width < 0:
        raise ValueError('Radius must be a nonnegative integer.')
    # correlate1d slides the kernel without flipping it, so reverse the
    # kernel here to obtain a true convolution.
    weights = _gaussian_kernel1d(sigma, order, half_width)[::-1]
    return correlate1d(input, weights, axis, output, mode, cval, 0)
324
+
325
+
326
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
                    mode="reflect", cval=0.0, truncate=4.0, *, radius=None,
                    axes=None):
    """Multidimensional Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        Standard deviation for Gaussian kernel, given for each axis as a
        sequence, or as a single number applied equally to all axes.
    order : int or sequence of ints, optional
        The order of the filter along each axis. An order of 0 corresponds
        to convolution with a Gaussian kernel. A positive order corresponds
        to convolution with that derivative of a Gaussian.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    truncate : float, optional
        Truncate the filter at this many standard deviations.
        Default is 4.0.
    radius : None or int or sequence of ints, optional
        Radius of the Gaussian kernel, given per axis or as a single number.
        If specified, the kernel size along each axis will be
        ``2*radius + 1``, and `truncate` is ignored. Default is None.
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise, `input` is
        filtered along the specified axes. When `axes` is specified, any
        tuples used for `sigma`, `order`, `mode` and/or `radius` must match
        the length of `axes`.

    Returns
    -------
    gaussian_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of 1-D
    convolution filters. Intermediate arrays are stored in the output
    data type, so output types with limited precision may give imprecise
    results.

    The Gaussian kernel has size ``2*radius + 1`` along each axis; when
    `radius` is None, ``radius = round(truncate * sigma)`` is used.

    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter
    >>> import numpy as np
    >>> a = np.arange(50, step=2).reshape((5, 5))
    >>> gaussian_filter(a, sigma=1)
    array([[ 4,  6,  8,  9, 11],
           [10, 12, 14, 15, 17],
           [20, 22, 24, 25, 27],
           [29, 31, 33, 34, 36],
           [35, 37, 39, 40, 42]])
    """
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)

    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    # Broadcast scalar parameters to one value per filtered axis.
    orders = _ni_support._normalize_sequence(order, num_axes)
    sigmas = _ni_support._normalize_sequence(sigma, num_axes)
    modes = _ni_support._normalize_sequence(mode, num_axes)
    radiuses = _ni_support._normalize_sequence(radius, num_axes)
    # Axes whose sigma is (numerically) zero are identity passes; drop them.
    params = [(axes[k], sigmas[k], orders[k], modes[k], radiuses[k])
              for k in range(num_axes) if sigmas[k] > 1e-15]
    if not params:
        output[...] = input[...]
        return output
    # Apply a 1-D Gaussian per remaining axis, feeding output back as input.
    for ax, ax_sigma, ax_order, ax_mode, ax_radius in params:
        gaussian_filter1d(input, ax_sigma, ax, ax_order, output,
                          ax_mode, cval, truncate, radius=ax_radius)
        input = output
    return output
431
+
432
+
433
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Prewitt filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    prewitt : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    sobel: Sobel filter

    Notes
    -----
    This function computes the one-dimensional Prewitt filter.
    Horizontal edges are emphasised with the horizontal transform (axis=0),
    vertical edges with the vertical transform (axis=1), and so on for
    higher dimensions. These can be combined to give the magnitude.
    """
    input = np.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # Central-difference derivative along the requested axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... followed by box smoothing along every remaining axis.
    for other_axis in range(input.ndim):
        if other_axis != axis:
            correlate1d(output, [1, 1, 1], other_axis, output,
                        modes[other_axis], cval, 0)
    return output
493
+
494
+
495
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Sobel filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    sobel : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function computes the axis-specific Sobel gradient.
    The horizontal edges can be emphasised with the horizontal transform
    (axis=0), the vertical edges with the vertical transform (axis=1), and
    so on for higher dimensions. These can be combined to give the
    magnitude.
    """
    input = np.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # Central-difference derivative along the chosen axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... followed by triangular [1, 2, 1] smoothing along the other axes.
    for other_axis in range(input.ndim):
        if other_axis != axis:
            correlate1d(output, [1, 2, 1], other_axis, output,
                        modes[other_axis], cval, 0)
    return output
551
+
552
+
553
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
                    cval=0.0,
                    extra_arguments=(),
                    extra_keywords=None,
                    *, axes=None):
    """
    N-D Laplace filter using a provided second derivative function.

    Parameters
    ----------
    %(input)s
    derivative2 : callable
        Callable with the following signature::

            derivative2(input, axis, output, mode, cval,
                        *extra_arguments, **extra_keywords)

        See `extra_arguments`, `extra_keywords` below.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    axes : tuple of int or None
        The axes over which to apply the filter. If a `mode` tuple is
        provided, its length must match the number of axes.

    Returns
    -------
    generic_laplace : ndarray
        Filtered array. Has the same shape as `input`.

    """
    if extra_keywords is None:
        extra_keywords = {}
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = _ni_support._check_axes(axes, input.ndim)
    if len(axes) == 0:
        # Nothing to filter: the Laplacian over zero axes is the input.
        output[...] = input[...]
        return output
    modes = _ni_support._normalize_sequence(mode, len(axes))
    # First axis writes its second derivative directly into `output`;
    # subsequent axes request a temporary (by passing output.dtype instead
    # of an array) and are accumulated.
    derivative2(input, axes[0], output, modes[0], cval,
                *extra_arguments, **extra_keywords)
    for k in range(1, len(axes)):
        output += derivative2(input, axes[k], output.dtype, modes[k], cval,
                              *extra_arguments, **extra_keywords)
    return output
603
+
604
+
605
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0, *, axes=None):
    """N-D Laplace filter based on approximate second derivatives.

    Parameters
    ----------
    %(input)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    axes : tuple of int or None
        The axes over which to apply the filter. If a `mode` tuple is
        provided, its length must match the number of axes.

    Returns
    -------
    laplace : ndarray
        Filtered array. Has the same shape as `input`.
    """
    def derivative2(input, axis, output, mode, cval):
        # Discrete second difference [1, -2, 1] along a single axis.
        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)

    return generic_laplace(input, derivative2, output, mode, cval, axes=axes)
641
+
642
+
643
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
                     cval=0.0, *, axes=None, **kwargs):
    """Multidimensional Laplace filter using Gaussian second derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter, given for each
        axis as a sequence, or as a single number applied to all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    axes : tuple of int or None
        The axes over which to apply the filter. If `sigma` or `mode`
        tuples are provided, their length must match the number of axes.
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_laplace : ndarray
        Filtered array. Has the same shape as `input`.
    """
    input = np.asarray(input)

    def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
        # Second-order Gaussian derivative along `axis`; zeroth order
        # (plain smoothing) on every other axis.
        order = [0] * input.ndim
        order[axis] = 2
        return gaussian_filter(input, sigma, order, output, mode, cval,
                               **kwargs)

    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    sigma = _ni_support._normalize_sequence(sigma, num_axes)
    if num_axes < input.ndim:
        # Axes that are not filtered get sigma == 0 (identity pass).
        full_sigma = [0,] * input.ndim
        for value, ax in zip(sigma, axes):
            full_sigma[ax] = value
        sigma = full_sigma

    return generic_laplace(input, derivative2, output, mode, cval,
                           extra_arguments=(sigma,),
                           extra_keywords=kwargs,
                           axes=axes)
708
+
709
+
710
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
                               mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None,
                               *, axes=None):
    """Gradient magnitude using a provided gradient function.

    Parameters
    ----------
    %(input)s
    derivative : callable
        Callable with the following signature::

            derivative(input, axis, output, mode, cval,
                       *extra_arguments, **extra_keywords)

        See `extra_arguments`, `extra_keywords` below.
        `derivative` can assume that `input` and `output` are ndarrays.
        Note that the output from `derivative` is modified inplace;
        be careful to copy important inputs before returning them.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    axes : tuple of int or None
        The axes over which to apply the filter. If a `mode` tuple is
        provided, its length must match the number of axes.

    Returns
    -------
    generic_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.

    """
    if extra_keywords is None:
        extra_keywords = {}
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = _ni_support._check_axes(axes, input.ndim)
    if len(axes) > 0:
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # Accumulate the sum of squared per-axis derivatives in `output`.
        derivative(input, axes[0], output, modes[0], cval,
                   *extra_arguments, **extra_keywords)
        np.multiply(output, output, output)
        for ii in range(1, len(axes)):
            # Passing output.dtype (not an array) asks `derivative` for a
            # fresh temporary of that dtype, leaving `output` untouched.
            tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
                             *extra_arguments, **extra_keywords)
            np.multiply(tmp, tmp, tmp)
            output += tmp
        # This allows the sqrt to work with a different default casting
        # (e.g. writing float results back into an integer output array).
        np.sqrt(output, output, casting='unsafe')
    else:
        output[...] = input[...]
    return output
765
+
766
+
767
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
                                mode="reflect", cval=0.0, *, axes=None,
                                **kwargs):
    """Multidimensional gradient magnitude using Gaussian derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter, given for each
        axis as a sequence, or as a single number applied to all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    axes : tuple of int or None
        The axes over which to apply the filter. If `sigma` or `mode`
        tuples are provided, their length must match the number of axes.
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.
    """
    input = np.asarray(input)

    def derivative(input, axis, output, mode, cval, sigma, **kwargs):
        # First-order Gaussian derivative along `axis` only.
        order = [0] * input.ndim
        order[axis] = 1
        return gaussian_filter(input, sigma, order, output, mode,
                               cval, **kwargs)

    return generic_gradient_magnitude(input, derivative, output, mode,
                                      cval, extra_arguments=(sigma,),
                                      extra_keywords=kwargs, axes=axes)
818
+
819
+
820
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution, axes):
    # Shared backend for `correlate` and `convolve`.
    input = np.asarray(input)
    weights = np.asarray(weights)
    complex_input = input.dtype.kind == 'c'
    complex_weights = weights.dtype.kind == 'c'
    if complex_input or complex_weights:
        if complex_weights and not convolution:
            # As for np.correlate, conjugate weights rather than input.
            weights = weights.conj()
        kwargs = dict(
            mode=mode, origin=origin, convolution=convolution, axes=axes
        )
        output = _ni_support._get_output(output, input, complex_output=True)

        # Decompose into real-valued correlations of real/imag components.
        return _complex_via_real_components(_correlate_or_convolve, input,
                                            weights, output, cval, **kwargs)

    axes = _ni_support._check_axes(axes, input.ndim)
    weights = np.asarray(weights, dtype=np.float64)

    # Expand weights and origins to full dimensionality when only a
    # subset of axes is filtered.
    weights = _expand_footprint(input.ndim, axes, weights, "weights")
    origins = _expand_origin(input.ndim, axes, origin)

    wshape = [n for n in weights.shape if n > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError(f"weights.ndim ({len(wshape)}) must match "
                           f"len(axes) ({len(axes)})")
    if convolution:
        # Convolution is correlation with a flipped kernel; mirror the
        # origins too (even-sized axes shift by one extra sample).
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        for k in range(len(origins)):
            origins[k] = -origins[k]
            if not weights.shape[k] & 1:
                origins[k] -= 1
    for origin, lenw in zip(origins, wshape):
        if _invalid_origin(origin, lenw):
            raise ValueError('Invalid origin; origin must satisfy '
                             '-(weights.shape[k] // 2) <= origin[k] <= '
                             '(weights.shape[k]-1) // 2')

    if not weights.flags.contiguous:
        weights = weights.copy()
    output = _ni_support._get_output(output, input)
    temp_needed = np.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    if not isinstance(mode, str) and isinstance(mode, Iterable):
        raise RuntimeError("A sequence of modes is not supported")
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate(input, weights, output, mode, cval, origins)
    if temp_needed:
        temp[...] = output
        output = temp
    return output
877
+
878
+
879
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
              origin=0, *, axes=None):
    """
    Multidimensional correlation.

    The array is correlated with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise, `input`
        is filtered along the specified axes. When `axes` is specified,
        any tuples used for `mode` or `origin` must match the length of
        `axes`.

    Returns
    -------
    result : ndarray
        The result of correlation of `input` with `weights`.

    See Also
    --------
    convolve : Convolve an image with a kernel.

    Examples
    --------
    Correlation moves a kernel over the image and computes the sum of
    products at each location. For example, with the cross-shaped kernel
    below, element ``[2, 2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.

    >>> from scipy.ndimage import correlate
    >>> import numpy as np
    >>> input_img = np.arange(25).reshape(5, 5)
    >>> weights = [[0, 1, 0],
    ...            [1, 1, 1],
    ...            [0, 1, 0]]
    >>> correlate(input_img, weights)
    array([[  6,  10,  15,  20,  24],
           [ 26,  30,  35,  40,  44],
           [ 51,  55,  60,  65,  69],
           [ 76,  80,  85,  90,  94],
           [ 96, 100, 105, 110, 114]])

    """
    # convolution=False: the kernel is applied without flipping.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, False, axes)
947
+
948
+
949
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
             origin=0, *, axes=None):
    """
    Multidimensional convolution.

    The array is convolved with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : array_like
        Array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0
    origin : int or sequence, optional
        Controls the placement of the filter on the input array's pixels.
        A value of 0 (the default) centers the filter over the pixel, with
        positive values shifting the filter to the right, and negative ones
        to the left. By passing a sequence of origins with length equal to
        the number of dimensions of the input array, different shifts can
        be specified along each axis.
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise, `input`
        is filtered along the specified axes. When `axes` is specified,
        any tuples used for `mode` or `origin` must match the length of
        `axes`.

    Returns
    -------
    result : ndarray
        The result of convolution of `input` with `weights`.

    See Also
    --------
    correlate : Correlate an image with a kernel.

    Notes
    -----
    Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
    W is the `weights` kernel,
    j is the N-D spatial index over :math:`W`,
    I is the `input` and k is the coordinate of the center of
    W, specified by `origin` in the input parameters.

    Examples
    --------
    With ``mode='constant', cval=0.0``, borders (where the kernel extends
    beyond an edge of `input`) are treated as zeros.

    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> from scipy import ndimage
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
    array([[11, 10,  7,  4],
           [10,  3, 11, 11],
           [15, 12, 14,  7],
           [12,  3,  7,  0]])

    With ``mode='reflect'`` (the default), outer values are reflected at
    the edge of `input` to fill in missing values.

    >>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
    >>> ndimage.convolve(b, k, mode='reflect')
    array([[5, 0, 0],
           [3, 0, 0],
           [1, 0, 0]])

    """
    # convolution=True: the backend flips the kernel before correlating.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, True, axes)
1066
+
1067
+
1068
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D uniform filter along the given axis.

    The lines of the array along the given axis are filtered with a
    uniform filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length of uniform filter
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray
        Filtered array. Has same shape as `input`.

    Examples
    --------
    >>> from scipy.ndimage import uniform_filter1d
    >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([4, 3, 4, 1, 4, 6, 6, 3])
    """
    input = np.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    complex_output = input.dtype.kind == 'c'
    output = _ni_support._get_output(output, input,
                                     complex_output=complex_output)
    # The filter center (shifted by origin) must lie inside the window.
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    if complex_output:
        # Filter real and imaginary parts independently.
        _nd_image.uniform_filter1d(input.real, size, axis, output.real,
                                   mode, np.real(cval), origin)
        _nd_image.uniform_filter1d(input.imag, size, axis, output.imag,
                                   mode, np.imag(cval), origin)
    else:
        _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
                                   origin)
    return output
1117
+
1118
+
1119
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
                   cval=0.0, origin=0, *, axes=None):
    """Multidimensional uniform filter.

    Parameters
    ----------
    %(input)s
    size : int or sequence of ints, optional
        The sizes of the uniform filter are given for each axis as a
        sequence, or as a single number, in which case the size is
        equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise, `input`
        is filtered along the specified axes. When `axes` is specified,
        any tuples used for `size`, `origin`, and/or `mode` must match
        the length of `axes`.

    Returns
    -------
    uniform_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of 1-D
    uniform filters. Intermediate arrays are stored in the output data
    type, so output types with limited precision may give imprecise
    results.
    """
    input = np.asarray(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=input.dtype.kind == 'c')
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    # Broadcast scalar parameters to one value per filtered axis.
    sizes = _ni_support._normalize_sequence(size, num_axes)
    origins = _ni_support._normalize_sequence(origin, num_axes)
    modes = _ni_support._normalize_sequence(mode, num_axes)
    # Size-1 windows are identity passes and are skipped.
    params = [(axes[k], sizes[k], origins[k], modes[k])
              for k in range(num_axes) if sizes[k] > 1]
    if not params:
        output[...] = input[...]
        return output
    for ax, ax_size, ax_origin, ax_mode in params:
        uniform_filter1d(input, int(ax_size), ax, output, ax_mode,
                         cval, ax_origin)
        input = output
    return output
1187
+
1188
+
1189
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D minimum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    minimum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D minimum
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray.
        Filtered image. Has the same shape as `input`.

    Notes
    -----
    This function implements the MINLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html

    Examples
    --------
    >>> from scipy.ndimage import minimum_filter1d
    >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([2, 0, 0, 0, 1, 1, 0, 0])
    """
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    # The filter center (shifted by origin) must lie inside the window.
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # The trailing 1 selects the "minimum" branch of the shared C routine.
    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
                                  origin, 1)
    return output
1244
+
1245
+
1246
+ @_ni_docstrings.docfiller
1247
+ def maximum_filter1d(input, size, axis=-1, output=None,
1248
+ mode="reflect", cval=0.0, origin=0):
1249
+ """Calculate a 1-D maximum filter along the given axis.
1250
+
1251
+ The lines of the array along the given axis are filtered with a
1252
+ maximum filter of given size.
1253
+
1254
+ Parameters
1255
+ ----------
1256
+ %(input)s
1257
+ size : int
1258
+ Length along which to calculate the 1-D maximum.
1259
+ %(axis)s
1260
+ %(output)s
1261
+ %(mode_reflect)s
1262
+ %(cval)s
1263
+ %(origin)s
1264
+
1265
+ Returns
1266
+ -------
1267
+ maximum1d : ndarray, None
1268
+ Maximum-filtered array with same shape as input.
1269
+ None if `output` is not None
1270
+
1271
+ Notes
1272
+ -----
1273
+ This function implements the MAXLIST algorithm [1]_, as described by
1274
+ Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
1275
+ the `input` length, regardless of filter size.
1276
+
1277
+ References
1278
+ ----------
1279
+ .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
1280
+ .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
1281
+
1282
+ Examples
1283
+ --------
1284
+ >>> from scipy.ndimage import maximum_filter1d
1285
+ >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
1286
+ array([8, 8, 8, 4, 9, 9, 9, 9])
1287
+ """
1288
+ input = np.asarray(input)
1289
+ if np.iscomplexobj(input):
1290
+ raise TypeError('Complex type not supported')
1291
+ axis = normalize_axis_index(axis, input.ndim)
1292
+ if size < 1:
1293
+ raise RuntimeError('incorrect filter size')
1294
+ output = _ni_support._get_output(output, input)
1295
+ if (size // 2 + origin < 0) or (size // 2 + origin >= size):
1296
+ raise ValueError('invalid origin')
1297
+ mode = _ni_support._extend_mode_to_code(mode)
1298
+ _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
1299
+ origin, 0)
1300
+ return output
1301
+
1302
+
1303
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum, axes=None):
    """Shared implementation behind `minimum_filter` and `maximum_filter`.

    A truthy `minimum` computes a minimum filter, a falsy one a maximum
    filter.  Separable cases (no `structure`, and either a plain `size` or
    an all-True `footprint`) are handled as a sequence of 1-D passes;
    everything else is delegated to the n-D C kernel
    `_nd_image.min_or_max_filter`.
    """
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=3)
    # Decide whether the filter can be applied as a sequence of 1-D passes.
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable = True
        else:
            footprint = np.asarray(footprint, dtype=bool)
            if not footprint.any():
                raise ValueError("All-zero footprint is not supported.")
            if footprint.all():
                # A fully-True footprint is equivalent to a plain size and
                # can therefore be processed separably.
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        # A greyscale structure forces the non-separable n-D code path.
        structure = np.asarray(structure, dtype=np.float64)
        separable = False
        if footprint is None:
            footprint = np.ones(structure.shape, bool)
        else:
            footprint = np.asarray(footprint, dtype=bool)
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError("Complex type not supported")
    output = _ni_support._get_output(output, input)
    temp_needed = np.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory; filter into a fresh
        # buffer and copy back into the caller's array at the end.
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    if separable:
        origins = _ni_support._normalize_sequence(origin, num_axes)
        sizes = _ni_support._normalize_sequence(size, num_axes)
        modes = _ni_support._normalize_sequence(mode, num_axes)
        # Keep only axes with a non-trivial window (size > 1).
        axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
                for ii in range(len(axes)) if sizes[ii] > 1]
        if minimum:
            filter_ = minimum_filter1d
        else:
            filter_ = maximum_filter1d
        if len(axes) > 0:
            for axis, size, origin, mode in axes:
                # After the first pass, `input` is rebound to `output` so
                # each subsequent 1-D pass refines the accumulated result.
                filter_(input, int(size), axis, output, mode, cval, origin)
                input = output
        else:
            output[...] = input[...]
    else:
        # expand origins and footprint if num_axes < input.ndim
        footprint = _expand_footprint(input.ndim, axes, footprint)
        origins = _expand_origin(input.ndim, axes, origin)

        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError(f"footprint.ndim ({footprint.ndim}) must match "
                               f"len(axes) ({len(axes)})")
        # Each per-axis origin must keep the window anchored on its center.
        for origin, lenf in zip(origins, fshape):
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
                raise ValueError("invalid origin")
        # The C kernel expects contiguous footprint/structure arrays.
        if not footprint.flags.contiguous:
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != num_axes:
                raise RuntimeError("structure array has incorrect shape")
            if num_axes != structure.ndim:
                # Insert singleton dimensions for the unfiltered axes.
                structure = np.expand_dims(
                    structure,
                    tuple(ax for ax in range(structure.ndim) if ax not in axes)
                )
            if not structure.flags.contiguous:
                structure = structure.copy()
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported for non-separable "
                "footprints")
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    if temp_needed:
        # Copy the result back into the caller-provided (aliased) output.
        temp[...] = output
        output = temp
    return output
1392
+
1393
+
1394
+ @_ni_docstrings.docfiller
1395
+ def minimum_filter(input, size=None, footprint=None, output=None,
1396
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
1397
+ """Calculate a multidimensional minimum filter.
1398
+
1399
+ Parameters
1400
+ ----------
1401
+ %(input)s
1402
+ %(size_foot)s
1403
+ %(output)s
1404
+ %(mode_multiple)s
1405
+ %(cval)s
1406
+ %(origin_multiple)s
1407
+ axes : tuple of int or None, optional
1408
+ If None, `input` is filtered along all axes. Otherwise,
1409
+ `input` is filtered along the specified axes. When `axes` is
1410
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1411
+ must match the length of `axes`. The ith entry in any of these tuples
1412
+ corresponds to the ith entry in `axes`.
1413
+
1414
+ Returns
1415
+ -------
1416
+ minimum_filter : ndarray
1417
+ Filtered array. Has the same shape as `input`.
1418
+
1419
+ Notes
1420
+ -----
1421
+ A sequence of modes (one per axis) is only supported when the footprint is
1422
+ separable. Otherwise, a single mode string must be provided.
1423
+
1424
+ Examples
1425
+ --------
1426
+ >>> from scipy import ndimage, datasets
1427
+ >>> import matplotlib.pyplot as plt
1428
+ >>> fig = plt.figure()
1429
+ >>> plt.gray() # show the filtered result in grayscale
1430
+ >>> ax1 = fig.add_subplot(121) # left side
1431
+ >>> ax2 = fig.add_subplot(122) # right side
1432
+ >>> ascent = datasets.ascent()
1433
+ >>> result = ndimage.minimum_filter(ascent, size=20)
1434
+ >>> ax1.imshow(ascent)
1435
+ >>> ax2.imshow(result)
1436
+ >>> plt.show()
1437
+ """
1438
+ return _min_or_max_filter(input, size, footprint, None, output, mode,
1439
+ cval, origin, 1, axes)
1440
+
1441
+
1442
+ @_ni_docstrings.docfiller
1443
+ def maximum_filter(input, size=None, footprint=None, output=None,
1444
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
1445
+ """Calculate a multidimensional maximum filter.
1446
+
1447
+ Parameters
1448
+ ----------
1449
+ %(input)s
1450
+ %(size_foot)s
1451
+ %(output)s
1452
+ %(mode_multiple)s
1453
+ %(cval)s
1454
+ %(origin_multiple)s
1455
+ axes : tuple of int or None, optional
1456
+ If None, `input` is filtered along all axes. Otherwise,
1457
+ `input` is filtered along the specified axes. When `axes` is
1458
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1459
+ must match the length of `axes`. The ith entry in any of these tuples
1460
+ corresponds to the ith entry in `axes`.
1461
+
1462
+ Returns
1463
+ -------
1464
+ maximum_filter : ndarray
1465
+ Filtered array. Has the same shape as `input`.
1466
+
1467
+ Notes
1468
+ -----
1469
+ A sequence of modes (one per axis) is only supported when the footprint is
1470
+ separable. Otherwise, a single mode string must be provided.
1471
+
1472
+ Examples
1473
+ --------
1474
+ >>> from scipy import ndimage, datasets
1475
+ >>> import matplotlib.pyplot as plt
1476
+ >>> fig = plt.figure()
1477
+ >>> plt.gray() # show the filtered result in grayscale
1478
+ >>> ax1 = fig.add_subplot(121) # left side
1479
+ >>> ax2 = fig.add_subplot(122) # right side
1480
+ >>> ascent = datasets.ascent()
1481
+ >>> result = ndimage.maximum_filter(ascent, size=20)
1482
+ >>> ax1.imshow(ascent)
1483
+ >>> ax2.imshow(result)
1484
+ >>> plt.show()
1485
+ """
1486
+ return _min_or_max_filter(input, size, footprint, None, output, mode,
1487
+ cval, origin, 0, axes)
1488
+
1489
+
1490
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
                 mode="reflect", cval=0.0, origin=0, operation='rank',
                 axes=None):
    """Shared implementation behind `rank_filter`, `median_filter` and
    `percentile_filter`.

    `operation` selects how `rank` is interpreted: 'rank' uses it directly,
    'median' ignores it and uses the middle element of the footprint, and
    'percentile' converts it from a fraction (0-100) of the footprint size.
    """
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=3)
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, num_axes)
        footprint = np.ones(sizes, dtype=bool)
    else:
        footprint = np.asarray(footprint, dtype=bool)
    # expand origins, footprint and modes if num_axes < input.ndim
    footprint = _expand_footprint(input.ndim, axes, footprint)
    origins = _expand_origin(input.ndim, axes, origin)
    mode = _expand_mode(input.ndim, axes, mode)

    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError(f"footprint.ndim ({footprint.ndim}) must match "
                           f"len(axes) ({len(axes)})")
    # Each per-axis origin must keep the window anchored on its center.
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    # The C kernels expect a contiguous footprint array.
    if not footprint.flags.contiguous:
        footprint = footprint.copy()
    # Number of True elements in the footprint, i.e. the window population.
    filter_size = np.where(footprint, 1, 0).sum()
    if operation == 'median':
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        # Negative percentiles count down from 100 (e.g. -20 -> 80).
        if percentile < 0.0:
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError('invalid percentile')
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    # Negative ranks count down from the largest element.
    if rank < 0:
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    # Rank 0 / filter_size-1 is exactly a minimum / maximum filter, which
    # have faster dedicated implementations.
    if rank == 0:
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origins, axes=None)
    elif rank == filter_size - 1:
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origins, axes=None)
    else:
        output = _ni_support._get_output(output, input)
        temp_needed = np.may_share_memory(input, output)
        if temp_needed:
            # input and output arrays cannot share memory
            temp = output
            output = _ni_support._get_output(output.dtype, input)
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported by non-separable rank "
                "filters")
        mode = _ni_support._extend_mode_to_code(mode, is_filter=True)
        if input.ndim == 1:
            # Dedicated 1-D kernel.  It handles int64/float64/float32
            # natively; other dtypes are promoted first and the result is
            # cast back into `output` afterwards.
            if input.dtype in (np.int64, np.float64, np.float32):
                x = input
                x_out = output
            elif input.dtype == np.float16:
                x = input.astype('float32')
                x_out = np.empty(x.shape, dtype='float32')
            elif np.result_type(input, np.int64) == np.int64:
                x = input.astype('int64')
                x_out = np.empty(x.shape, dtype='int64')
            elif input.dtype.kind in 'biu':
                # cast any other boolean, integer or unsigned type to int64
                x = input.astype('int64')
                x_out = np.empty(x.shape, dtype='int64')
            else:
                raise RuntimeError('Unsupported array type')
            cval = x.dtype.type(cval)
            _rank_filter_1d.rank_filter(x, rank, footprint.size, x_out, mode, cval,
                                        origin)
            if input.dtype not in (np.int64, np.float64, np.float32):
                np.copyto(output, x_out, casting='unsafe')
        else:
            # General n-D rank filter in C.
            _nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins)
        if temp_needed:
            # Copy the result back into the caller-provided (aliased) output.
            temp[...] = output
            output = temp
        return output
1585
+
1586
+
1587
+ @_ni_docstrings.docfiller
1588
+ def rank_filter(input, rank, size=None, footprint=None, output=None,
1589
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
1590
+ """Calculate a multidimensional rank filter.
1591
+
1592
+ Parameters
1593
+ ----------
1594
+ %(input)s
1595
+ rank : int
1596
+ The rank parameter may be less than zero, i.e., rank = -1
1597
+ indicates the largest element.
1598
+ %(size_foot)s
1599
+ %(output)s
1600
+ %(mode_reflect)s
1601
+ %(cval)s
1602
+ %(origin_multiple)s
1603
+ axes : tuple of int or None, optional
1604
+ If None, `input` is filtered along all axes. Otherwise,
1605
+ `input` is filtered along the specified axes. When `axes` is
1606
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1607
+ must match the length of `axes`. The ith entry in any of these tuples
1608
+ corresponds to the ith entry in `axes`.
1609
+
1610
+ Returns
1611
+ -------
1612
+ rank_filter : ndarray
1613
+ Filtered array. Has the same shape as `input`.
1614
+
1615
+ Examples
1616
+ --------
1617
+ >>> from scipy import ndimage, datasets
1618
+ >>> import matplotlib.pyplot as plt
1619
+ >>> fig = plt.figure()
1620
+ >>> plt.gray() # show the filtered result in grayscale
1621
+ >>> ax1 = fig.add_subplot(121) # left side
1622
+ >>> ax2 = fig.add_subplot(122) # right side
1623
+ >>> ascent = datasets.ascent()
1624
+ >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
1625
+ >>> ax1.imshow(ascent)
1626
+ >>> ax2.imshow(result)
1627
+ >>> plt.show()
1628
+ """
1629
+ rank = operator.index(rank)
1630
+ return _rank_filter(input, rank, size, footprint, output, mode, cval,
1631
+ origin, 'rank', axes=axes)
1632
+
1633
+
1634
+ @_ni_docstrings.docfiller
1635
+ def median_filter(input, size=None, footprint=None, output=None,
1636
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
1637
+ """
1638
+ Calculate a multidimensional median filter.
1639
+
1640
+ Parameters
1641
+ ----------
1642
+ %(input)s
1643
+ %(size_foot)s
1644
+ %(output)s
1645
+ %(mode_reflect)s
1646
+ %(cval)s
1647
+ %(origin_multiple)s
1648
+ axes : tuple of int or None, optional
1649
+ If None, `input` is filtered along all axes. Otherwise,
1650
+ `input` is filtered along the specified axes. When `axes` is
1651
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1652
+ must match the length of `axes`. The ith entry in any of these tuples
1653
+ corresponds to the ith entry in `axes`.
1654
+
1655
+ Returns
1656
+ -------
1657
+ median_filter : ndarray
1658
+ Filtered array. Has the same shape as `input`.
1659
+
1660
+ See Also
1661
+ --------
1662
+ scipy.signal.medfilt2d
1663
+
1664
+ Notes
1665
+ -----
1666
+ For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
1667
+ the specialised function `scipy.signal.medfilt2d` may be faster. It is
1668
+ however limited to constant mode with ``cval=0``.
1669
+
1670
+ Examples
1671
+ --------
1672
+ >>> from scipy import ndimage, datasets
1673
+ >>> import matplotlib.pyplot as plt
1674
+ >>> fig = plt.figure()
1675
+ >>> plt.gray() # show the filtered result in grayscale
1676
+ >>> ax1 = fig.add_subplot(121) # left side
1677
+ >>> ax2 = fig.add_subplot(122) # right side
1678
+ >>> ascent = datasets.ascent()
1679
+ >>> result = ndimage.median_filter(ascent, size=20)
1680
+ >>> ax1.imshow(ascent)
1681
+ >>> ax2.imshow(result)
1682
+ >>> plt.show()
1683
+ """
1684
+ return _rank_filter(input, 0, size, footprint, output, mode, cval,
1685
+ origin, 'median', axes=axes)
1686
+
1687
+
1688
+ @_ni_docstrings.docfiller
1689
+ def percentile_filter(input, percentile, size=None, footprint=None,
1690
+ output=None, mode="reflect", cval=0.0, origin=0, *,
1691
+ axes=None):
1692
+ """Calculate a multidimensional percentile filter.
1693
+
1694
+ Parameters
1695
+ ----------
1696
+ %(input)s
1697
+ percentile : scalar
1698
+ The percentile parameter may be less than zero, i.e.,
1699
+ percentile = -20 equals percentile = 80
1700
+ %(size_foot)s
1701
+ %(output)s
1702
+ %(mode_reflect)s
1703
+ %(cval)s
1704
+ %(origin_multiple)s
1705
+ axes : tuple of int or None, optional
1706
+ If None, `input` is filtered along all axes. Otherwise,
1707
+ `input` is filtered along the specified axes. When `axes` is
1708
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1709
+ must match the length of `axes`. The ith entry in any of these tuples
1710
+ corresponds to the ith entry in `axes`.
1711
+
1712
+ Returns
1713
+ -------
1714
+ percentile_filter : ndarray
1715
+ Filtered array. Has the same shape as `input`.
1716
+
1717
+ Examples
1718
+ --------
1719
+ >>> from scipy import ndimage, datasets
1720
+ >>> import matplotlib.pyplot as plt
1721
+ >>> fig = plt.figure()
1722
+ >>> plt.gray() # show the filtered result in grayscale
1723
+ >>> ax1 = fig.add_subplot(121) # left side
1724
+ >>> ax2 = fig.add_subplot(122) # right side
1725
+ >>> ascent = datasets.ascent()
1726
+ >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
1727
+ >>> ax1.imshow(ascent)
1728
+ >>> ax2.imshow(result)
1729
+ >>> plt.show()
1730
+ """
1731
+ return _rank_filter(input, percentile, size, footprint, output, mode,
1732
+ cval, origin, 'percentile', axes=axes)
1733
+
1734
+
1735
+ @_ni_docstrings.docfiller
1736
+ def generic_filter1d(input, function, filter_size, axis=-1,
1737
+ output=None, mode="reflect", cval=0.0, origin=0,
1738
+ extra_arguments=(), extra_keywords=None):
1739
+ """Calculate a 1-D filter along the given axis.
1740
+
1741
+ `generic_filter1d` iterates over the lines of the array, calling the
1742
+ given function at each line. The arguments of the line are the
1743
+ input line, and the output line. The input and output lines are 1-D
1744
+ double arrays. The input line is extended appropriately according
1745
+ to the filter size and origin. The output line must be modified
1746
+ in-place with the result.
1747
+
1748
+ Parameters
1749
+ ----------
1750
+ %(input)s
1751
+ function : {callable, scipy.LowLevelCallable}
1752
+ Function to apply along given axis.
1753
+ filter_size : scalar
1754
+ Length of the filter.
1755
+ %(axis)s
1756
+ %(output)s
1757
+ %(mode_reflect)s
1758
+ %(cval)s
1759
+ %(origin)s
1760
+ %(extra_arguments)s
1761
+ %(extra_keywords)s
1762
+
1763
+ Returns
1764
+ -------
1765
+ generic_filter1d : ndarray
1766
+ Filtered array. Has the same shape as `input`.
1767
+
1768
+ Notes
1769
+ -----
1770
+ This function also accepts low-level callback functions with one of
1771
+ the following signatures and wrapped in `scipy.LowLevelCallable`:
1772
+
1773
+ .. code:: c
1774
+
1775
+ int function(double *input_line, npy_intp input_length,
1776
+ double *output_line, npy_intp output_length,
1777
+ void *user_data)
1778
+ int function(double *input_line, intptr_t input_length,
1779
+ double *output_line, intptr_t output_length,
1780
+ void *user_data)
1781
+
1782
+ The calling function iterates over the lines of the input and output
1783
+ arrays, calling the callback function at each line. The current line
1784
+ is extended according to the border conditions set by the calling
1785
+ function, and the result is copied into the array that is passed
1786
+ through ``input_line``. The length of the input line (after extension)
1787
+ is passed through ``input_length``. The callback function should apply
1788
+ the filter and store the result in the array passed through
1789
+ ``output_line``. The length of the output line is passed through
1790
+ ``output_length``. ``user_data`` is the data pointer provided
1791
+ to `scipy.LowLevelCallable` as-is.
1792
+
1793
+ The callback function must return an integer error status that is zero
1794
+ if something went wrong and one otherwise. If an error occurs, you should
1795
+ normally set the python error status with an informative message
1796
+ before returning, otherwise a default error message is set by the
1797
+ calling function.
1798
+
1799
+ In addition, some other low-level function pointer specifications
1800
+ are accepted, but these are for backward compatibility only and should
1801
+ not be used in new code.
1802
+
1803
+ """
1804
+ if extra_keywords is None:
1805
+ extra_keywords = {}
1806
+ input = np.asarray(input)
1807
+ if np.iscomplexobj(input):
1808
+ raise TypeError('Complex type not supported')
1809
+ output = _ni_support._get_output(output, input)
1810
+ if filter_size < 1:
1811
+ raise RuntimeError('invalid filter size')
1812
+ axis = normalize_axis_index(axis, input.ndim)
1813
+ if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
1814
+ filter_size):
1815
+ raise ValueError('invalid origin')
1816
+ mode = _ni_support._extend_mode_to_code(mode)
1817
+ _nd_image.generic_filter1d(input, function, filter_size, axis, output,
1818
+ mode, cval, origin, extra_arguments,
1819
+ extra_keywords)
1820
+ return output
1821
+
1822
+
1823
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
                   output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords=None, *, axes=None):
    """Calculate a multidimensional filter using the given function.

    At each element the provided function is called. The input values
    within the filter footprint at that element are passed to the function
    as a 1-D array of double values.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply at each element.
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    %(extra_arguments)s
    %(extra_keywords)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size` or `origin` must match the length
        of `axes`. The ith entry in any of these tuples corresponds to the ith
        entry in `axes`.

    Returns
    -------
    generic_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function also accepts low-level callback functions with one of
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int callback(double *buffer, npy_intp filter_size,
                    double *return_value, void *user_data)
       int callback(double *buffer, intptr_t filter_size,
                    double *return_value, void *user_data)

    The calling function iterates over the elements of the input and
    output arrays, calling the callback function at each element. The
    elements within the footprint of the filter at the current element are
    passed through the ``buffer`` parameter, and the number of elements
    within the footprint through ``filter_size``. The calculated value is
    returned in ``return_value``. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    Examples
    --------
    Import the necessary modules and load the example image used for
    filtering.

    >>> import numpy as np
    >>> from scipy import datasets
    >>> from scipy.ndimage import zoom, generic_filter
    >>> import matplotlib.pyplot as plt
    >>> ascent = zoom(datasets.ascent(), 0.5)

    Compute a maximum filter with kernel size 5 by passing a simple NumPy
    aggregation function as argument to `function`.

    >>> maximum_filter_result = generic_filter(ascent, np.amax, [5, 5])

    While a maximum filter could also directly be obtained using
    `maximum_filter`, `generic_filter` allows generic Python function or
    `scipy.LowLevelCallable` to be used as a filter. Here, we compute the
    range between maximum and minimum value as an example for a kernel size
    of 5.

    >>> def custom_filter(image):
    ...     return np.amax(image) - np.amin(image)
    >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5])

    Plot the original and filtered images.

    >>> fig, axes = plt.subplots(3, 1, figsize=(3, 9))
    >>> plt.gray()  # show the filtered result in grayscale
    >>> top, middle, bottom = axes
    >>> for ax in axes:
    ...     ax.set_axis_off()  # remove coordinate system
    >>> top.imshow(ascent)
    >>> top.set_title("Original image")
    >>> middle.imshow(maximum_filter_result)
    >>> middle.set_title("Maximum filter, Kernel: 5x5")
    >>> bottom.imshow(custom_filter_result)
    >>> bottom.set_title("Custom filter, Kernel: 5x5")
    >>> fig.tight_layout()

    """
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    extra_keywords = {} if extra_keywords is None else extra_keywords
    arr = np.asarray(input)
    if np.iscomplexobj(arr):
        raise TypeError('Complex type not supported')
    axes = _ni_support._check_axes(axes, arr.ndim)
    num_axes = len(axes)
    if footprint is not None:
        footprint = np.asarray(footprint, dtype=bool)
    else:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        footprint = np.ones(_ni_support._normalize_sequence(size, num_axes),
                            dtype=bool)

    # Map the reduced-axes description onto the full array dimensionality.
    footprint = _expand_footprint(arr.ndim, axes, footprint)
    origins = _expand_origin(arr.ndim, axes, origin)

    fshape = [dim for dim in footprint.shape if dim > 0]
    if len(fshape) != arr.ndim:
        raise RuntimeError(f"footprint.ndim ({footprint.ndim}) "
                           f"must match len(axes) ({num_axes})")
    # Each per-axis origin must keep the window anchored on its center.
    for origin_value, lenf in zip(origins, fshape):
        if not 0 <= lenf // 2 + origin_value < lenf:
            raise ValueError('invalid origin')
    # The C kernel expects a contiguous footprint array.
    if not footprint.flags.contiguous:
        footprint = footprint.copy()
    output = _ni_support._get_output(output, arr)

    mode_code = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter(arr, function, footprint, output, mode_code,
                             cval, origins, extra_arguments, extra_keywords)
    return output
llava_video/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py ADDED
@@ -0,0 +1,1003 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ import itertools
32
+ import warnings
33
+
34
+ import numpy as np
35
+ from scipy._lib._util import normalize_axis_index
36
+
37
+ from scipy import special
38
+ from . import _ni_support
39
+ from . import _nd_image
40
+ from ._ni_docstrings import docfiller
41
+
42
+
43
# Public API of this module.
__all__ = [
    'spline_filter1d',
    'spline_filter',
    'geometric_transform',
    'map_coordinates',
    'affine_transform',
    'shift',
    'zoom',
    'rotate',
]
45
+
46
+
47
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=np.float64,
                    mode='mirror'):
    """
    Calculate a 1-D spline filter along the given axis.

    The lines of the array along the given axis are filtered by a
    spline filter. The order of the spline must be >= 2 and <= 5.

    Parameters
    ----------
    %(input)s
    order : int, optional
        The order of the spline, default is 3.
    axis : int, optional
        The axis along which the spline filter is applied. Default is the
        last axis.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array. Default is ``numpy.float64``.
    %(mode_interp_mirror)s

    Returns
    -------
    spline_filter1d : ndarray
        The filtered input.

    See Also
    --------
    spline_filter : Multidimensional spline filter.

    Notes
    -----
    All of the interpolation functions in `ndimage` do spline interpolation
    of the input image. If using B-splines of ``order > 1``, the input image
    values have to be converted to B-spline coefficients first, which is
    done by applying this 1-D filter sequentially along all axes of the
    input. All functions that require B-spline coefficients will
    automatically filter their inputs, a behavior controllable with the
    `prefilter` keyword argument. For functions that accept a `mode`
    parameter, the result will only be correct if it matches the `mode`
    used when filtering.

    For complex-valued `input`, this function processes the real and
    imaginary components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    We can filter an image using a 1-D spline along the given axis:

    >>> from scipy.ndimage import spline_filter1d
    >>> import numpy as np
    >>> orig_img = np.eye(20)  # create an image
    >>> orig_img[10, :] = 1.0
    >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
    >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    is_complex = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=is_complex)
    if is_complex:
        # Process the real and imaginary components independently.
        spline_filter1d(input.real, order, axis, output.real, mode)
        spline_filter1d(input.imag, order, axis, output.imag, mode)
        return output
    if order in (0, 1):
        # Orders 0 and 1 need no prefiltering: the spline coefficients
        # coincide with the data itself.
        output[...] = np.array(input)
    else:
        mode = _ni_support._extend_mode_to_code(mode)
        axis = normalize_axis_index(axis, input.ndim)
        _nd_image.spline_filter1d(input, order, axis, output, mode)
    return output
134
+
135
@docfiller
def spline_filter(input, order=3, output=np.float64, mode='mirror'):
    """
    Multidimensional spline filter.

    Parameters
    ----------
    %(input)s
    order : int, optional
        The order of the spline, default is 3.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array. Default is ``numpy.float64``.
    %(mode_interp_mirror)s

    Returns
    -------
    spline_filter : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    spline_filter1d : Calculate a 1-D spline filter along the given axis.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of 1-D spline
    filters. The intermediate arrays are stored in the same data type as
    the output. Therefore, for output types with a limited precision, the
    results may be imprecise because intermediate results may be stored
    with insufficient precision.

    For complex-valued `input`, this function processes the real and
    imaginary components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    We can filter an image using multidimensional splines:

    >>> from scipy.ndimage import spline_filter
    >>> import numpy as np
    >>> orig_img = np.eye(20)  # create an image
    >>> orig_img[10, :] = 1.0
    >>> sp_filter = spline_filter(orig_img, order=3)
    """
    if order < 2 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    is_complex = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=is_complex)
    if is_complex:
        # Process the real and imaginary components independently.
        spline_filter(input.real, order, output.real, mode)
        spline_filter(input.imag, order, output.imag, mode)
        return output
    if order not in (0, 1) and input.ndim > 0:
        # Filter along each axis in turn; `output` doubles as scratch
        # storage for the intermediate results.
        for axis in range(input.ndim):
            spline_filter1d(input, order, axis, output=output, mode=mode)
            input = output
    else:
        # Nothing to do: orders 0/1 (or a 0-d array) pass through.
        output[...] = input[...]
    return output
209
+
210
+
211
+ def _prepad_for_spline_filter(input, mode, cval):
212
+ if mode in ['nearest', 'grid-constant']:
213
+ npad = 12
214
+ if mode == 'grid-constant':
215
+ padded = np.pad(input, npad, mode='constant',
216
+ constant_values=cval)
217
+ elif mode == 'nearest':
218
+ padded = np.pad(input, npad, mode='edge')
219
+ else:
220
+ # other modes have exact boundary conditions implemented so
221
+ # no prepadding is needed
222
+ npad = 0
223
+ padded = input
224
+ return padded, npad
225
+
226
+
227
@docfiller
def geometric_transform(input, mapping, output_shape=None,
                        output=None, order=3,
                        mode='constant', cval=0.0, prefilter=True,
                        extra_arguments=(), extra_keywords=None):
    """
    Apply an arbitrary geometric transform.

    The given mapping function is used to find, for each point in the
    output, the corresponding coordinates in the input. The value of the
    input at those coordinates is determined by spline interpolation of
    the requested order.

    Parameters
    ----------
    %(input)s
    mapping : {callable, scipy.LowLevelCallable}
        A callable object that accepts a tuple of length equal to the output
        array rank, and returns the corresponding input coordinates as a
        tuple of length equal to the input array rank.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s
    extra_arguments : tuple, optional
        Extra arguments passed to `mapping`.
    extra_keywords : dict, optional
        Extra keywords passed to `mapping`.

    Returns
    -------
    output : ndarray
        The filtered input.

    See Also
    --------
    map_coordinates, affine_transform, spline_filter1d

    Notes
    -----
    This function also accepts low-level callback functions with one of
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int mapping(npy_intp *output_coordinates, double *input_coordinates,
                   int output_rank, int input_rank, void *user_data)
       int mapping(intptr_t *output_coordinates, double *input_coordinates,
                   int output_rank, int input_rank, void *user_data)

    The calling function iterates over the elements of the output array,
    calling the callback function at each element. The coordinates of the
    current output element are passed through ``output_coordinates``. The
    callback function must return the coordinates at which the input must
    be interpolated in ``input_coordinates``. The rank of the input and
    output arrays are given by ``input_rank`` and ``output_rank``
    respectively. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you
    should normally set the Python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    For complex-valued `input`, this function transforms the real and
    imaginary components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.ndimage import geometric_transform
    >>> a = np.arange(12.).reshape((4, 3))
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 0.5, output_coords[1] - 0.5)
    ...
    >>> geometric_transform(a, shift_func)
    array([[ 0.   ,  0.   ,  0.   ],
           [ 0.   ,  1.362,  2.738],
           [ 0.   ,  4.812,  6.187],
           [ 0.   ,  8.263,  9.637]])

    >>> b = [1, 2, 3, 4, 5]
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 3,)
    ...
    >>> geometric_transform(b, shift_func, mode='constant')
    array([0, 0, 0, 1, 2])
    >>> geometric_transform(b, shift_func, mode='nearest')
    array([1, 1, 1, 1, 2])
    >>> geometric_transform(b, shift_func, mode='reflect')
    array([3, 2, 1, 1, 2])
    >>> geometric_transform(b, shift_func, mode='wrap')
    array([2, 3, 4, 1, 2])

    """
    if extra_keywords is None:
        extra_keywords = {}
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if output_shape is None:
        output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    is_complex = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=is_complex)
    if is_complex:
        # Transform the real and imaginary components independently.
        common = dict(order=order, mode=mode, prefilter=prefilter,
                      output_shape=output_shape,
                      extra_arguments=extra_arguments,
                      extra_keywords=extra_keywords)
        geometric_transform(input.real, mapping, output=output.real,
                            cval=np.real(cval), **common)
        geometric_transform(input.imag, mapping, output=output.imag,
                            cval=np.imag(cval), **common)
        return output

    if prefilter and order > 1:
        # Convert image values to spline coefficients before interpolating.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64,
                                 mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.geometric_transform(filtered, mapping, None, None, None, output,
                                  order, mode, cval, npad, extra_arguments,
                                  extra_keywords)
    return output
371
+
372
+
373
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
                    mode='constant', cval=0.0, prefilter=True):
    """
    Map the input array to new coordinates by interpolation.

    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input. The value of the input at
    those coordinates is determined by spline interpolation of the
    requested order.

    The shape of the output is derived from that of the coordinate
    array by dropping the first axis. The values of the array along
    the first axis are the coordinates in the input array at which the
    output value is found.

    Parameters
    ----------
    %(input)s
    coordinates : array_like
        The coordinates at which `input` is evaluated.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    map_coordinates : ndarray
        The result of transforming the input. The shape of the output is
        derived from that of `coordinates` by dropping the first axis.

    See Also
    --------
    spline_filter, geometric_transform, scipy.interpolate

    Notes
    -----
    For complex-valued `input`, this function maps the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.arange(12.).reshape((4, 3))
    >>> a
    array([[  0.,   1.,   2.],
           [  3.,   4.,   5.],
           [  6.,   7.,   8.],
           [  9.,  10.,  11.]])
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])

    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].

    >>> inds = np.array([[0.5, 2], [0.5, 4]])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
    array([  2. , -33.3])
    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
    array([ 2.,  8.])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)

    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    coordinates = np.asarray(coordinates)
    if np.iscomplexobj(coordinates):
        raise TypeError('Complex type not supported')
    # The first axis of `coordinates` indexes the input dimensions; the
    # remaining axes give the output shape.
    output_shape = coordinates.shape[1:]
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coordinates.shape[0] != input.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    is_complex = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=is_complex)
    if is_complex:
        # Map the real and imaginary components independently.
        common = dict(order=order, mode=mode, prefilter=prefilter)
        map_coordinates(input.real, coordinates, output=output.real,
                        cval=np.real(cval), **common)
        map_coordinates(input.imag, coordinates, output=output.imag,
                        cval=np.imag(cval), **common)
        return output
    if prefilter and order > 1:
        # Convert image values to spline coefficients before interpolating.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.geometric_transform(filtered, None, coordinates, None, None,
                                  output, order, mode, cval, npad, None, None)
    return output
476
+
477
+
478
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
                     output=None, order=3,
                     mode='constant', cval=0.0, prefilter=True):
    """
    Apply an affine transformation.

    Given an output image pixel index vector ``o``, the pixel value
    is determined from the input image at position
    ``np.dot(matrix, o) + offset``.

    This does 'pull' (or 'backward') resampling, transforming the output
    space to the input to locate data. Affine transformations are often
    described in the 'push' (or 'forward') direction, transforming input
    to output. If you have a matrix for the 'push' transformation, use its
    inverse (:func:`numpy.linalg.inv`) in this function.

    Parameters
    ----------
    %(input)s
    matrix : ndarray
        The inverse coordinate transformation matrix, mapping output
        coordinates to input coordinates. If ``ndim`` is the number of
        dimensions of ``input``, the given matrix must have one of the
        following shapes:

            - ``(ndim, ndim)``: the linear transformation matrix for each
              output coordinate.
            - ``(ndim,)``: assume that the 2-D transformation matrix is
              diagonal, with the diagonal specified by the given value. A
              more efficient algorithm is then used that exploits the
              separability of the problem.
            - ``(ndim + 1, ndim + 1)``: assume that the transformation is
              specified using homogeneous coordinates [1]_. In this case,
              any value passed to ``offset`` is ignored.
            - ``(ndim, ndim + 1)``: as above, but the bottom row of a
              homogeneous transformation matrix is always
              ``[0, 0, ..., 1]``, and may be omitted.

    offset : float or sequence, optional
        The offset into the array where the transform is applied. If a
        float, `offset` is the same for each axis. If a sequence, `offset`
        should contain one value for each axis.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    affine_transform : ndarray
        The transformed input.

    Notes
    -----
    The given matrix and offset are used to find for each point in the
    output the corresponding coordinates in the input by an affine
    transformation. The value of the input at those coordinates is
    determined by spline interpolation of the requested order. Points
    outside the boundaries of the input are filled according to the given
    mode.

    .. versionchanged:: 0.18.0
        Previously, the exact interpretation of the affine transformation
        depended on whether the matrix was supplied as a 1-D or a
        2-D array. If a 1-D array was supplied
        to the matrix parameter, the output pixel value at index ``o``
        was determined from the input image at position
        ``matrix * (o + offset)``.

    For complex-valued `input`, this function transforms the real and
    imaginary components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if output_shape is None:
        if isinstance(output, np.ndarray):
            output_shape = output.shape
        else:
            output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    is_complex = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=is_complex)
    if is_complex:
        # Transform the real and imaginary components independently.
        common = dict(offset=offset, output_shape=output_shape, order=order,
                      mode=mode, prefilter=prefilter)
        affine_transform(input.real, matrix, output=output.real,
                         cval=np.real(cval), **common)
        affine_transform(input.imag, matrix, output=output.imag,
                         cval=np.imag(cval), **common)
        return output
    if prefilter and order > 1:
        # Convert image values to spline coefficients before interpolating.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    matrix = np.asarray(matrix, dtype=np.float64)
    if matrix.ndim not in (1, 2) or matrix.shape[0] < 1:
        raise RuntimeError('no proper affine matrix provided')
    ndim = input.ndim
    if (matrix.ndim == 2 and matrix.shape[1] == ndim + 1
            and matrix.shape[0] in (ndim, ndim + 1)):
        # Homogeneous-coordinate matrix: validate the (possibly implied)
        # bottom row, then split it into a linear part and an offset.
        if matrix.shape[0] == ndim + 1:
            exptd = [0] * ndim + [1]
            if not np.all(matrix[ndim] == exptd):
                msg = (f'Expected homogeneous transformation matrix with '
                       f'shape {matrix.shape} for image shape {input.shape}, '
                       f'but bottom row was not equal to {exptd}')
                raise ValueError(msg)
        # assume input is homogeneous coordinate transformation matrix
        offset = matrix[:ndim, ndim]
        matrix = matrix[:ndim, :ndim]
    if matrix.shape[0] != ndim:
        raise RuntimeError('affine matrix has wrong number of rows')
    if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
        raise RuntimeError('affine matrix has wrong number of columns')
    if not matrix.flags.contiguous:
        matrix = matrix.copy()
    offset = _ni_support._normalize_sequence(offset, ndim)
    offset = np.asarray(offset, dtype=np.float64)
    if offset.ndim != 1 or offset.shape[0] < 1:
        raise RuntimeError('no proper offset provided')
    if not offset.flags.contiguous:
        offset = offset.copy()
    if matrix.ndim == 1:
        # Diagonal matrix: use the faster separable zoom/shift code path.
        warnings.warn(
            "The behavior of affine_transform with a 1-D "
            "array supplied for the matrix parameter has changed in "
            "SciPy 0.18.0.",
            stacklevel=2
        )
        _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
                             mode, cval, npad, False)
    else:
        _nd_image.geometric_transform(filtered, None, None, matrix, offset,
                                      output, order, mode, cval, npad, None,
                                      None)
    return output
632
+
633
+
634
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
          prefilter=True):
    """
    Shift an array.

    The array is shifted using spline interpolation of the requested order.
    Points outside the boundaries of the input are filled according to the
    given mode.

    Parameters
    ----------
    %(input)s
    shift : float or sequence
        The shift along the axes. If a float, `shift` is the same for each
        axis. If a sequence, `shift` should contain one value for each axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    shift : ndarray
        The shifted input.

    See Also
    --------
    affine_transform : Affine transformations

    Notes
    -----
    For complex-valued `input`, this function shifts the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy.ndimage import shift
    >>> from scipy import datasets
    >>> image = datasets.ascent()

    Shift the image vertically by 20 pixels.

    >>> image_shifted_vertically = shift(image, (20, 0))

    Shift the image vertically by -200 pixels and horizontally by 100
    pixels.

    >>> image_shifted_both_directions = shift(image, (-200, 100))
    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    is_complex = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=is_complex)
    if is_complex:
        # Import under a different name to avoid confusion with the
        # `shift` parameter, then handle each component separately.
        from scipy.ndimage._interpolation import shift as _shift

        common = dict(order=order, mode=mode, prefilter=prefilter)
        _shift(input.real, shift, output=output.real, cval=np.real(cval),
               **common)
        _shift(input.imag, shift, output=output.imag, cval=np.imag(cval),
               **common)
        return output
    if prefilter and order > 1:
        # Convert image values to spline coefficients before interpolating.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    # The C routine maps output coordinates back to input coordinates, so
    # it needs the negated shift.
    shift = np.asarray(
        [-delta for delta in
         _ni_support._normalize_sequence(shift, input.ndim)],
        dtype=np.float64)
    if not shift.flags.contiguous:
        shift = shift.copy()
    _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval,
                         npad, False)
    return output
737
+
738
+
739
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
         prefilter=True, *, grid_mode=False):
    """
    Zoom an array.

    The array is zoomed using spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    zoom : float or sequence
        The zoom factor along the axes. If a float, `zoom` is the same for
        each axis. If a sequence, `zoom` should contain one value for each
        axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s
    grid_mode : bool, optional
        If False, the distance from the pixel centers is zoomed. Otherwise,
        the distance including the full pixel extent is used. For example,
        a 1d signal of length 5 is considered to have length 4 when
        `grid_mode` is False, but length 5 when `grid_mode` is True. See
        the following visual illustration:

        .. code-block:: text

                | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
                     |<-------------------------------------->|
                                   vs.
                |<----------------------------------------------->|

        The starting point of the arrow in the diagram above corresponds to
        coordinate location 0 in each mode.

    Returns
    -------
    zoom : ndarray
        The zoomed input.

    Notes
    -----
    For complex-valued `input`, this function zooms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> ascent = datasets.ascent()
    >>> result = ndimage.zoom(ascent, 3.0)
    >>> print(ascent.shape)
    (512, 512)
    >>> print(result.shape)
    (1536, 1536)
    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    zoom = _ni_support._normalize_sequence(zoom, input.ndim)
    output_shape = tuple(int(round(dim * factor))
                         for dim, factor in zip(input.shape, zoom))
    is_complex = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=is_complex)
    if is_complex:
        # Import under a different name to avoid confusion with the `zoom`
        # parameter, then handle each component separately.
        from scipy.ndimage._interpolation import zoom as _zoom

        common = dict(order=order, mode=mode, prefilter=prefilter)
        _zoom(input.real, zoom, output=output.real, cval=np.real(cval),
              **common)
        _zoom(input.imag, zoom, output=output.imag, cval=np.imag(cval),
              **common)
        return output
    if prefilter and order > 1:
        # Convert image values to spline coefficients before interpolating.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        filtered = input
    if grid_mode:
        # Warn about boundary modes that may have surprising behavior in
        # grid mode.
        suggest_mode = {'constant': 'grid-constant',
                        'wrap': 'grid-wrap'}.get(mode)
        if suggest_mode is not None:
            warnings.warn(
                (f"It is recommended to use mode = {suggest_mode} instead of {mode} "
                 f"when grid_mode is True."),
                stacklevel=2
            )
    mode = _ni_support._extend_mode_to_code(mode)

    zoom_div = np.array(output_shape)
    zoom_nominator = np.array(input.shape)
    if not grid_mode:
        # Pixel-center convention: spans run between the outermost centers.
        zoom_div -= 1
        zoom_nominator -= 1

    # Zooming to infinite values is unpredictable, so just choose
    # zoom factor 1 instead.
    zoom = np.divide(zoom_nominator, zoom_div,
                     out=np.ones_like(input.shape, dtype=np.float64),
                     where=zoom_div != 0)
    zoom = np.ascontiguousarray(zoom)
    _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval,
                         npad, grid_mode)
    return output
865
+
866
+
867
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
           mode='constant', cval=0.0, prefilter=True):
    """
    Rotate an array.

    The array is rotated in the plane defined by the two axes given by the
    `axes` parameter using spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    angle : float
        The rotation angle in degrees.
    axes : tuple of 2 ints, optional
        The two axes that define the plane of rotation. Default is the first
        two axes.
    reshape : bool, optional
        If `reshape` is true, the output shape is adapted so that the input
        array is contained completely in the output. Default is True.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    rotate : ndarray
        The rotated input.

    Notes
    -----
    For complex-valued `input`, this function rotates the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure(figsize=(10, 3))
    >>> ax1, ax2, ax3 = fig.subplots(1, 3)
    >>> img = datasets.ascent()
    >>> img_45 = ndimage.rotate(img, 45, reshape=False)
    >>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
    >>> ax1.imshow(img, cmap='gray')
    >>> ax1.set_axis_off()
    >>> ax2.imshow(img_45, cmap='gray')
    >>> ax2.set_axis_off()
    >>> ax3.imshow(full_img_45, cmap='gray')
    >>> ax3.set_axis_off()
    >>> fig.set_layout_engine('tight')
    >>> plt.show()
    >>> print(img.shape)
    (512, 512)
    >>> print(img_45.shape)
    (512, 512)
    >>> print(full_img_45.shape)
    (724, 724)

    """
    input_arr = np.asarray(input)
    ndim = input_arr.ndim

    if ndim < 2:
        raise ValueError('input array should be at least 2D')

    # Validate the rotation plane: exactly two integer-valued axes,
    # normalized to non-negative indices and sorted.
    axes = list(axes)

    if len(axes) != 2:
        raise ValueError('axes should contain exactly two values')

    if not all([float(ax).is_integer() for ax in axes]):
        raise ValueError('axes should contain only integer values')

    if axes[0] < 0:
        axes[0] += ndim
    if axes[1] < 0:
        axes[1] += ndim
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
        raise ValueError('invalid rotation plane specified')

    axes.sort()

    # Cosine/sine of the angle in degrees (scipy.special degree-based trig).
    c, s = special.cosdg(angle), special.sindg(angle)

    # 2x2 matrix passed as the `matrix` argument of `affine_transform` below.
    rot_matrix = np.array([[c, s],
                           [-s, c]])

    img_shape = np.asarray(input_arr.shape)
    in_plane_shape = img_shape[axes]
    if reshape:
        # Compute transformed input bounds
        iy, ix = in_plane_shape
        out_bounds = rot_matrix @ [[0, 0, iy, iy],
                                   [0, ix, 0, ix]]
        # Compute the shape of the transformed input plane
        out_plane_shape = (np.ptp(out_bounds, axis=1) + 0.5).astype(int)
    else:
        out_plane_shape = img_shape[axes]

    # Offset chosen so the center of the output plane, (out_plane_shape-1)/2,
    # maps onto the center of the input plane under rot_matrix.
    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
    in_center = (in_plane_shape - 1) / 2
    offset = in_center - out_center

    # NOTE: `output_shape` aliases `img_shape`, so the in-place write below
    # also updates img_shape[axes].  The `planes_coord` iteration further
    # down only reads the entries of `img_shape` *outside* `axes`, which are
    # untouched, so this aliasing is harmless.
    output_shape = img_shape
    output_shape[axes] = out_plane_shape
    output_shape = tuple(output_shape)

    complex_output = np.iscomplexobj(input_arr)
    output = _ni_support._get_output(output, input_arr, shape=output_shape,
                                     complex_output=complex_output)

    if ndim <= 2:
        affine_transform(input_arr, rot_matrix, offset, output_shape, output,
                         order, mode, cval, prefilter)
    else:
        # If ndim > 2, the rotation is applied over all the planes
        # parallel to axes
        planes_coord = itertools.product(
            *[[slice(None)] if ax in axes else range(img_shape[ax])
              for ax in range(ndim)])

        out_plane_shape = tuple(out_plane_shape)

        # Each `coordinates` selects one 2D plane of the input/output; the
        # output slices are views, so affine_transform fills `output` in place.
        for coordinates in planes_coord:
            ia = input_arr[coordinates]
            oa = output[coordinates]
            affine_transform(ia, rot_matrix, offset, out_plane_shape,
                             oa, order, mode, cval, prefilter)

    return output
llava_video/lib/python3.10/site-packages/scipy/ndimage/_morphology.py ADDED
The diff for this file is too large to render. See raw diff
 
llava_video/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ from collections.abc import Iterable
32
+ import operator
33
+ import warnings
34
+ import numpy as np
35
+
36
+
37
+ def _extend_mode_to_code(mode, is_filter=False):
38
+ """Convert an extension mode to the corresponding integer code.
39
+ """
40
+ if mode == 'nearest':
41
+ return 0
42
+ elif mode == 'wrap':
43
+ return 1
44
+ elif mode in ['reflect', 'grid-mirror']:
45
+ return 2
46
+ elif mode == 'mirror':
47
+ return 3
48
+ elif mode == 'constant':
49
+ return 4
50
+ elif mode == 'grid-wrap' and is_filter:
51
+ return 1
52
+ elif mode == 'grid-wrap':
53
+ return 5
54
+ elif mode == 'grid-constant' and is_filter:
55
+ return 4
56
+ elif mode == 'grid-constant':
57
+ return 6
58
+ else:
59
+ raise RuntimeError('boundary mode not supported')
60
+
61
+
62
+ def _normalize_sequence(input, rank):
63
+ """If input is a scalar, create a sequence of length equal to the
64
+ rank by duplicating the input. If input is a sequence,
65
+ check if its length is equal to the length of array.
66
+ """
67
+ is_str = isinstance(input, str)
68
+ if not is_str and np.iterable(input):
69
+ normalized = list(input)
70
+ if len(normalized) != rank:
71
+ err = "sequence argument must have length equal to input rank"
72
+ raise RuntimeError(err)
73
+ else:
74
+ normalized = [input] * rank
75
+ return normalized
76
+
77
+
78
+ def _get_output(output, input, shape=None, complex_output=False):
79
+ if shape is None:
80
+ shape = input.shape
81
+ if output is None:
82
+ if not complex_output:
83
+ output = np.zeros(shape, dtype=input.dtype.name)
84
+ else:
85
+ complex_type = np.promote_types(input.dtype, np.complex64)
86
+ output = np.zeros(shape, dtype=complex_type)
87
+ elif isinstance(output, (type, np.dtype)):
88
+ # Classes (like `np.float32`) and dtypes are interpreted as dtype
89
+ if complex_output and np.dtype(output).kind != 'c':
90
+ warnings.warn("promoting specified output dtype to complex", stacklevel=3)
91
+ output = np.promote_types(output, np.complex64)
92
+ output = np.zeros(shape, dtype=output)
93
+ elif isinstance(output, str):
94
+ output = np.dtype(output)
95
+ if complex_output and output.kind != 'c':
96
+ raise RuntimeError("output must have complex dtype")
97
+ elif not issubclass(output.type, np.number):
98
+ raise RuntimeError("output must have numeric dtype")
99
+ output = np.zeros(shape, dtype=output)
100
+ else:
101
+ # output was supplied as an array
102
+ output = np.asarray(output)
103
+ if output.shape != shape:
104
+ raise RuntimeError("output shape not correct")
105
+ elif complex_output and output.dtype.kind != 'c':
106
+ raise RuntimeError("output must have complex dtype")
107
+ return output
108
+
109
+
110
+ def _check_axes(axes, ndim):
111
+ if axes is None:
112
+ return tuple(range(ndim))
113
+ elif np.isscalar(axes):
114
+ axes = (operator.index(axes),)
115
+ elif isinstance(axes, Iterable):
116
+ for ax in axes:
117
+ axes = tuple(operator.index(ax) for ax in axes)
118
+ if ax < -ndim or ax > ndim - 1:
119
+ raise ValueError(f"specified axis: {ax} is out of range")
120
+ axes = tuple(ax % ndim if ax < 0 else ax for ax in axes)
121
+ else:
122
+ message = "axes must be an integer, iterable of integers, or None"
123
+ raise ValueError(message)
124
+ if len(tuple(set(axes))) != len(axes):
125
+ raise ValueError("axes must be unique")
126
+ return axes
127
+
128
+ def _skip_if_dtype(arg):
129
+ """'array or dtype' polymorphism.
130
+
131
+ Return None for np.int8, dtype('float32') or 'f' etc
132
+ arg for np.empty(3) etc
133
+ """
134
+ if isinstance(arg, str):
135
+ return None
136
+ if type(arg) is type:
137
+ return None if issubclass(arg, np.generic) else arg
138
+ else:
139
+ return None if isinstance(arg, np.dtype) else arg
140
+
141
+
142
+ def _skip_if_int(arg):
143
+ return None if (arg is None or isinstance(arg, int)) else arg
llava_video/lib/python3.10/site-packages/scipy/ndimage/_rank_filter_1d.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (27.4 kB). View file
 
llava_video/lib/python3.10/site-packages/scipy/ndimage/fourier.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.ndimage` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
# Public names historically exposed by `scipy.ndimage.fourier`; they now
# live in the private `scipy.ndimage._fourier` module and are re-exported
# here through the deprecation helper below.
__all__ = [ # noqa: F822
    'fourier_gaussian', 'fourier_uniform',
    'fourier_ellipsoid', 'fourier_shift'
]


def __dir__():
    # Advertise only the deprecated public names to dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Lazily resolve `name` through the deprecation helper, which forwards
    # to the private `_fourier` implementation module.
    return _sub_module_deprecation(sub_package='ndimage', module='fourier',
                                   private_modules=['_fourier'], all=__all__,
                                   attribute=name)