ZTWHHH commited on
Commit
34a77fc
·
verified ·
1 Parent(s): 7105614

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. parrot/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so +3 -0
  3. parrot/lib/python3.10/site-packages/scipy/ndimage/__init__.py +169 -0
  4. parrot/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so +0 -0
  5. parrot/lib/python3.10/site-packages/scipy/ndimage/_filters.py +1858 -0
  6. parrot/lib/python3.10/site-packages/scipy/ndimage/_fourier.py +306 -0
  7. parrot/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py +1001 -0
  8. parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py +208 -0
  9. parrot/lib/python3.10/site-packages/scipy/ndimage/interpolation.py +22 -0
  10. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py +13 -0
  11. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py +102 -0
  12. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py +66 -0
  13. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py +2214 -0
  14. parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so +3 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h +11 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h +11 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h +19 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h +19 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h +32 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h +19 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h +14 -0
  26. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h +42 -0
  27. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h +11 -0
  28. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h +17 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h +23 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h +47 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h +37 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h +13 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h +15 -0
  34. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h +14 -0
  35. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h +13 -0
  36. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h +15 -0
  37. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h +24 -0
  38. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h +37 -0
  39. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h +54 -0
  40. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h +12 -0
  41. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h +16 -0
  42. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h +14 -0
  43. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h +21 -0
  44. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h +12 -0
  45. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h +23 -0
  46. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h +17 -0
  47. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h +22 -0
  48. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h +20 -0
  49. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h +13 -0
  50. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h +64 -0
.gitattributes CHANGED
@@ -1700,3 +1700,5 @@ vllm/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310
1700
  vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1701
  vllm/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1702
  vllm/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
1700
  vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1701
  vllm/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1702
  vllm/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1703
+ parrot/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1704
+ parrot/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee732614b22a8d19dbc0841e4b958699990ef7c0cee00edd2c850d58d6616b2f
3
+ size 423352
parrot/lib/python3.10/site-packages/scipy/ndimage/__init__.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =========================================================
3
+ Multidimensional image processing (:mod:`scipy.ndimage`)
4
+ =========================================================
5
+
6
+ .. currentmodule:: scipy.ndimage
7
+
8
+ This package contains various functions for multidimensional image
9
+ processing.
10
+
11
+
12
+ Filters
13
+ =======
14
+
15
+ .. autosummary::
16
+ :toctree: generated/
17
+
18
+ convolve - Multidimensional convolution
19
+ convolve1d - 1-D convolution along the given axis
20
+ correlate - Multidimensional correlation
21
+ correlate1d - 1-D correlation along the given axis
22
+ gaussian_filter
23
+ gaussian_filter1d
24
+ gaussian_gradient_magnitude
25
+ gaussian_laplace
26
+ generic_filter - Multidimensional filter using a given function
27
+ generic_filter1d - 1-D generic filter along the given axis
28
+ generic_gradient_magnitude
29
+ generic_laplace
30
+ laplace - N-D Laplace filter based on approximate second derivatives
31
+ maximum_filter
32
+ maximum_filter1d
33
+ median_filter - Calculates a multidimensional median filter
34
+ minimum_filter
35
+ minimum_filter1d
36
+ percentile_filter - Calculates a multidimensional percentile filter
37
+ prewitt
38
+ rank_filter - Calculates a multidimensional rank filter
39
+ sobel
40
+ uniform_filter - Multidimensional uniform filter
41
+ uniform_filter1d - 1-D uniform filter along the given axis
42
+
43
+ Fourier filters
44
+ ===============
45
+
46
+ .. autosummary::
47
+ :toctree: generated/
48
+
49
+ fourier_ellipsoid
50
+ fourier_gaussian
51
+ fourier_shift
52
+ fourier_uniform
53
+
54
+ Interpolation
55
+ =============
56
+
57
+ .. autosummary::
58
+ :toctree: generated/
59
+
60
+ affine_transform - Apply an affine transformation
61
+ geometric_transform - Apply an arbitrary geometric transform
62
+ map_coordinates - Map input array to new coordinates by interpolation
63
+ rotate - Rotate an array
64
+ shift - Shift an array
65
+ spline_filter
66
+ spline_filter1d
67
+ zoom - Zoom an array
68
+
69
+ Measurements
70
+ ============
71
+
72
+ .. autosummary::
73
+ :toctree: generated/
74
+
75
+ center_of_mass - The center of mass of the values of an array at labels
76
+ extrema - Min's and max's of an array at labels, with their positions
77
+ find_objects - Find objects in a labeled array
78
+ histogram - Histogram of the values of an array, optionally at labels
79
+ label - Label features in an array
80
+ labeled_comprehension
81
+ maximum
82
+ maximum_position
83
+ mean - Mean of the values of an array at labels
84
+ median
85
+ minimum
86
+ minimum_position
87
+ standard_deviation - Standard deviation of an N-D image array
88
+ sum_labels - Sum of the values of the array
89
+ value_indices - Find indices of each distinct value in given array
90
+ variance - Variance of the values of an N-D image array
91
+ watershed_ift
92
+
93
+ Morphology
94
+ ==========
95
+
96
+ .. autosummary::
97
+ :toctree: generated/
98
+
99
+ binary_closing
100
+ binary_dilation
101
+ binary_erosion
102
+ binary_fill_holes
103
+ binary_hit_or_miss
104
+ binary_opening
105
+ binary_propagation
106
+ black_tophat
107
+ distance_transform_bf
108
+ distance_transform_cdt
109
+ distance_transform_edt
110
+ generate_binary_structure
111
+ grey_closing
112
+ grey_dilation
113
+ grey_erosion
114
+ grey_opening
115
+ iterate_structure
116
+ morphological_gradient
117
+ morphological_laplace
118
+ white_tophat
119
+
120
+ """
121
+
122
+ # Copyright (C) 2003-2005 Peter J. Verveer
123
+ #
124
+ # Redistribution and use in source and binary forms, with or without
125
+ # modification, are permitted provided that the following conditions
126
+ # are met:
127
+ #
128
+ # 1. Redistributions of source code must retain the above copyright
129
+ # notice, this list of conditions and the following disclaimer.
130
+ #
131
+ # 2. Redistributions in binary form must reproduce the above
132
+ # copyright notice, this list of conditions and the following
133
+ # disclaimer in the documentation and/or other materials provided
134
+ # with the distribution.
135
+ #
136
+ # 3. The name of the author may not be used to endorse or promote
137
+ # products derived from this software without specific prior
138
+ # written permission.
139
+ #
140
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
141
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
142
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
143
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
144
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
145
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
146
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
147
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
148
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
149
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
150
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
151
+
152
+ from ._filters import *
153
+ from ._fourier import *
154
+ from ._interpolation import *
155
+ from ._measurements import *
156
+ from ._morphology import *
157
+
158
+ # Deprecated namespaces, to be removed in v2.0.0
159
+ from . import filters
160
+ from . import fourier
161
+ from . import interpolation
162
+ from . import measurements
163
+ from . import morphology
164
+
165
+ __all__ = [s for s in dir() if not s.startswith('_')]
166
+
167
+ from scipy._lib._testutils import PytestTester
168
+ test = PytestTester(__name__)
169
+ del PytestTester
parrot/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (17 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/_filters.py ADDED
@@ -0,0 +1,1858 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ from collections.abc import Iterable
32
+ import numbers
33
+ import warnings
34
+ import numpy as np
35
+ import operator
36
+
37
+ from scipy._lib._util import normalize_axis_index
38
+ from . import _ni_support
39
+ from . import _nd_image
40
+ from . import _ni_docstrings
41
+
42
+ __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
43
+ 'prewitt', 'sobel', 'generic_laplace', 'laplace',
44
+ 'gaussian_laplace', 'generic_gradient_magnitude',
45
+ 'gaussian_gradient_magnitude', 'correlate', 'convolve',
46
+ 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
47
+ 'maximum_filter1d', 'minimum_filter', 'maximum_filter',
48
+ 'rank_filter', 'median_filter', 'percentile_filter',
49
+ 'generic_filter1d', 'generic_filter']
50
+
51
+
52
+ def _invalid_origin(origin, lenw):
53
+ return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
54
+
55
+
56
+ def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
57
+ """Complex convolution via a linear combination of real convolutions."""
58
+ complex_input = input.dtype.kind == 'c'
59
+ complex_weights = weights.dtype.kind == 'c'
60
+ if complex_input and complex_weights:
61
+ # real component of the output
62
+ func(input.real, weights.real, output=output.real,
63
+ cval=np.real(cval), **kwargs)
64
+ output.real -= func(input.imag, weights.imag, output=None,
65
+ cval=np.imag(cval), **kwargs)
66
+ # imaginary component of the output
67
+ func(input.real, weights.imag, output=output.imag,
68
+ cval=np.real(cval), **kwargs)
69
+ output.imag += func(input.imag, weights.real, output=None,
70
+ cval=np.imag(cval), **kwargs)
71
+ elif complex_input:
72
+ func(input.real, weights, output=output.real, cval=np.real(cval),
73
+ **kwargs)
74
+ func(input.imag, weights, output=output.imag, cval=np.imag(cval),
75
+ **kwargs)
76
+ else:
77
+ if np.iscomplexobj(cval):
78
+ raise ValueError("Cannot provide a complex-valued cval when the "
79
+ "input is real.")
80
+ func(input, weights.real, output=output.real, cval=cval, **kwargs)
81
+ func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
82
+ return output
83
+
84
+
85
+ @_ni_docstrings.docfiller
86
+ def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
87
+ cval=0.0, origin=0):
88
+ """Calculate a 1-D correlation along the given axis.
89
+
90
+ The lines of the array along the given axis are correlated with the
91
+ given weights.
92
+
93
+ Parameters
94
+ ----------
95
+ %(input)s
96
+ weights : array
97
+ 1-D sequence of numbers.
98
+ %(axis)s
99
+ %(output)s
100
+ %(mode_reflect)s
101
+ %(cval)s
102
+ %(origin)s
103
+
104
+ Returns
105
+ -------
106
+ result : ndarray
107
+ Correlation result. Has the same shape as `input`.
108
+
109
+ Examples
110
+ --------
111
+ >>> from scipy.ndimage import correlate1d
112
+ >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
113
+ array([ 8, 26, 8, 12, 7, 28, 36, 9])
114
+ """
115
+ input = np.asarray(input)
116
+ weights = np.asarray(weights)
117
+ complex_input = input.dtype.kind == 'c'
118
+ complex_weights = weights.dtype.kind == 'c'
119
+ if complex_input or complex_weights:
120
+ if complex_weights:
121
+ weights = weights.conj()
122
+ weights = weights.astype(np.complex128, copy=False)
123
+ kwargs = dict(axis=axis, mode=mode, origin=origin)
124
+ output = _ni_support._get_output(output, input, complex_output=True)
125
+ return _complex_via_real_components(correlate1d, input, weights,
126
+ output, cval, **kwargs)
127
+
128
+ output = _ni_support._get_output(output, input)
129
+ weights = np.asarray(weights, dtype=np.float64)
130
+ if weights.ndim != 1 or weights.shape[0] < 1:
131
+ raise RuntimeError('no filter weights given')
132
+ if not weights.flags.contiguous:
133
+ weights = weights.copy()
134
+ axis = normalize_axis_index(axis, input.ndim)
135
+ if _invalid_origin(origin, len(weights)):
136
+ raise ValueError('Invalid origin; origin must satisfy '
137
+ '-(len(weights) // 2) <= origin <= '
138
+ '(len(weights)-1) // 2')
139
+ mode = _ni_support._extend_mode_to_code(mode)
140
+ _nd_image.correlate1d(input, weights, axis, output, mode, cval,
141
+ origin)
142
+ return output
143
+
144
+
145
+ @_ni_docstrings.docfiller
146
+ def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
147
+ cval=0.0, origin=0):
148
+ """Calculate a 1-D convolution along the given axis.
149
+
150
+ The lines of the array along the given axis are convolved with the
151
+ given weights.
152
+
153
+ Parameters
154
+ ----------
155
+ %(input)s
156
+ weights : ndarray
157
+ 1-D sequence of numbers.
158
+ %(axis)s
159
+ %(output)s
160
+ %(mode_reflect)s
161
+ %(cval)s
162
+ %(origin)s
163
+
164
+ Returns
165
+ -------
166
+ convolve1d : ndarray
167
+ Convolved array with same shape as input
168
+
169
+ Examples
170
+ --------
171
+ >>> from scipy.ndimage import convolve1d
172
+ >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
173
+ array([14, 24, 4, 13, 12, 36, 27, 0])
174
+ """
175
+ weights = weights[::-1]
176
+ origin = -origin
177
+ if not len(weights) & 1:
178
+ origin -= 1
179
+ weights = np.asarray(weights)
180
+ if weights.dtype.kind == 'c':
181
+ # pre-conjugate here to counteract the conjugation in correlate1d
182
+ weights = weights.conj()
183
+ return correlate1d(input, weights, axis, output, mode, cval, origin)
184
+
185
+
186
+ def _gaussian_kernel1d(sigma, order, radius):
187
+ """
188
+ Computes a 1-D Gaussian convolution kernel.
189
+ """
190
+ if order < 0:
191
+ raise ValueError('order must be non-negative')
192
+ exponent_range = np.arange(order + 1)
193
+ sigma2 = sigma * sigma
194
+ x = np.arange(-radius, radius+1)
195
+ phi_x = np.exp(-0.5 / sigma2 * x ** 2)
196
+ phi_x = phi_x / phi_x.sum()
197
+
198
+ if order == 0:
199
+ return phi_x
200
+ else:
201
+ # f(x) = q(x) * phi(x) = q(x) * exp(p(x))
202
+ # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
203
+ # p'(x) = -1 / sigma ** 2
204
+ # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
205
+ # coefficients of q(x)
206
+ q = np.zeros(order + 1)
207
+ q[0] = 1
208
+ D = np.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
209
+ P = np.diag(np.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
210
+ Q_deriv = D + P
211
+ for _ in range(order):
212
+ q = Q_deriv.dot(q)
213
+ q = (x[:, None] ** exponent_range).dot(q)
214
+ return q * phi_x
215
+
216
+
217
+ @_ni_docstrings.docfiller
218
+ def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
219
+ mode="reflect", cval=0.0, truncate=4.0, *, radius=None):
220
+ """1-D Gaussian filter.
221
+
222
+ Parameters
223
+ ----------
224
+ %(input)s
225
+ sigma : scalar
226
+ standard deviation for Gaussian kernel
227
+ %(axis)s
228
+ order : int, optional
229
+ An order of 0 corresponds to convolution with a Gaussian
230
+ kernel. A positive order corresponds to convolution with
231
+ that derivative of a Gaussian.
232
+ %(output)s
233
+ %(mode_reflect)s
234
+ %(cval)s
235
+ truncate : float, optional
236
+ Truncate the filter at this many standard deviations.
237
+ Default is 4.0.
238
+ radius : None or int, optional
239
+ Radius of the Gaussian kernel. If specified, the size of
240
+ the kernel will be ``2*radius + 1``, and `truncate` is ignored.
241
+ Default is None.
242
+
243
+ Returns
244
+ -------
245
+ gaussian_filter1d : ndarray
246
+
247
+ Notes
248
+ -----
249
+ The Gaussian kernel will have size ``2*radius + 1`` along each axis. If
250
+ `radius` is None, a default ``radius = round(truncate * sigma)`` will be
251
+ used.
252
+
253
+ Examples
254
+ --------
255
+ >>> from scipy.ndimage import gaussian_filter1d
256
+ >>> import numpy as np
257
+ >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
258
+ array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
259
+ >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
260
+ array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
261
+ >>> import matplotlib.pyplot as plt
262
+ >>> rng = np.random.default_rng()
263
+ >>> x = rng.standard_normal(101).cumsum()
264
+ >>> y3 = gaussian_filter1d(x, 3)
265
+ >>> y6 = gaussian_filter1d(x, 6)
266
+ >>> plt.plot(x, 'k', label='original data')
267
+ >>> plt.plot(y3, '--', label='filtered, sigma=3')
268
+ >>> plt.plot(y6, ':', label='filtered, sigma=6')
269
+ >>> plt.legend()
270
+ >>> plt.grid()
271
+ >>> plt.show()
272
+
273
+ """
274
+ sd = float(sigma)
275
+ # make the radius of the filter equal to truncate standard deviations
276
+ lw = int(truncate * sd + 0.5)
277
+ if radius is not None:
278
+ lw = radius
279
+ if not isinstance(lw, numbers.Integral) or lw < 0:
280
+ raise ValueError('Radius must be a nonnegative integer.')
281
+ # Since we are calling correlate, not convolve, revert the kernel
282
+ weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
283
+ return correlate1d(input, weights, axis, output, mode, cval, 0)
284
+
285
+
286
+ @_ni_docstrings.docfiller
287
+ def gaussian_filter(input, sigma, order=0, output=None,
288
+ mode="reflect", cval=0.0, truncate=4.0, *, radius=None,
289
+ axes=None):
290
+ """Multidimensional Gaussian filter.
291
+
292
+ Parameters
293
+ ----------
294
+ %(input)s
295
+ sigma : scalar or sequence of scalars
296
+ Standard deviation for Gaussian kernel. The standard
297
+ deviations of the Gaussian filter are given for each axis as a
298
+ sequence, or as a single number, in which case it is equal for
299
+ all axes.
300
+ order : int or sequence of ints, optional
301
+ The order of the filter along each axis is given as a sequence
302
+ of integers, or as a single number. An order of 0 corresponds
303
+ to convolution with a Gaussian kernel. A positive order
304
+ corresponds to convolution with that derivative of a Gaussian.
305
+ %(output)s
306
+ %(mode_multiple)s
307
+ %(cval)s
308
+ truncate : float, optional
309
+ Truncate the filter at this many standard deviations.
310
+ Default is 4.0.
311
+ radius : None or int or sequence of ints, optional
312
+ Radius of the Gaussian kernel. The radius are given for each axis
313
+ as a sequence, or as a single number, in which case it is equal
314
+ for all axes. If specified, the size of the kernel along each axis
315
+ will be ``2*radius + 1``, and `truncate` is ignored.
316
+ Default is None.
317
+ axes : tuple of int or None, optional
318
+ If None, `input` is filtered along all axes. Otherwise,
319
+ `input` is filtered along the specified axes. When `axes` is
320
+ specified, any tuples used for `sigma`, `order`, `mode` and/or `radius`
321
+ must match the length of `axes`. The ith entry in any of these tuples
322
+ corresponds to the ith entry in `axes`.
323
+
324
+ Returns
325
+ -------
326
+ gaussian_filter : ndarray
327
+ Returned array of same shape as `input`.
328
+
329
+ Notes
330
+ -----
331
+ The multidimensional filter is implemented as a sequence of
332
+ 1-D convolution filters. The intermediate arrays are
333
+ stored in the same data type as the output. Therefore, for output
334
+ types with a limited precision, the results may be imprecise
335
+ because intermediate results may be stored with insufficient
336
+ precision.
337
+
338
+ The Gaussian kernel will have size ``2*radius + 1`` along each axis. If
339
+ `radius` is None, the default ``radius = round(truncate * sigma)`` will be
340
+ used.
341
+
342
+ Examples
343
+ --------
344
+ >>> from scipy.ndimage import gaussian_filter
345
+ >>> import numpy as np
346
+ >>> a = np.arange(50, step=2).reshape((5,5))
347
+ >>> a
348
+ array([[ 0, 2, 4, 6, 8],
349
+ [10, 12, 14, 16, 18],
350
+ [20, 22, 24, 26, 28],
351
+ [30, 32, 34, 36, 38],
352
+ [40, 42, 44, 46, 48]])
353
+ >>> gaussian_filter(a, sigma=1)
354
+ array([[ 4, 6, 8, 9, 11],
355
+ [10, 12, 14, 15, 17],
356
+ [20, 22, 24, 25, 27],
357
+ [29, 31, 33, 34, 36],
358
+ [35, 37, 39, 40, 42]])
359
+
360
+ >>> from scipy import datasets
361
+ >>> import matplotlib.pyplot as plt
362
+ >>> fig = plt.figure()
363
+ >>> plt.gray() # show the filtered result in grayscale
364
+ >>> ax1 = fig.add_subplot(121) # left side
365
+ >>> ax2 = fig.add_subplot(122) # right side
366
+ >>> ascent = datasets.ascent()
367
+ >>> result = gaussian_filter(ascent, sigma=5)
368
+ >>> ax1.imshow(ascent)
369
+ >>> ax2.imshow(result)
370
+ >>> plt.show()
371
+ """
372
+ input = np.asarray(input)
373
+ output = _ni_support._get_output(output, input)
374
+
375
+ axes = _ni_support._check_axes(axes, input.ndim)
376
+ num_axes = len(axes)
377
+ orders = _ni_support._normalize_sequence(order, num_axes)
378
+ sigmas = _ni_support._normalize_sequence(sigma, num_axes)
379
+ modes = _ni_support._normalize_sequence(mode, num_axes)
380
+ radiuses = _ni_support._normalize_sequence(radius, num_axes)
381
+ axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii], radiuses[ii])
382
+ for ii in range(num_axes) if sigmas[ii] > 1e-15]
383
+ if len(axes) > 0:
384
+ for axis, sigma, order, mode, radius in axes:
385
+ gaussian_filter1d(input, sigma, axis, order, output,
386
+ mode, cval, truncate, radius=radius)
387
+ input = output
388
+ else:
389
+ output[...] = input[...]
390
+ return output
391
+
392
+
393
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Prewitt filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    prewitt : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    sobel: Sobel filter

    Notes
    -----
    This function computes the one-dimensional Prewitt filter.
    Horizontal edges are emphasised with the horizontal transform (axis=0),
    vertical edges with the vertical transform (axis=1), and so on for higher
    dimensions. These can be combined to give the magnitude.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import numpy as np
    >>> ascent = datasets.ascent()
    >>> prewitt_h = ndimage.prewitt(ascent, axis=0)
    >>> prewitt_v = ndimage.prewitt(ascent, axis=1)
    >>> magnitude = np.sqrt(prewitt_h ** 2 + prewitt_v ** 2)

    """
    arr = np.asarray(input)
    axis = normalize_axis_index(axis, arr.ndim)
    output = _ni_support._get_output(output, arr)
    modes = _ni_support._normalize_sequence(mode, arr.ndim)
    # Central-difference derivative kernel along the requested axis.
    correlate1d(arr, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # Uniform (box) smoothing along every remaining axis.
    for other in (ax for ax in range(arr.ndim) if ax != axis):
        correlate1d(output, [1, 1, 1], other, output, modes[other], cval, 0)
    return output
453
+
454
+
455
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Sobel filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    sobel : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function computes the axis-specific Sobel gradient.
    The horizontal edges can be emphasised with the horizontal transform
    (axis=0), the vertical edges with the vertical transform (axis=1) and so
    on for higher dimensions. These can be combined to give the magnitude.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import numpy as np
    >>> ascent = datasets.ascent().astype('int32')
    >>> sobel_h = ndimage.sobel(ascent, 0)  # horizontal gradient
    >>> sobel_v = ndimage.sobel(ascent, 1)  # vertical gradient
    >>> magnitude = np.sqrt(sobel_h**2 + sobel_v**2)

    """
    arr = np.asarray(input)
    axis = normalize_axis_index(axis, arr.ndim)
    output = _ni_support._get_output(output, arr)
    modes = _ni_support._normalize_sequence(mode, arr.ndim)
    # Central-difference derivative kernel along the requested axis.
    correlate1d(arr, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # Triangular [1, 2, 1] smoothing along each of the remaining axes.
    for other in (ax for ax in range(arr.ndim) if ax != axis):
        correlate1d(output, [1, 2, 1], other, output, modes[other], cval, 0)
    return output
511
+
512
+
513
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
                    cval=0.0,
                    extra_arguments=(),
                    extra_keywords=None):
    """
    N-D Laplace filter using a provided second derivative function.

    The Laplacian is accumulated as the sum of `derivative2` applied
    along each axis of the input in turn.

    Parameters
    ----------
    %(input)s
    derivative2 : callable
        Callable with the following signature::

            derivative2(input, axis, output, mode, cval,
                        *extra_arguments, **extra_keywords)

        See `extra_arguments`, `extra_keywords` below.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s

    Returns
    -------
    generic_laplace : ndarray
        Filtered array. Has the same shape as `input`.

    """
    if extra_keywords is None:
        # Avoid a mutable default argument; a fresh dict per call.
        extra_keywords = {}
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if len(axes) > 0:
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # First axis: write the second derivative directly into `output`.
        derivative2(input, axes[0], output, modes[0], cval,
                    *extra_arguments, **extra_keywords)
        for ii in range(1, len(axes)):
            # Remaining axes: pass `output.dtype` (a dtype, not the array)
            # so the callee allocates a fresh result array of that dtype
            # (presumably via `_ni_support._get_output`) instead of
            # clobbering the running sum; then accumulate in place.
            tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
                              *extra_arguments, **extra_keywords)
            output += tmp
    else:
        # Zero-dimensional input: nothing to differentiate, pass through.
        output[...] = input[...]
    return output
559
+
560
+
561
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
    """N-D Laplace filter based on approximate second derivatives.

    Parameters
    ----------
    %(input)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    laplace : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> ascent = datasets.ascent()
    >>> result = ndimage.laplace(ascent)
    """
    # Discrete second difference [1, -2, 1] along one axis;
    # `generic_laplace` sums this contribution over every axis.
    def second_difference(input, axis, output, mode, cval):
        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)

    return generic_laplace(input, second_difference, output, mode, cval)
594
+
595
+
596
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
                     cval=0.0, **kwargs):
    """Multidimensional Laplace filter using Gaussian second derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_laplace : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> ascent = datasets.ascent()
    >>> result = ndimage.gaussian_laplace(ascent, sigma=1)
    """
    data = np.asarray(input)

    # Second-order Gaussian derivative along `axis`, zeroth order on all
    # other axes; generic_laplace sums these per-axis contributions.
    def second_derivative(input, axis, output, mode, cval, sigma, **kwargs):
        orders = [2 if ax == axis else 0 for ax in range(input.ndim)]
        return gaussian_filter(input, sigma, orders, output, mode, cval,
                               **kwargs)

    return generic_laplace(data, second_derivative, output, mode, cval,
                           extra_arguments=(sigma,),
                           extra_keywords=kwargs)
647
+
648
+
649
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
                               mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None):
    """Gradient magnitude using a provided gradient function.

    Computes ``sqrt(sum_i derivative(input, axis=i)**2)`` over all axes.

    Parameters
    ----------
    %(input)s
    derivative : callable
        Callable with the following signature::

            derivative(input, axis, output, mode, cval,
                       *extra_arguments, **extra_keywords)

        See `extra_arguments`, `extra_keywords` below.
        `derivative` can assume that `input` and `output` are ndarrays.
        Note that the output from `derivative` is modified inplace;
        be careful to copy important inputs before returning them.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s

    Returns
    -------
    generic_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.

    """
    if extra_keywords is None:
        # Avoid a mutable default argument; a fresh dict per call.
        extra_keywords = {}
    input = np.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if len(axes) > 0:
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # First axis: derivative written directly into `output`, then
        # squared in place.
        derivative(input, axes[0], output, modes[0], cval,
                   *extra_arguments, **extra_keywords)
        np.multiply(output, output, output)
        for ii in range(1, len(axes)):
            # Remaining axes: pass `output.dtype` (a dtype, not the array)
            # so the callee allocates a fresh array (presumably via
            # `_ni_support._get_output`); square it in place and accumulate.
            tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
                             *extra_arguments, **extra_keywords)
            np.multiply(tmp, tmp, tmp)
            output += tmp
        # This allows the sqrt to work with a different default casting
        # (e.g. integer accumulators rounded back into an integer output).
        np.sqrt(output, output, casting='unsafe')
    else:
        # Zero-dimensional input: nothing to differentiate, pass through.
        output[...] = input[...]
    return output
700
+
701
+
702
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
                                mode="reflect", cval=0.0, **kwargs):
    """Multidimensional gradient magnitude using Gaussian derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> ascent = datasets.ascent()
    >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
    """
    data = np.asarray(input)

    # First-order Gaussian derivative along `axis`, zeroth order on all
    # other axes; the helper squares, sums and square-roots per axis.
    def first_derivative(input, axis, output, mode, cval, sigma, **kwargs):
        orders = [1 if ax == axis else 0 for ax in range(input.ndim)]
        return gaussian_filter(input, sigma, orders, output, mode,
                               cval, **kwargs)

    return generic_gradient_magnitude(data, first_derivative, output, mode,
                                      cval, extra_arguments=(sigma,),
                                      extra_keywords=kwargs)
749
+
750
+
751
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution):
    # Shared implementation behind `correlate` and `convolve`.
    # Convolution is realized as correlation with a flipped kernel and
    # adjusted origins; complex data is decomposed into real components.
    input = np.asarray(input)
    weights = np.asarray(weights)
    complex_input = input.dtype.kind == 'c'
    complex_weights = weights.dtype.kind == 'c'
    if complex_input or complex_weights:
        if complex_weights and not convolution:
            # As for np.correlate, conjugate weights rather than input.
            weights = weights.conj()
        kwargs = dict(
            mode=mode, origin=origin, convolution=convolution
        )
        output = _ni_support._get_output(output, input, complex_output=True)

        # Recurse on real/imaginary parts; the helper combines them.
        return _complex_via_real_components(_correlate_or_convolve, input,
                                            weights, output, cval, **kwargs)

    origins = _ni_support._normalize_sequence(origin, input.ndim)
    weights = np.asarray(weights, dtype=np.float64)
    # Degenerate (length-0) weight axes are dropped before the rank check.
    wshape = [ii for ii in weights.shape if ii > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError('filter weights array has incorrect shape.')
    if convolution:
        # Convolution == correlation with the kernel reversed on every axis.
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        for ii in range(len(origins)):
            # Mirror the origin; even-length axes need an extra -1 shift
            # because the center position is asymmetric.
            origins[ii] = -origins[ii]
            if not weights.shape[ii] & 1:
                origins[ii] -= 1
    for origin, lenw in zip(origins, wshape):
        if _invalid_origin(origin, lenw):
            raise ValueError('Invalid origin; origin must satisfy '
                             '-(weights.shape[k] // 2) <= origin[k] <= '
                             '(weights.shape[k]-1) // 2')

    # The C routine requires a contiguous weights array.
    if not weights.flags.contiguous:
        weights = weights.copy()
    output = _ni_support._get_output(output, input)
    temp_needed = np.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    if not isinstance(mode, str) and isinstance(mode, Iterable):
        raise RuntimeError("A sequence of modes is not supported")
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate(input, weights, output, mode, cval, origins)
    if temp_needed:
        # Copy the result back into the caller-supplied (aliasing) array.
        temp[...] = output
        output = temp
    return output
802
+
803
+
804
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
              origin=0):
    """
    Multidimensional correlation.

    The array is correlated with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    result : ndarray
        The result of correlation of `input` with `weights`.

    See Also
    --------
    convolve : Convolve an image with a kernel.

    Examples
    --------
    Correlation slides a kernel over the image and computes the sum of
    products at each location.

    >>> from scipy.ndimage import correlate
    >>> import numpy as np
    >>> input_img = np.arange(25).reshape(5,5)
    >>> weights = [[0, 1, 0],
    ...            [1, 1, 1],
    ...            [0, 1, 0]]
    >>> correlate(input_img, weights)
    array([[  6,  10,  15,  20,  24],
           [ 26,  30,  35,  40,  44],
           [ 51,  55,  60,  65,  69],
           [ 76,  80,  85,  90,  94],
           [ 96, 100, 105, 110, 114]])

    """
    # Correlation keeps the kernel orientation as given; the shared
    # helper only flips it when asked for a convolution.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, convolution=False)
866
+
867
+
868
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
             origin=0):
    """
    Multidimensional convolution.

    The array is convolved with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : array_like
        Array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0
    origin : int, optional
        Controls the origin of the input signal, which is where the
        filter is centered to produce the first element of the output.
        Positive values shift the filter to the right, and negative values
        shift the filter to the left. Default is 0.

    Returns
    -------
    result : ndarray
        The result of convolution of `input` with `weights`.

    See Also
    --------
    correlate : Correlate an image with a kernel.

    Notes
    -----
    Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
    W is the `weights` kernel,
    j is the N-D spatial index over :math:`W`,
    I is the `input` and k is the coordinate of the center of
    W, specified by `origin` in the input parameters.

    Examples
    --------
    With ``mode='constant', cval=0.0``, borders (where the kernel extends
    beyond an edge of `input`) are treated as zeros.

    >>> import numpy as np
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> from scipy import ndimage
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
    array([[11, 10,  7,  4],
           [10,  3, 11, 11],
           [15, 12, 14,  7],
           [12,  3,  7,  0]])

    With ``mode='reflect'`` (the default), outer values are reflected at
    the edge of `input` to fill in missing values.

    >>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
    >>> ndimage.convolve(b, k, mode='reflect')
    array([[5, 0, 0],
           [3, 0, 0],
           [1, 0, 0]])

    """
    # Convolution is correlation with a flipped kernel; the shared helper
    # performs the flip (and origin adjustment) when `convolution` is true.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, convolution=True)
977
+
978
+
979
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D uniform filter along the given axis.

    The lines of the array along the given axis are filtered with a
    uniform filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length of uniform filter
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray
        Filtered array. Has same shape as `input`.

    Examples
    --------
    >>> from scipy.ndimage import uniform_filter1d
    >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([4, 3, 4, 1, 4, 6, 6, 3])
    """
    arr = np.asarray(input)
    axis = normalize_axis_index(axis, arr.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    is_complex = arr.dtype.kind == 'c'
    output = _ni_support._get_output(output, arr,
                                     complex_output=is_complex)
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    if is_complex:
        # The C routine is real-only; filter the real and imaginary
        # planes independently into the matching views of `output`.
        _nd_image.uniform_filter1d(arr.real, size, axis, output.real, mode,
                                   np.real(cval), origin)
        _nd_image.uniform_filter1d(arr.imag, size, axis, output.imag, mode,
                                   np.imag(cval), origin)
    else:
        _nd_image.uniform_filter1d(arr, size, axis, output, mode, cval,
                                   origin)
    return output
1028
+
1029
+
1030
+ @_ni_docstrings.docfiller
1031
+ def uniform_filter(input, size=3, output=None, mode="reflect",
1032
+ cval=0.0, origin=0, *, axes=None):
1033
+ """Multidimensional uniform filter.
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ %(input)s
1038
+ size : int or sequence of ints, optional
1039
+ The sizes of the uniform filter are given for each axis as a
1040
+ sequence, or as a single number, in which case the size is
1041
+ equal for all axes.
1042
+ %(output)s
1043
+ %(mode_multiple)s
1044
+ %(cval)s
1045
+ %(origin_multiple)s
1046
+ axes : tuple of int or None, optional
1047
+ If None, `input` is filtered along all axes. Otherwise,
1048
+ `input` is filtered along the specified axes. When `axes` is
1049
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1050
+ must match the length of `axes`. The ith entry in any of these tuples
1051
+ corresponds to the ith entry in `axes`.
1052
+
1053
+ Returns
1054
+ -------
1055
+ uniform_filter : ndarray
1056
+ Filtered array. Has the same shape as `input`.
1057
+
1058
+ Notes
1059
+ -----
1060
+ The multidimensional filter is implemented as a sequence of
1061
+ 1-D uniform filters. The intermediate arrays are stored
1062
+ in the same data type as the output. Therefore, for output types
1063
+ with a limited precision, the results may be imprecise because
1064
+ intermediate results may be stored with insufficient precision.
1065
+
1066
+ Examples
1067
+ --------
1068
+ >>> from scipy import ndimage, datasets
1069
+ >>> import matplotlib.pyplot as plt
1070
+ >>> fig = plt.figure()
1071
+ >>> plt.gray() # show the filtered result in grayscale
1072
+ >>> ax1 = fig.add_subplot(121) # left side
1073
+ >>> ax2 = fig.add_subplot(122) # right side
1074
+ >>> ascent = datasets.ascent()
1075
+ >>> result = ndimage.uniform_filter(ascent, size=20)
1076
+ >>> ax1.imshow(ascent)
1077
+ >>> ax2.imshow(result)
1078
+ >>> plt.show()
1079
+ """
1080
+ input = np.asarray(input)
1081
+ output = _ni_support._get_output(output, input,
1082
+ complex_output=input.dtype.kind == 'c')
1083
+ axes = _ni_support._check_axes(axes, input.ndim)
1084
+ num_axes = len(axes)
1085
+ sizes = _ni_support._normalize_sequence(size, num_axes)
1086
+ origins = _ni_support._normalize_sequence(origin, num_axes)
1087
+ modes = _ni_support._normalize_sequence(mode, num_axes)
1088
+ axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
1089
+ for ii in range(num_axes) if sizes[ii] > 1]
1090
+ if len(axes) > 0:
1091
+ for axis, size, origin, mode in axes:
1092
+ uniform_filter1d(input, int(size), axis, output, mode,
1093
+ cval, origin)
1094
+ input = output
1095
+ else:
1096
+ output[...] = input[...]
1097
+ return output
1098
+
1099
+
1100
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D minimum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    minimum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D minimum
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray.
        Filtered image. Has the same shape as `input`.

    Notes
    -----
    This function implements the MINLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html

    Examples
    --------
    >>> from scipy.ndimage import minimum_filter1d
    >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([2, 0, 0, 0, 1, 1, 0, 0])
    """
    arr = np.asarray(input)
    if np.iscomplexobj(arr):
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, arr.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, arr)
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode_code = _ni_support._extend_mode_to_code(mode)
    # Trailing flag 1 selects the *minimum* variant in the C routine.
    _nd_image.min_or_max_filter1d(arr, size, axis, output, mode_code, cval,
                                  origin, 1)
    return output
1155
+
1156
+
1157
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D maximum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    maximum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        Length along which to calculate the 1-D maximum.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    maximum1d : ndarray, None
        Maximum-filtered array with same shape as input.
        None if `output` is not None

    Notes
    -----
    This function implements the MAXLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html

    Examples
    --------
    >>> from scipy.ndimage import maximum_filter1d
    >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([8, 8, 8, 4, 9, 9, 9, 9])
    """
    arr = np.asarray(input)
    if np.iscomplexobj(arr):
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, arr.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, arr)
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode_code = _ni_support._extend_mode_to_code(mode)
    # Trailing flag 0 selects the *maximum* variant in the C routine.
    _nd_image.min_or_max_filter1d(arr, size, axis, output, mode_code, cval,
                                  origin, 0)
    return output
1212
+
1213
+
1214
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum, axes=None):
    # Shared implementation behind `minimum_filter` and `maximum_filter`
    # (`minimum` truthy selects the minimum variant). Dispatches between a
    # fast separable path (sequence of 1-D filters) and the general
    # footprint/structure path in the C extension.
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=3)
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable = True
        else:
            footprint = np.asarray(footprint, dtype=bool)
            if not footprint.any():
                raise ValueError("All-zero footprint is not supported.")
            if footprint.all():
                # A full footprint is equivalent to a box of its shape,
                # so the separable 1-D path can be used.
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        # A grayscale structuring element forces the general path.
        structure = np.asarray(structure, dtype=np.float64)
        separable = False
        if footprint is None:
            footprint = np.ones(structure.shape, bool)
        else:
            footprint = np.asarray(footprint, dtype=bool)
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError("Complex type not supported")
    output = _ni_support._get_output(output, input)
    temp_needed = np.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    if separable:
        origins = _ni_support._normalize_sequence(origin, num_axes)
        sizes = _ni_support._normalize_sequence(size, num_axes)
        modes = _ni_support._normalize_sequence(mode, num_axes)
        # Keep only axes whose filter length exceeds one (size-1 is a
        # no-op along that axis).
        axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
                for ii in range(len(axes)) if sizes[ii] > 1]
        if minimum:
            filter_ = minimum_filter1d
        else:
            filter_ = maximum_filter1d
        if len(axes) > 0:
            for axis, size, origin, mode in axes:
                # Each pass consumes the previous pass's result.
                filter_(input, int(size), axis, output, mode, cval, origin)
                input = output
        else:
            # Identity filter: copy the input through.
            output[...] = input[...]
    else:
        origins = _ni_support._normalize_sequence(origin, num_axes)
        if num_axes < input.ndim:
            # Filtering only a subset of axes: pad the footprint with
            # singleton dimensions on the untouched axes.
            if footprint.ndim != num_axes:
                raise RuntimeError("footprint array has incorrect shape")
            footprint = np.expand_dims(
                footprint,
                tuple(ax for ax in range(input.ndim) if ax not in axes)
            )
            # set origin = 0 for any axes not being filtered
            origins_temp = [0,] * input.ndim
            for o, ax in zip(origins, axes):
                origins_temp[ax] = o
            origins = origins_temp

        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError('footprint array has incorrect shape.')
        for origin, lenf in zip(origins, fshape):
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
                raise ValueError("invalid origin")
        # The C routine requires contiguous footprint/structure arrays.
        if not footprint.flags.contiguous:
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != input.ndim:
                raise RuntimeError("structure array has incorrect shape")
            if num_axes != structure.ndim:
                structure = np.expand_dims(
                    structure,
                    tuple(ax for ax in range(structure.ndim) if ax not in axes)
                )
            if not structure.flags.contiguous:
                structure = structure.copy()
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported for non-separable "
                "footprints")
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    if temp_needed:
        # Copy the result back into the caller-supplied (aliasing) array.
        temp[...] = output
        output = temp
    return output
1312
+
1313
+
1314
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0, *, axes=None):
    """Calculate a multidimensional minimum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size`, `origin`, and/or `mode`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    minimum_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    A sequence of modes (one per axis) is only supported when the footprint is
    separable. Otherwise, a single mode string must be provided.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.minimum_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # The penultimate argument (1) is the `minimum` flag of the shared
    # min/max implementation; structure is None for a flat filter.
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, 1, axes)
1360
+
1361
+
1362
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0, *, axes=None):
    """Calculate a multidimensional maximum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes. When `axes` is
        specified, any tuples used for `size`, `origin`, and/or `mode`
        must match the length of `axes`. The ith entry in any of these tuples
        corresponds to the ith entry in `axes`.

    Returns
    -------
    maximum_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    A sequence of modes (one per axis) is only supported when the footprint is
    separable. Otherwise, a single mode string must be provided.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.maximum_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # The penultimate argument (0) clears the `minimum` flag of the shared
    # min/max implementation, selecting the maximum operation.
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, 0, axes)
1408
+
1409
+
1410
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
                 mode="reflect", cval=0.0, origin=0, operation='rank',
                 axes=None):
    # Shared implementation behind rank_filter, median_filter and
    # percentile_filter. `operation` selects how `rank` is interpreted:
    # 'rank' uses it directly, 'median' ignores it, and 'percentile'
    # treats it as a percentage that is converted to a rank below.
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=3)
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    # origins/sizes are given per *filtered* axis (length num_axes), not
    # per input axis.
    origins = _ni_support._normalize_sequence(origin, num_axes)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, num_axes)
        footprint = np.ones(sizes, dtype=bool)
    else:
        footprint = np.asarray(footprint, dtype=bool)
    if num_axes < input.ndim:
        # Expand per-axis parameters to cover every input axis, so the
        # C implementation (which always works on all axes) sees a
        # size-1 pass-through filter on the unfiltered axes.
        # set origin = 0 for any axes not being filtered
        origins_temp = [0,] * input.ndim
        for o, ax in zip(origins, axes):
            origins_temp[ax] = o
        origins = origins_temp

        if not isinstance(mode, str) and isinstance(mode, Iterable):
            # set mode = 'constant' for any axes not being filtered
            modes = _ni_support._normalize_sequence(mode, num_axes)
            modes_temp = ['constant'] * input.ndim
            for m, ax in zip(modes, axes):
                modes_temp[ax] = m
            mode = modes_temp

        # insert singleton dimension along any non-filtered axes
        if footprint.ndim != num_axes:
            raise RuntimeError("footprint array has incorrect shape")
        footprint = np.expand_dims(
            footprint,
            tuple(ax for ax in range(input.ndim) if ax not in axes)
        )
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        # The shifted filter center must stay inside the footprint extent.
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        # The C code requires a contiguous footprint buffer.
        footprint = footprint.copy()
    # Number of True elements in the footprint; ranks index into this count.
    filter_size = np.where(footprint, 1, 0).sum()
    if operation == 'median':
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        if percentile < 0.0:
            # Negative percentiles count from the top (e.g. -20 -> 80).
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError('invalid percentile')
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        # Negative ranks count from the largest element (rank = -1 is max).
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    if rank == 0:
        # Rank 0 is the minimum; delegate to the faster min/max code path.
        # axes=None because footprint/origins were already expanded above.
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origins, axes=None)
    elif rank == filter_size - 1:
        # Highest rank is the maximum; same delegation.
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origins, axes=None)
    else:
        output = _ni_support._get_output(output, input)
        temp_needed = np.may_share_memory(input, output)
        if temp_needed:
            # input and output arrays cannot share memory
            temp = output
            output = _ni_support._get_output(output.dtype, input)
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported by non-separable rank "
                "filters")
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
                              origins)
        if temp_needed:
            temp[...] = output
            output = temp
        return output
1501
+
1502
+
1503
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
                mode="reflect", cval=0.0, origin=0, *, axes=None):
    """Calculate a multidimensional rank filter.

    Parameters
    ----------
    %(input)s
    rank : int
        The rank parameter may be less than zero, i.e., rank = -1
        indicates the largest element.
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes.

    Returns
    -------
    rank_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # Reject non-integral ranks (e.g. floats) up front with a clear error.
    rank = operator.index(rank)
    return _rank_filter(input, rank, size, footprint, output, mode, cval,
                        origin, 'rank', axes=axes)
1545
+
1546
+
1547
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
                  mode="reflect", cval=0.0, origin=0, *, axes=None):
    """
    Calculate a multidimensional median filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes.

    Returns
    -------
    median_filter : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    scipy.signal.medfilt2d

    Notes
    -----
    For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
    the specialised function `scipy.signal.medfilt2d` may be faster. It is
    however limited to constant mode with ``cval=0``.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.median_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # The rank argument (0) is a placeholder; with operation='median' the
    # actual rank is derived from the footprint size inside _rank_filter.
    return _rank_filter(input, 0, size, footprint, output, mode, cval,
                        origin, 'median', axes=axes)
1596
+
1597
+
1598
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
                      output=None, mode="reflect", cval=0.0, origin=0, *,
                      axes=None):
    """Calculate a multidimensional percentile filter.

    Parameters
    ----------
    %(input)s
    percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise,
        `input` is filtered along the specified axes.

    Returns
    -------
    percentile_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = datasets.ascent()
    >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # With operation='percentile', _rank_filter converts `percentile` into
    # a rank based on the footprint size.
    return _rank_filter(input, percentile, size, footprint, output, mode,
                        cval, origin, 'percentile', axes=axes)
1640
+
1641
+
1642
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
                     output=None, mode="reflect", cval=0.0, origin=0,
                     extra_arguments=(), extra_keywords=None):
    """Calculate a 1-D filter along the given axis.

    `generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The arguments of the line are the
    input line, and the output line. The input and output lines are 1-D
    double arrays. The input line is extended appropriately according
    to the filter size and origin. The output line must be modified
    in-place with the result.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply along given axis.
    filter_size : scalar
        Length of the filter.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s
    %(extra_arguments)s
    %(extra_keywords)s

    Returns
    -------
    generic_filter1d : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function also accepts low-level callback functions with one of
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int function(double *input_line, npy_intp input_length,
                    double *output_line, npy_intp output_length,
                    void *user_data)
       int function(double *input_line, intptr_t input_length,
                    double *output_line, intptr_t output_length,
                    void *user_data)

    The calling function iterates over the lines of the input and output
    arrays, calling the callback function at each line. The current line
    is extended according to the border conditions set by the calling
    function, and the result is copied into the array that is passed
    through ``input_line``. The length of the input line (after extension)
    is passed through ``input_length``. The callback function should apply
    the filter and store the result in the array passed through
    ``output_line``. The length of the output line is passed through
    ``output_length``. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    """
    if extra_keywords is None:
        extra_keywords = {}
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, input)
    if filter_size < 1:
        raise RuntimeError('invalid filter size')
    axis = normalize_axis_index(axis, input.ndim)
    # The shifted filter center must remain inside the filter extent.
    if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
                                           filter_size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # The C extension performs the line-by-line iteration and callback
    # dispatch, writing results directly into `output`.
    _nd_image.generic_filter1d(input, function, filter_size, axis, output,
                               mode, cval, origin, extra_arguments,
                               extra_keywords)
    return output
1728
+
1729
+
1730
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
                   output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords=None):
    """Calculate a multidimensional filter using the given function.

    At each element the provided function is called. The input values
    within the filter footprint at that element are passed to the function
    as a 1-D array of double values.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply at each element.
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    %(extra_arguments)s
    %(extra_keywords)s

    Returns
    -------
    generic_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function also accepts low-level callback functions with one of
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int callback(double *buffer, npy_intp filter_size,
                    double *return_value, void *user_data)
       int callback(double *buffer, intptr_t filter_size,
                    double *return_value, void *user_data)

    The calling function iterates over the elements of the input and
    output arrays, calling the callback function at each element. The
    elements within the footprint of the filter at the current element are
    passed through the ``buffer`` parameter, and the number of elements
    within the footprint through ``filter_size``. The calculated value is
    returned in ``return_value``. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    Examples
    --------
    Import the necessary modules and load the example image used for
    filtering.

    >>> import numpy as np
    >>> from scipy import datasets
    >>> from scipy.ndimage import zoom, generic_filter
    >>> import matplotlib.pyplot as plt
    >>> ascent = zoom(datasets.ascent(), 0.5)

    Compute a maximum filter with kernel size 5 by passing a simple NumPy
    aggregation function as argument to `function`.

    >>> maximum_filter_result = generic_filter(ascent, np.amax, [5, 5])

    While a maximum filter could also directly be obtained using
    `maximum_filter`, `generic_filter` allows generic Python function or
    `scipy.LowLevelCallable` to be used as a filter. Here, we compute the
    range between maximum and minimum value as an example for a kernel size
    of 5.

    >>> def custom_filter(image):
    ...     return np.amax(image) - np.amin(image)
    >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5])

    Plot the original and filtered images.

    >>> fig, axes = plt.subplots(3, 1, figsize=(3, 9))
    >>> plt.gray()  # show the filtered result in grayscale
    >>> top, middle, bottom = axes
    >>> for ax in axes:
    ...     ax.set_axis_off()  # remove coordinate system
    >>> top.imshow(ascent)
    >>> top.set_title("Original image")
    >>> middle.imshow(maximum_filter_result)
    >>> middle.set_title("Maximum filter, Kernel: 5x5")
    >>> bottom.imshow(custom_filter_result)
    >>> bottom.set_title("Custom filter, Kernel: 5x5")
    >>> fig.tight_layout()

    """
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=2)
    if extra_keywords is None:
        extra_keywords = {}
    input = np.asarray(input)
    if np.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        footprint = np.ones(sizes, dtype=bool)
    else:
        footprint = np.asarray(footprint, dtype=bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        # The shifted filter center must remain inside the footprint extent.
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        # The C code requires a contiguous footprint buffer.
        footprint = footprint.copy()
    output = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    # The C extension iterates over every element, collects the footprint
    # neighborhood into a 1-D double buffer, and invokes the callback.
    _nd_image.generic_filter(input, function, footprint, output, mode,
                             cval, origins, extra_arguments, extra_keywords)
    return output
parrot/lib/python3.10/site-packages/scipy/ndimage/_fourier.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ import numpy as np
32
+ from scipy._lib._util import normalize_axis_index
33
+ from . import _ni_support
34
+ from . import _nd_image
35
+
36
+ __all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
37
+ 'fourier_shift']
38
+
39
+
40
+ def _get_output_fourier(output, input):
41
+ if output is None:
42
+ if input.dtype.type in [np.complex64, np.complex128, np.float32]:
43
+ output = np.zeros(input.shape, dtype=input.dtype)
44
+ else:
45
+ output = np.zeros(input.shape, dtype=np.float64)
46
+ elif type(output) is type:
47
+ if output not in [np.complex64, np.complex128,
48
+ np.float32, np.float64]:
49
+ raise RuntimeError("output type not supported")
50
+ output = np.zeros(input.shape, dtype=output)
51
+ elif output.shape != input.shape:
52
+ raise RuntimeError("output shape not correct")
53
+ return output
54
+
55
+
56
+ def _get_output_fourier_complex(output, input):
57
+ if output is None:
58
+ if input.dtype.type in [np.complex64, np.complex128]:
59
+ output = np.zeros(input.shape, dtype=input.dtype)
60
+ else:
61
+ output = np.zeros(input.shape, dtype=np.complex128)
62
+ elif type(output) is type:
63
+ if output not in [np.complex64, np.complex128]:
64
+ raise RuntimeError("output type not supported")
65
+ output = np.zeros(input.shape, dtype=output)
66
+ elif output.shape != input.shape:
67
+ raise RuntimeError("output shape not correct")
68
+ return output
69
+
70
+
71
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
    """
    Multidimensional Gaussian fourier filter.

    The array is multiplied with the fourier transform of a Gaussian
    kernel.

    Parameters
    ----------
    input : array_like
        The input array.
    sigma : float or sequence
        The sigma of the Gaussian kernel. If a float, `sigma` is the same for
        all axes. If a sequence, `sigma` has to contain one value for each
        axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.

    Returns
    -------
    fourier_gaussian : ndarray
        The filtered input.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = datasets.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_gaussian(input_, sigma=4)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = np.asarray(input)
    output = _get_output_fourier(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # Broadcast a scalar sigma to one value per axis for the C code.
    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
    sigmas = np.asarray(sigmas, dtype=np.float64)
    if not sigmas.flags.contiguous:
        sigmas = sigmas.copy()

    # The final argument (0) selects the Gaussian filter type.
    _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
    return output
127
+
128
+
129
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
    """
    Multidimensional uniform fourier filter.

    The array is multiplied with the Fourier transform of a box of given
    size.

    Parameters
    ----------
    input : array_like
        The input array.
    size : float or sequence
        The size of the box used for filtering.
        If a float, `size` is the same for all axes. If a sequence, `size` has
        to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.

    Returns
    -------
    fourier_uniform : ndarray
        The filtered input.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = datasets.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_uniform(input_, size=20)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = np.asarray(input)
    output = _get_output_fourier(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # Broadcast a scalar size to one value per axis for the C code.
    sizes = _ni_support._normalize_sequence(size, input.ndim)
    sizes = np.asarray(sizes, dtype=np.float64)
    if not sizes.flags.contiguous:
        sizes = sizes.copy()
    # The final argument (1) selects the uniform (box) filter type.
    _nd_image.fourier_filter(input, sizes, n, axis, output, 1)
    return output
184
+
185
+
186
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
    """
    Multidimensional ellipsoid Fourier filter.

    The array is multiplied with the fourier transform of an ellipsoid of
    given sizes.

    Parameters
    ----------
    input : array_like
        The input array.
    size : float or sequence
        The size of the box used for filtering.
        If a float, `size` is the same for all axes. If a sequence, `size` has
        to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.

    Returns
    -------
    fourier_ellipsoid : ndarray
        The filtered input.

    Notes
    -----
    This function is implemented for arrays of rank 1, 2, or 3.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = datasets.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_ellipsoid(input_, size=20)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = np.asarray(input)
    if input.ndim > 3:
        raise NotImplementedError("Only 1d, 2d and 3d inputs are supported")
    output = _get_output_fourier(output, input)
    if output.size == 0:
        # The C code has a bug that can result in a segfault with arrays
        # that have size 0 (gh-17270), so check here.
        return output
    axis = normalize_axis_index(axis, input.ndim)
    # Broadcast a scalar size to one value per axis for the C code.
    sizes = _ni_support._normalize_sequence(size, input.ndim)
    sizes = np.asarray(sizes, dtype=np.float64)
    if not sizes.flags.contiguous:
        sizes = sizes.copy()
    # The final argument (2) selects the ellipsoid filter type.
    _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
    return output
251
+
252
+
253
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
    """
    Multidimensional Fourier shift filter.

    The array is multiplied with the Fourier transform of a shift operation.

    Parameters
    ----------
    input : array_like
        The input array.
    shift : float or sequence
        The shift to apply along each axis.
        If a float, `shift` is the same for all axes. If a sequence, `shift`
        has to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of shifting the input is placed in this array.

    Returns
    -------
    fourier_shift : ndarray
        The shifted input.

    Examples
    --------
    >>> from scipy import ndimage, datasets
    >>> import matplotlib.pyplot as plt
    >>> import numpy.fft
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = datasets.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_shift(input_, shift=200)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = np.asarray(input)
    # A shift multiplies by a complex phase, so the output must be complex.
    output = _get_output_fourier_complex(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # Broadcast a scalar shift to one value per axis for the C code.
    shifts = _ni_support._normalize_sequence(shift, input.ndim)
    shifts = np.asarray(shifts, dtype=np.float64)
    if not shifts.flags.contiguous:
        shifts = shifts.copy()
    _nd_image.fourier_shift(input, shifts, n, axis, output)
    return output
parrot/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py ADDED
@@ -0,0 +1,1001 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ import itertools
32
+ import warnings
33
+
34
+ import numpy as np
35
+ from scipy._lib._util import normalize_axis_index
36
+
37
+ from scipy import special
38
+ from . import _ni_support
39
+ from . import _nd_image
40
+ from ._ni_docstrings import docfiller
41
+
42
+
43
+ __all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
44
+ 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
45
+
46
+
47
+ @docfiller
48
+ def spline_filter1d(input, order=3, axis=-1, output=np.float64,
49
+ mode='mirror'):
50
+ """
51
+ Calculate a 1-D spline filter along the given axis.
52
+
53
+ The lines of the array along the given axis are filtered by a
54
+ spline filter. The order of the spline must be >= 2 and <= 5.
55
+
56
+ Parameters
57
+ ----------
58
+ %(input)s
59
+ order : int, optional
60
+ The order of the spline, default is 3.
61
+ axis : int, optional
62
+ The axis along which the spline filter is applied. Default is the last
63
+ axis.
64
+ output : ndarray or dtype, optional
65
+ The array in which to place the output, or the dtype of the returned
66
+ array. Default is ``numpy.float64``.
67
+ %(mode_interp_mirror)s
68
+
69
+ Returns
70
+ -------
71
+ spline_filter1d : ndarray
72
+ The filtered input.
73
+
74
+ See Also
75
+ --------
76
+ spline_filter : Multidimensional spline filter.
77
+
78
+ Notes
79
+ -----
80
+ All of the interpolation functions in `ndimage` do spline interpolation of
81
+ the input image. If using B-splines of `order > 1`, the input image
82
+ values have to be converted to B-spline coefficients first, which is
83
+ done by applying this 1-D filter sequentially along all
84
+ axes of the input. All functions that require B-spline coefficients
85
+ will automatically filter their inputs, a behavior controllable with
86
+ the `prefilter` keyword argument. For functions that accept a `mode`
87
+ parameter, the result will only be correct if it matches the `mode`
88
+ used when filtering.
89
+
90
+ For complex-valued `input`, this function processes the real and imaginary
91
+ components independently.
92
+
93
+ .. versionadded:: 1.6.0
94
+ Complex-valued support added.
95
+
96
+ Examples
97
+ --------
98
+ We can filter an image using 1-D spline along the given axis:
99
+
100
+ >>> from scipy.ndimage import spline_filter1d
101
+ >>> import numpy as np
102
+ >>> import matplotlib.pyplot as plt
103
+ >>> orig_img = np.eye(20) # create an image
104
+ >>> orig_img[10, :] = 1.0
105
+ >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
106
+ >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
107
+ >>> f, ax = plt.subplots(1, 3, sharex=True)
108
+ >>> for ind, data in enumerate([[orig_img, "original image"],
109
+ ... [sp_filter_axis_0, "spline filter (axis=0)"],
110
+ ... [sp_filter_axis_1, "spline filter (axis=1)"]]):
111
+ ... ax[ind].imshow(data[0], cmap='gray_r')
112
+ ... ax[ind].set_title(data[1])
113
+ >>> plt.tight_layout()
114
+ >>> plt.show()
115
+
116
+ """
117
+ if order < 0 or order > 5:
118
+ raise RuntimeError('spline order not supported')
119
+ input = np.asarray(input)
120
+ complex_output = np.iscomplexobj(input)
121
+ output = _ni_support._get_output(output, input,
122
+ complex_output=complex_output)
123
+ if complex_output:
124
+ spline_filter1d(input.real, order, axis, output.real, mode)
125
+ spline_filter1d(input.imag, order, axis, output.imag, mode)
126
+ return output
127
+ if order in [0, 1]:
128
+ output[...] = np.array(input)
129
+ else:
130
+ mode = _ni_support._extend_mode_to_code(mode)
131
+ axis = normalize_axis_index(axis, input.ndim)
132
+ _nd_image.spline_filter1d(input, order, axis, output, mode)
133
+ return output
134
+
135
@docfiller
def spline_filter(input, order=3, output=np.float64, mode='mirror'):
    """
    Multidimensional spline filter.

    Parameters
    ----------
    %(input)s
    order : int, optional
        The order of the spline, default is 3.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the
        returned array. Default is ``numpy.float64``.
    %(mode_interp_mirror)s

    Returns
    -------
    spline_filter : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    spline_filter1d : Calculate a 1-D spline filter along the given axis.

    Notes
    -----
    The multidimensional filter is implemented by running
    `spline_filter1d` once along each axis, reusing the output array
    between passes. Intermediate results are therefore stored in the
    output's data type, so output types with limited precision may make
    the result imprecise.

    For complex-valued `input`, the real and imaginary components are
    filtered independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.
    """
    if order < 2 or order > 5:
        raise RuntimeError('spline order not supported')
    data = np.asarray(input)
    is_complex = np.iscomplexobj(data)
    result = _ni_support._get_output(output, data,
                                     complex_output=is_complex)
    if is_complex:
        spline_filter(data.real, order, result.real, mode)
        spline_filter(data.imag, order, result.imag, mode)
        return result
    if order in [0, 1] or data.ndim == 0:
        # Nothing to filter: copy the data into the output as-is.
        result[...] = data[...]
    else:
        # One 1-D pass per axis; each pass reads the previous pass's
        # result from the shared output array.
        for ax in range(data.ndim):
            spline_filter1d(data, order, ax, output=result, mode=mode)
            data = result
    return result
+
210
+
211
+ def _prepad_for_spline_filter(input, mode, cval):
212
+ if mode in ['nearest', 'grid-constant']:
213
+ npad = 12
214
+ if mode == 'grid-constant':
215
+ padded = np.pad(input, npad, mode='constant',
216
+ constant_values=cval)
217
+ elif mode == 'nearest':
218
+ padded = np.pad(input, npad, mode='edge')
219
+ else:
220
+ # other modes have exact boundary conditions implemented so
221
+ # no prepadding is needed
222
+ npad = 0
223
+ padded = input
224
+ return padded, npad
225
+
226
+
227
@docfiller
def geometric_transform(input, mapping, output_shape=None,
                        output=None, order=3,
                        mode='constant', cval=0.0, prefilter=True,
                        extra_arguments=(), extra_keywords={}):
    """
    Apply an arbitrary geometric transform.

    For every point of the output array, `mapping` is used to find the
    corresponding coordinates in the input; the input value there is
    obtained by spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    mapping : {callable, scipy.LowLevelCallable}
        A callable object that accepts a tuple of length equal to the output
        array rank, and returns the corresponding input coordinates as a tuple
        of length equal to the input array rank.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s
    extra_arguments : tuple, optional
        Extra arguments passed to `mapping`.
    extra_keywords : dict, optional
        Extra keywords passed to `mapping`.

    Returns
    -------
    output : ndarray
        The filtered input.

    See Also
    --------
    map_coordinates, affine_transform, spline_filter1d

    Notes
    -----
    `mapping` may also be a low-level callback function wrapped in
    `scipy.LowLevelCallable`, with one of the following signatures:

    .. code:: c

        int mapping(npy_intp *output_coordinates, double *input_coordinates,
                    int output_rank, int input_rank, void *user_data)
        int mapping(intptr_t *output_coordinates, double *input_coordinates,
                    int output_rank, int input_rank, void *user_data)

    The callback is invoked once per output element with that element's
    coordinates in ``output_coordinates``; it must write the coordinates
    at which the input is to be interpolated into ``input_coordinates``
    and return a nonzero status (zero signals an error, in which case the
    callback should normally set the Python error state first).
    ``user_data`` is the pointer given to `scipy.LowLevelCallable` as-is.

    For complex-valued `input`, this function transforms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy.ndimage import geometric_transform
    >>> b = [1, 2, 3, 4, 5]
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 3,)
    ...
    >>> geometric_transform(b, shift_func, mode='constant')
    array([0, 0, 0, 1, 2])
    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    data = np.asarray(input)
    shape = data.shape if output_shape is None else output_shape
    if data.ndim < 1 or len(shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    is_complex = np.iscomplexobj(data)
    result = _ni_support._get_output(output, data, shape=shape,
                                     complex_output=is_complex)
    if is_complex:
        # Transform real and imaginary parts independently, splitting
        # cval into its real and imaginary components as well.
        common = dict(order=order, mode=mode, prefilter=prefilter,
                      output_shape=shape,
                      extra_arguments=extra_arguments,
                      extra_keywords=extra_keywords)
        geometric_transform(data.real, mapping, output=result.real,
                            cval=np.real(cval), **common)
        geometric_transform(data.imag, mapping, output=result.imag,
                            cval=np.imag(cval), **common)
        return result

    if prefilter and order > 1:
        # Convert samples to B-spline coefficients, padding first for
        # modes without an exact prefilter boundary condition.
        padded, npad = _prepad_for_spline_filter(data, mode, cval)
        coeffs = spline_filter(padded, order, output=np.float64,
                               mode=mode)
    else:
        npad = 0
        coeffs = data
    mode_code = _ni_support._extend_mode_to_code(mode)
    _nd_image.geometric_transform(coeffs, mapping, None, None, None,
                                  result, order, mode_code, cval, npad,
                                  extra_arguments, extra_keywords)
    return result
+
370
+
371
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
                    mode='constant', cval=0.0, prefilter=True):
    """
    Map the input array to new coordinates by interpolation.

    The first axis of `coordinates` indexes the input's dimensions; each
    remaining position holds one coordinate vector, so the output's shape
    is ``coordinates.shape[1:]``. The value at each coordinate is obtained
    by spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    coordinates : array_like
        The coordinates at which `input` is evaluated.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    map_coordinates : ndarray
        The result of transforming the input. The shape of the output is
        derived from that of `coordinates` by dropping the first axis.

    See Also
    --------
    spline_filter, geometric_transform, scipy.interpolate

    Notes
    -----
    For complex-valued `input`, this function maps the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    Examples
    --------
    >>> from scipy import ndimage
    >>> import numpy as np
    >>> a = np.arange(12.).reshape((4, 3))
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])
    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    data = np.asarray(input)
    coords = np.asarray(coordinates)
    if np.iscomplexobj(coords):
        raise TypeError('Complex type not supported')
    out_shape = coords.shape[1:]
    if data.ndim < 1 or len(out_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coords.shape[0] != data.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    is_complex = np.iscomplexobj(data)
    result = _ni_support._get_output(output, data, shape=out_shape,
                                     complex_output=is_complex)
    if is_complex:
        # Map real and imaginary parts independently.
        common = dict(order=order, mode=mode, prefilter=prefilter)
        map_coordinates(data.real, coords, output=result.real,
                        cval=np.real(cval), **common)
        map_coordinates(data.imag, coords, output=result.imag,
                        cval=np.imag(cval), **common)
        return result
    if prefilter and order > 1:
        # Convert samples to B-spline coefficients, padding first for
        # modes without an exact prefilter boundary condition.
        padded, npad = _prepad_for_spline_filter(data, mode, cval)
        coeffs = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        coeffs = data
    mode_code = _ni_support._extend_mode_to_code(mode)
    _nd_image.geometric_transform(coeffs, None, coords, None, None,
                                  result, order, mode_code, cval, npad,
                                  None, None)
    return result
+
475
+
476
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
                     output=None, order=3,
                     mode='constant', cval=0.0, prefilter=True):
    """
    Apply an affine transformation.

    Given an output image pixel index vector ``o``, the pixel value
    is determined from the input image at position
    ``np.dot(matrix, o) + offset``.

    This does 'pull' (or 'backward') resampling, transforming the output space
    to the input to locate data. Affine transformations are often described in
    the 'push' (or 'forward') direction, transforming input to output. If you
    have a matrix for the 'push' transformation, use its inverse
    (:func:`numpy.linalg.inv`) in this function.

    Parameters
    ----------
    %(input)s
    matrix : ndarray
        The inverse coordinate transformation matrix, mapping output
        coordinates to input coordinates. If ``ndim`` is the number of
        dimensions of ``input``, the given matrix must have one of the
        following shapes:

            - ``(ndim, ndim)``: the linear transformation matrix for each
              output coordinate.
            - ``(ndim,)``: assume that the 2-D transformation matrix is
              diagonal, with the diagonal specified by the given value. A more
              efficient algorithm is then used that exploits the separability
              of the problem.
            - ``(ndim + 1, ndim + 1)``: assume that the transformation is
              specified using homogeneous coordinates [1]_. In this case, any
              value passed to ``offset`` is ignored.
            - ``(ndim, ndim + 1)``: as above, but the bottom row of a
              homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
              and may be omitted.

    offset : float or sequence, optional
        The offset into the array where the transform is applied. If a float,
        `offset` is the same for each axis. If a sequence, `offset` should
        contain one value for each axis.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    affine_transform : ndarray
        The transformed input.

    Notes
    -----
    The given matrix and offset are used to find for each point in the
    output the corresponding coordinates in the input by an affine
    transformation. The value of the input at those coordinates is
    determined by spline interpolation of the requested order. Points
    outside the boundaries of the input are filled according to the given
    mode.

    .. versionchanged:: 0.18.0
        Previously, the exact interpretation of the affine transformation
        depended on whether the matrix was supplied as a 1-D or a
        2-D array. If a 1-D array was supplied
        to the matrix parameter, the output pixel value at index ``o``
        was determined from the input image at position
        ``matrix * (o + offset)``.

    For complex-valued `input`, this function transforms the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = np.asarray(input)
    # If no explicit shape is given, fall back to a preallocated output
    # array's shape, then to the input's shape.
    if output_shape is None:
        if isinstance(output, np.ndarray):
            output_shape = output.shape
        else:
            output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    complex_output = np.iscomplexobj(input)
    output = _ni_support._get_output(output, input, shape=output_shape,
                                     complex_output=complex_output)
    if complex_output:
        # Transform real and imaginary parts independently, splitting
        # cval into its real and imaginary components as well.
        kwargs = dict(offset=offset, output_shape=output_shape, order=order,
                      mode=mode, prefilter=prefilter)
        affine_transform(input.real, matrix, output=output.real,
                         cval=np.real(cval), **kwargs)
        affine_transform(input.imag, matrix, output=output.imag,
                         cval=np.imag(cval), **kwargs)
        return output
    if prefilter and order > 1:
        # Convert samples to B-spline coefficients, padding first for
        # modes without an exact prefilter boundary condition.
        padded, npad = _prepad_for_spline_filter(input, mode, cval)
        filtered = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        filtered = input
    mode = _ni_support._extend_mode_to_code(mode)
    matrix = np.asarray(matrix, dtype=np.float64)
    if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
        raise RuntimeError('no proper affine matrix provided')
    # Homogeneous-coordinate form: (ndim, ndim + 1) or (ndim + 1, ndim + 1).
    if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
            (matrix.shape[0] in [input.ndim, input.ndim + 1])):
        if matrix.shape[0] == input.ndim + 1:
            # A full homogeneous matrix must have [0, ..., 0, 1] as its
            # bottom row; anything else is not an affine transform.
            exptd = [0] * input.ndim + [1]
            if not np.all(matrix[input.ndim] == exptd):
                msg = (f'Expected homogeneous transformation matrix with '
                       f'shape {matrix.shape} for image shape {input.shape}, '
                       f'but bottom row was not equal to {exptd}')
                raise ValueError(msg)
        # assume input is homogeneous coordinate transformation matrix
        # (last column is the translation; any user-supplied offset is
        # overridden, as documented).
        offset = matrix[:input.ndim, input.ndim]
        matrix = matrix[:input.ndim, :input.ndim]
    if matrix.shape[0] != input.ndim:
        raise RuntimeError('affine matrix has wrong number of rows')
    if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
        raise RuntimeError('affine matrix has wrong number of columns')
    # The C routines require C-contiguous float64 arrays.
    if not matrix.flags.contiguous:
        matrix = matrix.copy()
    offset = _ni_support._normalize_sequence(offset, input.ndim)
    offset = np.asarray(offset, dtype=np.float64)
    if offset.ndim != 1 or offset.shape[0] < 1:
        raise RuntimeError('no proper offset provided')
    if not offset.flags.contiguous:
        offset = offset.copy()
    if matrix.ndim == 1:
        # Diagonal matrix given as a 1-D array: use the faster separable
        # zoom/shift routine. The semantics of this path changed in
        # SciPy 0.18.0, hence the warning. The per-axis offsets are
        # rescaled by the diagonal entries (offset/matrix) for that
        # routine.
        warnings.warn(
            "The behavior of affine_transform with a 1-D "
            "array supplied for the matrix parameter has changed in "
            "SciPy 0.18.0.",
            stacklevel=2
        )
        _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
                             mode, cval, npad, False)
    else:
        # General (ndim, ndim) matrix: full geometric transform.
        _nd_image.geometric_transform(filtered, None, None, matrix, offset,
                                      output, order, mode, cval, npad, None,
                                      None)
    return output
+
631
+
632
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
          prefilter=True):
    """
    Shift an array.

    The array is shifted using spline interpolation of the requested order.
    Points outside the boundaries of the input are filled according to the
    given mode.

    Parameters
    ----------
    %(input)s
    shift : float or sequence
        The shift along the axes. If a float, `shift` is the same for each
        axis. If a sequence, `shift` should contain one value for each axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode_interp_constant)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    shift : ndarray
        The shifted input.

    See Also
    --------
    affine_transform : Affine transformations

    Notes
    -----
    For complex-valued `input`, this function shifts the real and imaginary
    components independently.

    .. versionadded:: 1.6.0
        Complex-valued support added.
    """
    if not 0 <= order <= 5:
        raise RuntimeError('spline order not supported')
    data = np.asarray(input)
    if data.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    is_complex = np.iscomplexobj(data)
    result = _ni_support._get_output(output, data, complex_output=is_complex)
    if is_complex:
        # The `shift` parameter shadows this function's name, so re-import
        # it under an alias for the recursive calls.
        from scipy.ndimage._interpolation import shift as _shift

        common = dict(order=order, mode=mode, prefilter=prefilter)
        _shift(data.real, shift, output=result.real, cval=np.real(cval),
               **common)
        _shift(data.imag, shift, output=result.imag, cval=np.imag(cval),
               **common)
        return result
    if prefilter and order > 1:
        # Convert samples to B-spline coefficients, padding first for
        # modes without an exact prefilter boundary condition.
        padded, npad = _prepad_for_spline_filter(data, mode, cval)
        coeffs = spline_filter(padded, order, output=np.float64, mode=mode)
    else:
        npad = 0
        coeffs = data
    mode_code = _ni_support._extend_mode_to_code(mode)
    # The C routine expects the negated shift vector as a contiguous
    # float64 array with one entry per axis.
    offsets = np.asarray(
        [-delta for delta in
         _ni_support._normalize_sequence(shift, data.ndim)],
        dtype=np.float64)
    if not offsets.flags.contiguous:
        offsets = offsets.copy()
    _nd_image.zoom_shift(coeffs, None, offsets, result, order, mode_code,
                         cval, npad, False)
    return result
+
736
+
737
+ @docfiller
738
+ def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
739
+ prefilter=True, *, grid_mode=False):
740
+ """
741
+ Zoom an array.
742
+
743
+ The array is zoomed using spline interpolation of the requested order.
744
+
745
+ Parameters
746
+ ----------
747
+ %(input)s
748
+ zoom : float or sequence
749
+ The zoom factor along the axes. If a float, `zoom` is the same for each
750
+ axis. If a sequence, `zoom` should contain one value for each axis.
751
+ %(output)s
752
+ order : int, optional
753
+ The order of the spline interpolation, default is 3.
754
+ The order has to be in the range 0-5.
755
+ %(mode_interp_constant)s
756
+ %(cval)s
757
+ %(prefilter)s
758
+ grid_mode : bool, optional
759
+ If False, the distance from the pixel centers is zoomed. Otherwise, the
760
+ distance including the full pixel extent is used. For example, a 1d
761
+ signal of length 5 is considered to have length 4 when `grid_mode` is
762
+ False, but length 5 when `grid_mode` is True. See the following
763
+ visual illustration:
764
+
765
+ .. code-block:: text
766
+
767
+ | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
768
+ |<-------------------------------------->|
769
+ vs.
770
+ |<----------------------------------------------->|
771
+
772
+ The starting point of the arrow in the diagram above corresponds to
773
+ coordinate location 0 in each mode.
774
+
775
+ Returns
776
+ -------
777
+ zoom : ndarray
778
+ The zoomed input.
779
+
780
+ Notes
781
+ -----
782
+ For complex-valued `input`, this function zooms the real and imaginary
783
+ components independently.
784
+
785
+ .. versionadded:: 1.6.0
786
+ Complex-valued support added.
787
+
788
+ Examples
789
+ --------
790
+ >>> from scipy import ndimage, datasets
791
+ >>> import matplotlib.pyplot as plt
792
+
793
+ >>> fig = plt.figure()
794
+ >>> ax1 = fig.add_subplot(121) # left side
795
+ >>> ax2 = fig.add_subplot(122) # right side
796
+ >>> ascent = datasets.ascent()
797
+ >>> result = ndimage.zoom(ascent, 3.0)
798
+ >>> ax1.imshow(ascent, vmin=0, vmax=255)
799
+ >>> ax2.imshow(result, vmin=0, vmax=255)
800
+ >>> plt.show()
801
+
802
+ >>> print(ascent.shape)
803
+ (512, 512)
804
+
805
+ >>> print(result.shape)
806
+ (1536, 1536)
807
+ """
808
+ if order < 0 or order > 5:
809
+ raise RuntimeError('spline order not supported')
810
+ input = np.asarray(input)
811
+ if input.ndim < 1:
812
+ raise RuntimeError('input and output rank must be > 0')
813
+ zoom = _ni_support._normalize_sequence(zoom, input.ndim)
814
+ output_shape = tuple(
815
+ [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
816
+ complex_output = np.iscomplexobj(input)
817
+ output = _ni_support._get_output(output, input, shape=output_shape,
818
+ complex_output=complex_output)
819
+ if complex_output:
820
+ # import under different name to avoid confusion with zoom parameter
821
+ from scipy.ndimage._interpolation import zoom as _zoom
822
+
823
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter)
824
+ _zoom(input.real, zoom, output=output.real, cval=np.real(cval), **kwargs)
825
+ _zoom(input.imag, zoom, output=output.imag, cval=np.imag(cval), **kwargs)
826
+ return output
827
+ if prefilter and order > 1:
828
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
829
+ filtered = spline_filter(padded, order, output=np.float64, mode=mode)
830
+ else:
831
+ npad = 0
832
+ filtered = input
833
+ if grid_mode:
834
+ # warn about modes that may have surprising behavior
835
+ suggest_mode = None
836
+ if mode == 'constant':
837
+ suggest_mode = 'grid-constant'
838
+ elif mode == 'wrap':
839
+ suggest_mode = 'grid-wrap'
840
+ if suggest_mode is not None:
841
+ warnings.warn(
842
+ (f"It is recommended to use mode = {suggest_mode} instead of {mode} "
843
+ f"when grid_mode is True."),
844
+ stacklevel=2
845
+ )
846
+ mode = _ni_support._extend_mode_to_code(mode)
847
+
848
+ zoom_div = np.array(output_shape)
849
+ zoom_nominator = np.array(input.shape)
850
+ if not grid_mode:
851
+ zoom_div -= 1
852
+ zoom_nominator -= 1
853
+
854
+ # Zooming to infinite values is unpredictable, so just choose
855
+ # zoom factor 1 instead
856
+ zoom = np.divide(zoom_nominator, zoom_div,
857
+ out=np.ones_like(input.shape, dtype=np.float64),
858
+ where=zoom_div != 0)
859
+ zoom = np.ascontiguousarray(zoom)
860
+ _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad,
861
+ grid_mode)
862
+ return output
863
+
864
+
865
+ @docfiller
866
+ def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
867
+ mode='constant', cval=0.0, prefilter=True):
868
+ """
869
+ Rotate an array.
870
+
871
+ The array is rotated in the plane defined by the two axes given by the
872
+ `axes` parameter using spline interpolation of the requested order.
873
+
874
+ Parameters
875
+ ----------
876
+ %(input)s
877
+ angle : float
878
+ The rotation angle in degrees.
879
+ axes : tuple of 2 ints, optional
880
+ The two axes that define the plane of rotation. Default is the first
881
+ two axes.
882
+ reshape : bool, optional
883
+ If `reshape` is true, the output shape is adapted so that the input
884
+ array is contained completely in the output. Default is True.
885
+ %(output)s
886
+ order : int, optional
887
+ The order of the spline interpolation, default is 3.
888
+ The order has to be in the range 0-5.
889
+ %(mode_interp_constant)s
890
+ %(cval)s
891
+ %(prefilter)s
892
+
893
+ Returns
894
+ -------
895
+ rotate : ndarray
896
+ The rotated input.
897
+
898
+ Notes
899
+ -----
900
+ For complex-valued `input`, this function rotates the real and imaginary
901
+ components independently.
902
+
903
+ .. versionadded:: 1.6.0
904
+ Complex-valued support added.
905
+
906
+ Examples
907
+ --------
908
+ >>> from scipy import ndimage, datasets
909
+ >>> import matplotlib.pyplot as plt
910
+ >>> fig = plt.figure(figsize=(10, 3))
911
+ >>> ax1, ax2, ax3 = fig.subplots(1, 3)
912
+ >>> img = datasets.ascent()
913
+ >>> img_45 = ndimage.rotate(img, 45, reshape=False)
914
+ >>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
915
+ >>> ax1.imshow(img, cmap='gray')
916
+ >>> ax1.set_axis_off()
917
+ >>> ax2.imshow(img_45, cmap='gray')
918
+ >>> ax2.set_axis_off()
919
+ >>> ax3.imshow(full_img_45, cmap='gray')
920
+ >>> ax3.set_axis_off()
921
+ >>> fig.set_layout_engine('tight')
922
+ >>> plt.show()
923
+ >>> print(img.shape)
924
+ (512, 512)
925
+ >>> print(img_45.shape)
926
+ (512, 512)
927
+ >>> print(full_img_45.shape)
928
+ (724, 724)
929
+
930
+ """
931
+ input_arr = np.asarray(input)
932
+ ndim = input_arr.ndim
933
+
934
+ if ndim < 2:
935
+ raise ValueError('input array should be at least 2D')
936
+
937
+ axes = list(axes)
938
+
939
+ if len(axes) != 2:
940
+ raise ValueError('axes should contain exactly two values')
941
+
942
+ if not all([float(ax).is_integer() for ax in axes]):
943
+ raise ValueError('axes should contain only integer values')
944
+
945
+ if axes[0] < 0:
946
+ axes[0] += ndim
947
+ if axes[1] < 0:
948
+ axes[1] += ndim
949
+ if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
950
+ raise ValueError('invalid rotation plane specified')
951
+
952
+ axes.sort()
953
+
954
+ c, s = special.cosdg(angle), special.sindg(angle)
955
+
956
+ rot_matrix = np.array([[c, s],
957
+ [-s, c]])
958
+
959
+ img_shape = np.asarray(input_arr.shape)
960
+ in_plane_shape = img_shape[axes]
961
+ if reshape:
962
+ # Compute transformed input bounds
963
+ iy, ix = in_plane_shape
964
+ out_bounds = rot_matrix @ [[0, 0, iy, iy],
965
+ [0, ix, 0, ix]]
966
+ # Compute the shape of the transformed input plane
967
+ out_plane_shape = (np.ptp(out_bounds, axis=1) + 0.5).astype(int)
968
+ else:
969
+ out_plane_shape = img_shape[axes]
970
+
971
+ out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
972
+ in_center = (in_plane_shape - 1) / 2
973
+ offset = in_center - out_center
974
+
975
+ output_shape = img_shape
976
+ output_shape[axes] = out_plane_shape
977
+ output_shape = tuple(output_shape)
978
+
979
+ complex_output = np.iscomplexobj(input_arr)
980
+ output = _ni_support._get_output(output, input_arr, shape=output_shape,
981
+ complex_output=complex_output)
982
+
983
+ if ndim <= 2:
984
+ affine_transform(input_arr, rot_matrix, offset, output_shape, output,
985
+ order, mode, cval, prefilter)
986
+ else:
987
+ # If ndim > 2, the rotation is applied over all the planes
988
+ # parallel to axes
989
+ planes_coord = itertools.product(
990
+ *[[slice(None)] if ax in axes else range(img_shape[ax])
991
+ for ax in range(ndim)])
992
+
993
+ out_plane_shape = tuple(out_plane_shape)
994
+
995
+ for coordinates in planes_coord:
996
+ ia = input_arr[coordinates]
997
+ oa = output[coordinates]
998
+ affine_transform(ia, rot_matrix, offset, out_plane_shape,
999
+ oa, order, mode, cval, prefilter)
1000
+
1001
+ return output
parrot/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Docstring components common to several ndimage functions."""
2
+ from scipy._lib import doccer
3
+
4
+ __all__ = ['docfiller']
5
+
6
+
7
+ _input_doc = (
8
+ """input : array_like
9
+ The input array.""")
10
+ _axis_doc = (
11
+ """axis : int, optional
12
+ The axis of `input` along which to calculate. Default is -1.""")
13
+ _output_doc = (
14
+ """output : array or dtype, optional
15
+ The array in which to place the output, or the dtype of the
16
+ returned array. By default an array of the same dtype as input
17
+ will be created.""")
18
+ _size_foot_doc = (
19
+ """size : scalar or tuple, optional
20
+ See footprint, below. Ignored if footprint is given.
21
+ footprint : array, optional
22
+ Either `size` or `footprint` must be defined. `size` gives
23
+ the shape that is taken from the input array, at every element
24
+ position, to define the input to the filter function.
25
+ `footprint` is a boolean array that specifies (implicitly) a
26
+ shape, but also which of the elements within this shape will get
27
+ passed to the filter function. Thus ``size=(n,m)`` is equivalent
28
+ to ``footprint=np.ones((n,m))``. We adjust `size` to the number
29
+ of dimensions of the input array, so that, if the input array is
30
+ shape (10,10,10), and `size` is 2, then the actual size used is
31
+ (2,2,2). When `footprint` is given, `size` is ignored.""")
32
+ _mode_reflect_doc = (
33
+ """mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
34
+ The `mode` parameter determines how the input array is extended
35
+ beyond its boundaries. Default is 'reflect'. Behavior for each valid
36
+ value is as follows:
37
+
38
+ 'reflect' (`d c b a | a b c d | d c b a`)
39
+ The input is extended by reflecting about the edge of the last
40
+ pixel. This mode is also sometimes referred to as half-sample
41
+ symmetric.
42
+
43
+ 'constant' (`k k k k | a b c d | k k k k`)
44
+ The input is extended by filling all values beyond the edge with
45
+ the same constant value, defined by the `cval` parameter.
46
+
47
+ 'nearest' (`a a a a | a b c d | d d d d`)
48
+ The input is extended by replicating the last pixel.
49
+
50
+ 'mirror' (`d c b | a b c d | c b a`)
51
+ The input is extended by reflecting about the center of the last
52
+ pixel. This mode is also sometimes referred to as whole-sample
53
+ symmetric.
54
+
55
+ 'wrap' (`a b c d | a b c d | a b c d`)
56
+ The input is extended by wrapping around to the opposite edge.
57
+
58
+ For consistency with the interpolation functions, the following mode
59
+ names can also be used:
60
+
61
+ 'grid-mirror'
62
+ This is a synonym for 'reflect'.
63
+
64
+ 'grid-constant'
65
+ This is a synonym for 'constant'.
66
+
67
+ 'grid-wrap'
68
+ This is a synonym for 'wrap'.""")
69
+
70
+ _mode_interp_constant_doc = (
71
+ """mode : {'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', \
72
+ 'mirror', 'grid-wrap', 'wrap'}, optional
73
+ The `mode` parameter determines how the input array is extended
74
+ beyond its boundaries. Default is 'constant'. Behavior for each valid
75
+ value is as follows (see additional plots and details on
76
+ :ref:`boundary modes <ndimage-interpolation-modes>`):
77
+
78
+ 'reflect' (`d c b a | a b c d | d c b a`)
79
+ The input is extended by reflecting about the edge of the last
80
+ pixel. This mode is also sometimes referred to as half-sample
81
+ symmetric.
82
+
83
+ 'grid-mirror'
84
+ This is a synonym for 'reflect'.
85
+
86
+ 'constant' (`k k k k | a b c d | k k k k`)
87
+ The input is extended by filling all values beyond the edge with
88
+ the same constant value, defined by the `cval` parameter. No
89
+ interpolation is performed beyond the edges of the input.
90
+
91
+ 'grid-constant' (`k k k k | a b c d | k k k k`)
92
+ The input is extended by filling all values beyond the edge with
93
+ the same constant value, defined by the `cval` parameter. Interpolation
94
+ occurs for samples outside the input's extent as well.
95
+
96
+ 'nearest' (`a a a a | a b c d | d d d d`)
97
+ The input is extended by replicating the last pixel.
98
+
99
+ 'mirror' (`d c b | a b c d | c b a`)
100
+ The input is extended by reflecting about the center of the last
101
+ pixel. This mode is also sometimes referred to as whole-sample
102
+ symmetric.
103
+
104
+ 'grid-wrap' (`a b c d | a b c d | a b c d`)
105
+ The input is extended by wrapping around to the opposite edge.
106
+
107
+ 'wrap' (`d b c d | a b c d | b c a b`)
108
+ The input is extended by wrapping around to the opposite edge, but in a
109
+ way such that the last point and initial point exactly overlap. In this
110
+ case it is not well defined which sample will be chosen at the point of
111
+ overlap.""")
112
+ _mode_interp_mirror_doc = (
113
+ _mode_interp_constant_doc.replace("Default is 'constant'",
114
+ "Default is 'mirror'")
115
+ )
116
+ assert _mode_interp_mirror_doc != _mode_interp_constant_doc, \
117
+ 'Default not replaced'
118
+
119
+ _mode_multiple_doc = (
120
+ """mode : str or sequence, optional
121
+ The `mode` parameter determines how the input array is extended
122
+ when the filter overlaps a border. By passing a sequence of modes
123
+ with length equal to the number of dimensions of the input array,
124
+ different modes can be specified along each axis. Default value is
125
+ 'reflect'. The valid values and their behavior is as follows:
126
+
127
+ 'reflect' (`d c b a | a b c d | d c b a`)
128
+ The input is extended by reflecting about the edge of the last
129
+ pixel. This mode is also sometimes referred to as half-sample
130
+ symmetric.
131
+
132
+ 'constant' (`k k k k | a b c d | k k k k`)
133
+ The input is extended by filling all values beyond the edge with
134
+ the same constant value, defined by the `cval` parameter.
135
+
136
+ 'nearest' (`a a a a | a b c d | d d d d`)
137
+ The input is extended by replicating the last pixel.
138
+
139
+ 'mirror' (`d c b | a b c d | c b a`)
140
+ The input is extended by reflecting about the center of the last
141
+ pixel. This mode is also sometimes referred to as whole-sample
142
+ symmetric.
143
+
144
+ 'wrap' (`a b c d | a b c d | a b c d`)
145
+ The input is extended by wrapping around to the opposite edge.
146
+
147
+ For consistency with the interpolation functions, the following mode
148
+ names can also be used:
149
+
150
+ 'grid-constant'
151
+ This is a synonym for 'constant'.
152
+
153
+ 'grid-mirror'
154
+ This is a synonym for 'reflect'.
155
+
156
+ 'grid-wrap'
157
+ This is a synonym for 'wrap'.""")
158
+ _cval_doc = (
159
+ """cval : scalar, optional
160
+ Value to fill past edges of input if `mode` is 'constant'. Default
161
+ is 0.0.""")
162
+ _origin_doc = (
163
+ """origin : int, optional
164
+ Controls the placement of the filter on the input array's pixels.
165
+ A value of 0 (the default) centers the filter over the pixel, with
166
+ positive values shifting the filter to the left, and negative ones
167
+ to the right.""")
168
+ _origin_multiple_doc = (
169
+ """origin : int or sequence, optional
170
+ Controls the placement of the filter on the input array's pixels.
171
+ A value of 0 (the default) centers the filter over the pixel, with
172
+ positive values shifting the filter to the left, and negative ones
173
+ to the right. By passing a sequence of origins with length equal to
174
+ the number of dimensions of the input array, different shifts can
175
+ be specified along each axis.""")
176
+ _extra_arguments_doc = (
177
+ """extra_arguments : sequence, optional
178
+ Sequence of extra positional arguments to pass to passed function.""")
179
+ _extra_keywords_doc = (
180
+ """extra_keywords : dict, optional
181
+ dict of extra keyword arguments to pass to passed function.""")
182
+ _prefilter_doc = (
183
+ """prefilter : bool, optional
184
+ Determines if the input array is prefiltered with `spline_filter`
185
+ before interpolation. The default is True, which will create a
186
+ temporary `float64` array of filtered values if `order > 1`. If
187
+ setting this to False, the output will be slightly blurred if
188
+ `order > 1`, unless the input is prefiltered, i.e. it is the result
189
+ of calling `spline_filter` on the original input.""")
190
+
191
+ docdict = {
192
+ 'input': _input_doc,
193
+ 'axis': _axis_doc,
194
+ 'output': _output_doc,
195
+ 'size_foot': _size_foot_doc,
196
+ 'mode_interp_constant': _mode_interp_constant_doc,
197
+ 'mode_interp_mirror': _mode_interp_mirror_doc,
198
+ 'mode_reflect': _mode_reflect_doc,
199
+ 'mode_multiple': _mode_multiple_doc,
200
+ 'cval': _cval_doc,
201
+ 'origin': _origin_doc,
202
+ 'origin_multiple': _origin_multiple_doc,
203
+ 'extra_arguments': _extra_arguments_doc,
204
+ 'extra_keywords': _extra_keywords_doc,
205
+ 'prefilter': _prefilter_doc
206
+ }
207
+
208
+ docfiller = doccer.filldoc(docdict)
parrot/lib/python3.10/site-packages/scipy/ndimage/interpolation.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.ndimage` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'spline_filter1d', 'spline_filter',
10
+ 'geometric_transform', 'map_coordinates',
11
+ 'affine_transform', 'shift', 'zoom', 'rotate',
12
+ ]
13
+
14
+
15
+ def __dir__():
16
+ return __all__
17
+
18
+
19
+ def __getattr__(name):
20
+ return _sub_module_deprecation(sub_package='ndimage', module='interpolation',
21
+ private_modules=['_interpolation'], all=__all__,
22
+ attribute=name)
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from __future__ import annotations
import numpy as np

# Numeric dtypes exercised throughout the ndimage test suite
# (historically called the "numarray data types").
integer_types: list[type] = [
    np.int8, np.uint8,
    np.int16, np.uint16,
    np.int32, np.uint32,
    np.int64, np.uint64,
]

float_types: list[type] = [np.float32, np.float64]

complex_types: list[type] = [np.complex64, np.complex128]

# All real dtypes (integers followed by floats); complex kept separate.
types: list[type] = integer_types + float_types
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_allclose
3
+
4
+ from scipy import ndimage
5
+ from scipy.ndimage import _ctest
6
+ from scipy.ndimage import _cytest
7
+ from scipy._lib._ccallback import LowLevelCallable
8
+
9
# Each list provides the same low-level callback in four flavors: a ctypes
# capsule from _ctest, a cython callable without/with an explicit signature,
# and a LowLevelCallable built from a cython capsule.
FILTER1D_FUNCTIONS = [
    lambda filter_size: _ctest.filter1d(filter_size),
    lambda filter_size: _cytest.filter1d(filter_size, with_signature=False),
    lambda filter_size: LowLevelCallable(
        _cytest.filter1d(filter_size, with_signature=True)),
    lambda filter_size: LowLevelCallable.from_cython(
        _cytest, "_filter1d", _cytest.filter1d_capsule(filter_size)),
]

FILTER2D_FUNCTIONS = [
    lambda weights: _ctest.filter2d(weights),
    lambda weights: _cytest.filter2d(weights, with_signature=False),
    lambda weights: LowLevelCallable(
        _cytest.filter2d(weights, with_signature=True)),
    lambda weights: LowLevelCallable.from_cython(
        _cytest, "_filter2d", _cytest.filter2d_capsule(weights)),
]

TRANSFORM_FUNCTIONS = [
    lambda shift: _ctest.transform(shift),
    lambda shift: _cytest.transform(shift, with_signature=False),
    lambda shift: LowLevelCallable(
        _cytest.transform(shift, with_signature=True)),
    lambda shift: LowLevelCallable.from_cython(
        _cytest, "_transform", _cytest.transform_capsule(shift)),
]
38
+
39
+
40
def test_generic_filter():
    # Each low-level callback flavor must match a pure-Python reference
    # implementation of the weighted footprint sum.
    def filter2d(footprint_elements, weights):
        return (weights * footprint_elements).sum()

    im = np.ones((20, 20))
    im[:10, :10] = 0
    footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    footprint_size = np.count_nonzero(footprint)
    weights = np.ones(footprint_size) / footprint_size

    for j, make_callback in enumerate(FILTER2D_FUNCTIONS):
        res = ndimage.generic_filter(im, make_callback(weights),
                                     footprint=footprint)
        std = ndimage.generic_filter(im, filter2d, footprint=footprint,
                                     extra_arguments=(weights,))
        assert_allclose(res, std, err_msg=f"#{j} failed")
61
+
62
+
63
def test_generic_filter1d():
    # Pure-Python reference: a causal moving sum normalized afterwards.
    def filter1d(input_line, output_line, filter_size):
        for i in range(output_line.size):
            output_line[i] = 0
            for j in range(filter_size):
                output_line[i] += input_line[i + j]
        output_line /= filter_size

    im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1))
    filter_size = 3

    for j, make_callback in enumerate(FILTER1D_FUNCTIONS):
        res = ndimage.generic_filter1d(im, make_callback(filter_size),
                                       filter_size)
        std = ndimage.generic_filter1d(im, filter1d, filter_size,
                                       extra_arguments=(filter_size,))
        assert_allclose(res, std, err_msg=f"#{j} failed")
85
+
86
+
87
def test_geometric_transform():
    # Reference mapping: a constant sub-pixel shift along both axes.
    def transform(output_coordinates, shift):
        return output_coordinates[0] - shift, output_coordinates[1] - shift

    im = np.arange(12).reshape(4, 3).astype(np.float64)
    shift = 0.5

    for j, make_callback in enumerate(TRANSFORM_FUNCTIONS):
        res = ndimage.geometric_transform(im, make_callback(shift))
        std = ndimage.geometric_transform(im, transform,
                                          extra_arguments=(shift,))
        assert_allclose(res, std, err_msg=f"#{j} failed")
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Testing data types for ndimage calls
2
+ """
3
+ import numpy as np
4
+ from numpy.testing import assert_array_almost_equal, assert_
5
+ import pytest
6
+
7
+ from scipy import ndimage
8
+
9
+
10
def test_map_coordinates_dts():
    # The interpolation routines must accept any of these dtypes for both
    # the data array and the coordinate arrays.
    data = np.array([[4, 1, 3, 2],
                     [7, 6, 8, 5],
                     [3, 5, 3, 6]])
    shifted_data = np.array([[0, 0, 0, 0],
                             [0, 4, 1, 3],
                             [0, 7, 6, 8]])
    idx = np.indices(data.shape)
    dts = (np.uint8, np.uint16, np.uint32, np.uint64,
           np.int8, np.int16, np.int32, np.int64,
           np.intp, np.uintp, np.float32, np.float64)
    for order in range(6):
        for data_dt in dts:
            these_data = data.astype(data_dt)
            for coord_dt in dts:
                # identity affine mapping must round-trip the data
                matrix = np.eye(2, dtype=coord_dt)
                offset = np.zeros((2,), dtype=coord_dt)
                out = ndimage.affine_transform(these_data, matrix, offset)
                assert_array_almost_equal(these_data, out)
                # map_coordinates with a unit shift
                coords_m1 = idx.astype(coord_dt) - 1
                coords_p10 = idx.astype(coord_dt) + 10
                out = ndimage.map_coordinates(these_data, coords_m1,
                                              order=order)
                assert_array_almost_equal(out, shifted_data)
                # coordinates far outside the array fill with the constant
                out = ndimage.map_coordinates(these_data, coords_p10,
                                              order=order)
                assert_array_almost_equal(out, np.zeros((3, 4)))
            # shift and zoom accept the same data dtypes
            out = ndimage.shift(these_data, 1)
            assert_array_almost_equal(out, shifted_data)
            out = ndimage.zoom(these_data, 1)
            assert_array_almost_equal(these_data, out)
45
+
46
@pytest.mark.xfail(True, reason="Broken on many platforms")
def test_uint64_max():
    # Interpolation is always done in double precision, so use the largest
    # uint64 value for which int(float(big)) still fits in a uint64.
    # Known to fail where uint64 -> double conversion goes through a signed
    # int64 (32-bit MSVC), on Debian s390x, and on macOS arm64 (gh-19117) —
    # hence the unconditional xfail above.
    big = 2**64 - 1025
    arr = np.array([big, big, big], dtype=np.uint64)
    # geometric transform path (map_coordinates / affine_transform)
    x = ndimage.map_coordinates(arr, np.indices(arr.shape) - 0.1)
    assert_(x[1] == int(float(big)))
    assert_(x[2] == int(float(big)))
    # zoom / shift path
    x = ndimage.shift(arr, 0.1)
    assert_(x[1] == int(float(big)))
    assert_(x[2] == int(float(big)))
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py ADDED
@@ -0,0 +1,2214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ''' Some tests for filters '''
2
+ import functools
3
+ import itertools
4
+ import math
5
+ import numpy as np
6
+
7
+ from numpy.testing import (assert_equal, assert_allclose,
8
+ assert_array_almost_equal,
9
+ assert_array_equal, assert_almost_equal,
10
+ suppress_warnings, assert_)
11
+ import pytest
12
+ from pytest import raises as assert_raises
13
+
14
+ from scipy import ndimage
15
+ from scipy.ndimage._filters import _gaussian_kernel1d
16
+
17
+ from . import types, float_types, complex_types
18
+
19
+
20
def sumsq(a, b):
    # Euclidean (L2) distance between two arrays.
    diff = a - b
    return math.sqrt((diff * diff).sum())
22
+
23
+
24
+ def _complex_correlate(array, kernel, real_dtype, convolve=False,
25
+ mode="reflect", cval=0, ):
26
+ """Utility to perform a reference complex-valued convolutions.
27
+
28
+ When convolve==False, correlation is performed instead
29
+ """
30
+ array = np.asarray(array)
31
+ kernel = np.asarray(kernel)
32
+ complex_array = array.dtype.kind == 'c'
33
+ complex_kernel = kernel.dtype.kind == 'c'
34
+ if array.ndim == 1:
35
+ func = ndimage.convolve1d if convolve else ndimage.correlate1d
36
+ else:
37
+ func = ndimage.convolve if convolve else ndimage.correlate
38
+ if not convolve:
39
+ kernel = kernel.conj()
40
+ if complex_array and complex_kernel:
41
+ # use: real(cval) for array.real component
42
+ # imag(cval) for array.imag component
43
+ output = (
44
+ func(array.real, kernel.real, output=real_dtype,
45
+ mode=mode, cval=np.real(cval)) -
46
+ func(array.imag, kernel.imag, output=real_dtype,
47
+ mode=mode, cval=np.imag(cval)) +
48
+ 1j * func(array.imag, kernel.real, output=real_dtype,
49
+ mode=mode, cval=np.imag(cval)) +
50
+ 1j * func(array.real, kernel.imag, output=real_dtype,
51
+ mode=mode, cval=np.real(cval))
52
+ )
53
+ elif complex_array:
54
+ output = (
55
+ func(array.real, kernel, output=real_dtype, mode=mode,
56
+ cval=np.real(cval)) +
57
+ 1j * func(array.imag, kernel, output=real_dtype, mode=mode,
58
+ cval=np.imag(cval))
59
+ )
60
+ elif complex_kernel:
61
+ # real array so cval is real too
62
+ output = (
63
+ func(array, kernel.real, output=real_dtype, mode=mode, cval=cval) +
64
+ 1j * func(array, kernel.imag, output=real_dtype, mode=mode,
65
+ cval=cval)
66
+ )
67
+ return output
68
+
69
+
70
+ def _cases_axes_tuple_length_mismatch():
71
+ # Generate combinations of filter function, valid kwargs, and
72
+ # keyword-value pairs for which the value will become with mismatched
73
+ # (invalid) size
74
+ filter_func = ndimage.gaussian_filter
75
+ kwargs = dict(radius=3, mode='constant', sigma=1.0, order=0)
76
+ for key, val in kwargs.items():
77
+ yield filter_func, kwargs, key, val
78
+
79
+ filter_funcs = [ndimage.uniform_filter, ndimage.minimum_filter,
80
+ ndimage.maximum_filter]
81
+ kwargs = dict(size=3, mode='constant', origin=0)
82
+ for filter_func in filter_funcs:
83
+ for key, val in kwargs.items():
84
+ yield filter_func, kwargs, key, val
85
+
86
+
87
+ class TestNdimageFilters:
88
+
89
    def _validate_complex(self, array, kernel, type2, mode='reflect', cval=0):
        """Validate complex-valued correlate/convolve against the reference
        implementation `_complex_correlate` (module-level helper).

        Checks, in order: output-dtype request, pre-allocated output,
        the warning path for a real requested dtype, and the error path
        for a real pre-allocated output.
        """
        # Real dtype matching the requested complex output dtype.
        real_dtype = np.asarray([], dtype=type2).real.dtype
        expected = _complex_correlate(
            array, kernel, real_dtype, convolve=False, mode=mode, cval=cval
        )

        # 1-D inputs exercise the *1d variants; n-D the general ones.
        if array.ndim == 1:
            correlate = functools.partial(ndimage.correlate1d, axis=-1,
                                          mode=mode, cval=cval)
            convolve = functools.partial(ndimage.convolve1d, axis=-1,
                                         mode=mode, cval=cval)
        else:
            correlate = functools.partial(ndimage.correlate, mode=mode,
                                          cval=cval)
            convolve = functools.partial(ndimage.convolve, mode=mode,
                                         cval=cval)

        # test correlate output dtype
        output = correlate(array, kernel, output=type2)
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)

        # test correlate with pre-allocated output
        output = np.zeros_like(array, dtype=type2)
        correlate(array, kernel, output=output)
        assert_array_almost_equal(expected, output)

        # test convolve output dtype
        output = convolve(array, kernel, output=type2)
        expected = _complex_correlate(
            array, kernel, real_dtype, convolve=True, mode=mode, cval=cval,
        )
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)

        # convolve with pre-allocated output
        convolve(array, kernel, output=output)
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)

        # warns if the output is not a complex dtype
        with pytest.warns(UserWarning,
                          match="promoting specified output dtype to complex"):
            correlate(array, kernel, output=real_dtype)

        with pytest.warns(UserWarning,
                          match="promoting specified output dtype to complex"):
            convolve(array, kernel, output=real_dtype)

        # raises if output array is provided, but is not complex-valued
        output_real = np.zeros_like(array, dtype=real_dtype)
        with assert_raises(RuntimeError):
            correlate(array, kernel, output=output_real)

        with assert_raises(RuntimeError):
            convolve(array, kernel, output=output_real)
146
+
147
+ def test_correlate01(self):
148
+ array = np.array([1, 2])
149
+ weights = np.array([2])
150
+ expected = [2, 4]
151
+
152
+ output = ndimage.correlate(array, weights)
153
+ assert_array_almost_equal(output, expected)
154
+
155
+ output = ndimage.convolve(array, weights)
156
+ assert_array_almost_equal(output, expected)
157
+
158
+ output = ndimage.correlate1d(array, weights)
159
+ assert_array_almost_equal(output, expected)
160
+
161
+ output = ndimage.convolve1d(array, weights)
162
+ assert_array_almost_equal(output, expected)
163
+
164
+ def test_correlate01_overlap(self):
165
+ array = np.arange(256).reshape(16, 16)
166
+ weights = np.array([2])
167
+ expected = 2 * array
168
+
169
+ ndimage.correlate1d(array, weights, output=array)
170
+ assert_array_almost_equal(array, expected)
171
+
172
+ def test_correlate02(self):
173
+ array = np.array([1, 2, 3])
174
+ kernel = np.array([1])
175
+
176
+ output = ndimage.correlate(array, kernel)
177
+ assert_array_almost_equal(array, output)
178
+
179
+ output = ndimage.convolve(array, kernel)
180
+ assert_array_almost_equal(array, output)
181
+
182
+ output = ndimage.correlate1d(array, kernel)
183
+ assert_array_almost_equal(array, output)
184
+
185
+ output = ndimage.convolve1d(array, kernel)
186
+ assert_array_almost_equal(array, output)
187
+
188
+ def test_correlate03(self):
189
+ array = np.array([1])
190
+ weights = np.array([1, 1])
191
+ expected = [2]
192
+
193
+ output = ndimage.correlate(array, weights)
194
+ assert_array_almost_equal(output, expected)
195
+
196
+ output = ndimage.convolve(array, weights)
197
+ assert_array_almost_equal(output, expected)
198
+
199
+ output = ndimage.correlate1d(array, weights)
200
+ assert_array_almost_equal(output, expected)
201
+
202
+ output = ndimage.convolve1d(array, weights)
203
+ assert_array_almost_equal(output, expected)
204
+
205
+ def test_correlate04(self):
206
+ array = np.array([1, 2])
207
+ tcor = [2, 3]
208
+ tcov = [3, 4]
209
+ weights = np.array([1, 1])
210
+ output = ndimage.correlate(array, weights)
211
+ assert_array_almost_equal(output, tcor)
212
+ output = ndimage.convolve(array, weights)
213
+ assert_array_almost_equal(output, tcov)
214
+ output = ndimage.correlate1d(array, weights)
215
+ assert_array_almost_equal(output, tcor)
216
+ output = ndimage.convolve1d(array, weights)
217
+ assert_array_almost_equal(output, tcov)
218
+
219
+ def test_correlate05(self):
220
+ array = np.array([1, 2, 3])
221
+ tcor = [2, 3, 5]
222
+ tcov = [3, 5, 6]
223
+ kernel = np.array([1, 1])
224
+ output = ndimage.correlate(array, kernel)
225
+ assert_array_almost_equal(tcor, output)
226
+ output = ndimage.convolve(array, kernel)
227
+ assert_array_almost_equal(tcov, output)
228
+ output = ndimage.correlate1d(array, kernel)
229
+ assert_array_almost_equal(tcor, output)
230
+ output = ndimage.convolve1d(array, kernel)
231
+ assert_array_almost_equal(tcov, output)
232
+
233
+ def test_correlate06(self):
234
+ array = np.array([1, 2, 3])
235
+ tcor = [9, 14, 17]
236
+ tcov = [7, 10, 15]
237
+ weights = np.array([1, 2, 3])
238
+ output = ndimage.correlate(array, weights)
239
+ assert_array_almost_equal(output, tcor)
240
+ output = ndimage.convolve(array, weights)
241
+ assert_array_almost_equal(output, tcov)
242
+ output = ndimage.correlate1d(array, weights)
243
+ assert_array_almost_equal(output, tcor)
244
+ output = ndimage.convolve1d(array, weights)
245
+ assert_array_almost_equal(output, tcov)
246
+
247
+ def test_correlate07(self):
248
+ array = np.array([1, 2, 3])
249
+ expected = [5, 8, 11]
250
+ weights = np.array([1, 2, 1])
251
+ output = ndimage.correlate(array, weights)
252
+ assert_array_almost_equal(output, expected)
253
+ output = ndimage.convolve(array, weights)
254
+ assert_array_almost_equal(output, expected)
255
+ output = ndimage.correlate1d(array, weights)
256
+ assert_array_almost_equal(output, expected)
257
+ output = ndimage.convolve1d(array, weights)
258
+ assert_array_almost_equal(output, expected)
259
+
260
+ def test_correlate08(self):
261
+ array = np.array([1, 2, 3])
262
+ tcor = [1, 2, 5]
263
+ tcov = [3, 6, 7]
264
+ weights = np.array([1, 2, -1])
265
+ output = ndimage.correlate(array, weights)
266
+ assert_array_almost_equal(output, tcor)
267
+ output = ndimage.convolve(array, weights)
268
+ assert_array_almost_equal(output, tcov)
269
+ output = ndimage.correlate1d(array, weights)
270
+ assert_array_almost_equal(output, tcor)
271
+ output = ndimage.convolve1d(array, weights)
272
+ assert_array_almost_equal(output, tcov)
273
+
274
+ def test_correlate09(self):
275
+ array = []
276
+ kernel = np.array([1, 1])
277
+ output = ndimage.correlate(array, kernel)
278
+ assert_array_almost_equal(array, output)
279
+ output = ndimage.convolve(array, kernel)
280
+ assert_array_almost_equal(array, output)
281
+ output = ndimage.correlate1d(array, kernel)
282
+ assert_array_almost_equal(array, output)
283
+ output = ndimage.convolve1d(array, kernel)
284
+ assert_array_almost_equal(array, output)
285
+
286
+ def test_correlate10(self):
287
+ array = [[]]
288
+ kernel = np.array([[1, 1]])
289
+ output = ndimage.correlate(array, kernel)
290
+ assert_array_almost_equal(array, output)
291
+ output = ndimage.convolve(array, kernel)
292
+ assert_array_almost_equal(array, output)
293
+
294
+ def test_correlate11(self):
295
+ array = np.array([[1, 2, 3],
296
+ [4, 5, 6]])
297
+ kernel = np.array([[1, 1],
298
+ [1, 1]])
299
+ output = ndimage.correlate(array, kernel)
300
+ assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
301
+ output = ndimage.convolve(array, kernel)
302
+ assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
303
+
304
+ def test_correlate12(self):
305
+ array = np.array([[1, 2, 3],
306
+ [4, 5, 6]])
307
+ kernel = np.array([[1, 0],
308
+ [0, 1]])
309
+ output = ndimage.correlate(array, kernel)
310
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
311
+ output = ndimage.convolve(array, kernel)
312
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
313
+
314
+ @pytest.mark.parametrize('dtype_array', types)
315
+ @pytest.mark.parametrize('dtype_kernel', types)
316
+ def test_correlate13(self, dtype_array, dtype_kernel):
317
+ kernel = np.array([[1, 0],
318
+ [0, 1]])
319
+ array = np.array([[1, 2, 3],
320
+ [4, 5, 6]], dtype_array)
321
+ output = ndimage.correlate(array, kernel, output=dtype_kernel)
322
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
323
+ assert_equal(output.dtype.type, dtype_kernel)
324
+
325
+ output = ndimage.convolve(array, kernel,
326
+ output=dtype_kernel)
327
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
328
+ assert_equal(output.dtype.type, dtype_kernel)
329
+
330
+ @pytest.mark.parametrize('dtype_array', types)
331
+ @pytest.mark.parametrize('dtype_output', types)
332
+ def test_correlate14(self, dtype_array, dtype_output):
333
+ kernel = np.array([[1, 0],
334
+ [0, 1]])
335
+ array = np.array([[1, 2, 3],
336
+ [4, 5, 6]], dtype_array)
337
+ output = np.zeros(array.shape, dtype_output)
338
+ ndimage.correlate(array, kernel, output=output)
339
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
340
+ assert_equal(output.dtype.type, dtype_output)
341
+
342
+ ndimage.convolve(array, kernel, output=output)
343
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
344
+ assert_equal(output.dtype.type, dtype_output)
345
+
346
+ @pytest.mark.parametrize('dtype_array', types)
347
+ def test_correlate15(self, dtype_array):
348
+ kernel = np.array([[1, 0],
349
+ [0, 1]])
350
+ array = np.array([[1, 2, 3],
351
+ [4, 5, 6]], dtype_array)
352
+ output = ndimage.correlate(array, kernel, output=np.float32)
353
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
354
+ assert_equal(output.dtype.type, np.float32)
355
+
356
+ output = ndimage.convolve(array, kernel, output=np.float32)
357
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
358
+ assert_equal(output.dtype.type, np.float32)
359
+
360
+ @pytest.mark.parametrize('dtype_array', types)
361
+ def test_correlate16(self, dtype_array):
362
+ kernel = np.array([[0.5, 0],
363
+ [0, 0.5]])
364
+ array = np.array([[1, 2, 3], [4, 5, 6]], dtype_array)
365
+ output = ndimage.correlate(array, kernel, output=np.float32)
366
+ assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
367
+ assert_equal(output.dtype.type, np.float32)
368
+
369
+ output = ndimage.convolve(array, kernel, output=np.float32)
370
+ assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
371
+ assert_equal(output.dtype.type, np.float32)
372
+
373
+ def test_correlate17(self):
374
+ array = np.array([1, 2, 3])
375
+ tcor = [3, 5, 6]
376
+ tcov = [2, 3, 5]
377
+ kernel = np.array([1, 1])
378
+ output = ndimage.correlate(array, kernel, origin=-1)
379
+ assert_array_almost_equal(tcor, output)
380
+ output = ndimage.convolve(array, kernel, origin=-1)
381
+ assert_array_almost_equal(tcov, output)
382
+ output = ndimage.correlate1d(array, kernel, origin=-1)
383
+ assert_array_almost_equal(tcor, output)
384
+ output = ndimage.convolve1d(array, kernel, origin=-1)
385
+ assert_array_almost_equal(tcov, output)
386
+
387
+ @pytest.mark.parametrize('dtype_array', types)
388
+ def test_correlate18(self, dtype_array):
389
+ kernel = np.array([[1, 0],
390
+ [0, 1]])
391
+ array = np.array([[1, 2, 3],
392
+ [4, 5, 6]], dtype_array)
393
+ output = ndimage.correlate(array, kernel,
394
+ output=np.float32,
395
+ mode='nearest', origin=-1)
396
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
397
+ assert_equal(output.dtype.type, np.float32)
398
+
399
+ output = ndimage.convolve(array, kernel,
400
+ output=np.float32,
401
+ mode='nearest', origin=-1)
402
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
403
+ assert_equal(output.dtype.type, np.float32)
404
+
405
+ def test_correlate_mode_sequence(self):
406
+ kernel = np.ones((2, 2))
407
+ array = np.ones((3, 3), float)
408
+ with assert_raises(RuntimeError):
409
+ ndimage.correlate(array, kernel, mode=['nearest', 'reflect'])
410
+ with assert_raises(RuntimeError):
411
+ ndimage.convolve(array, kernel, mode=['nearest', 'reflect'])
412
+
413
+ @pytest.mark.parametrize('dtype_array', types)
414
+ def test_correlate19(self, dtype_array):
415
+ kernel = np.array([[1, 0],
416
+ [0, 1]])
417
+ array = np.array([[1, 2, 3],
418
+ [4, 5, 6]], dtype_array)
419
+ output = ndimage.correlate(array, kernel,
420
+ output=np.float32,
421
+ mode='nearest', origin=[-1, 0])
422
+ assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
423
+ assert_equal(output.dtype.type, np.float32)
424
+
425
+ output = ndimage.convolve(array, kernel,
426
+ output=np.float32,
427
+ mode='nearest', origin=[-1, 0])
428
+ assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
429
+ assert_equal(output.dtype.type, np.float32)
430
+
431
+ @pytest.mark.parametrize('dtype_array', types)
432
+ @pytest.mark.parametrize('dtype_output', types)
433
+ def test_correlate20(self, dtype_array, dtype_output):
434
+ weights = np.array([1, 2, 1])
435
+ expected = [[5, 10, 15], [7, 14, 21]]
436
+ array = np.array([[1, 2, 3],
437
+ [2, 4, 6]], dtype_array)
438
+ output = np.zeros((2, 3), dtype_output)
439
+ ndimage.correlate1d(array, weights, axis=0, output=output)
440
+ assert_array_almost_equal(output, expected)
441
+ ndimage.convolve1d(array, weights, axis=0, output=output)
442
+ assert_array_almost_equal(output, expected)
443
+
444
+ def test_correlate21(self):
445
+ array = np.array([[1, 2, 3],
446
+ [2, 4, 6]])
447
+ expected = [[5, 10, 15], [7, 14, 21]]
448
+ weights = np.array([1, 2, 1])
449
+ output = ndimage.correlate1d(array, weights, axis=0)
450
+ assert_array_almost_equal(output, expected)
451
+ output = ndimage.convolve1d(array, weights, axis=0)
452
+ assert_array_almost_equal(output, expected)
453
+
454
+ @pytest.mark.parametrize('dtype_array', types)
455
+ @pytest.mark.parametrize('dtype_output', types)
456
+ def test_correlate22(self, dtype_array, dtype_output):
457
+ weights = np.array([1, 2, 1])
458
+ expected = [[6, 12, 18], [6, 12, 18]]
459
+ array = np.array([[1, 2, 3],
460
+ [2, 4, 6]], dtype_array)
461
+ output = np.zeros((2, 3), dtype_output)
462
+ ndimage.correlate1d(array, weights, axis=0,
463
+ mode='wrap', output=output)
464
+ assert_array_almost_equal(output, expected)
465
+ ndimage.convolve1d(array, weights, axis=0,
466
+ mode='wrap', output=output)
467
+ assert_array_almost_equal(output, expected)
468
+
469
+ @pytest.mark.parametrize('dtype_array', types)
470
+ @pytest.mark.parametrize('dtype_output', types)
471
+ def test_correlate23(self, dtype_array, dtype_output):
472
+ weights = np.array([1, 2, 1])
473
+ expected = [[5, 10, 15], [7, 14, 21]]
474
+ array = np.array([[1, 2, 3],
475
+ [2, 4, 6]], dtype_array)
476
+ output = np.zeros((2, 3), dtype_output)
477
+ ndimage.correlate1d(array, weights, axis=0,
478
+ mode='nearest', output=output)
479
+ assert_array_almost_equal(output, expected)
480
+ ndimage.convolve1d(array, weights, axis=0,
481
+ mode='nearest', output=output)
482
+ assert_array_almost_equal(output, expected)
483
+
484
+ @pytest.mark.parametrize('dtype_array', types)
485
+ @pytest.mark.parametrize('dtype_output', types)
486
+ def test_correlate24(self, dtype_array, dtype_output):
487
+ weights = np.array([1, 2, 1])
488
+ tcor = [[7, 14, 21], [8, 16, 24]]
489
+ tcov = [[4, 8, 12], [5, 10, 15]]
490
+ array = np.array([[1, 2, 3],
491
+ [2, 4, 6]], dtype_array)
492
+ output = np.zeros((2, 3), dtype_output)
493
+ ndimage.correlate1d(array, weights, axis=0,
494
+ mode='nearest', output=output, origin=-1)
495
+ assert_array_almost_equal(output, tcor)
496
+ ndimage.convolve1d(array, weights, axis=0,
497
+ mode='nearest', output=output, origin=-1)
498
+ assert_array_almost_equal(output, tcov)
499
+
500
+ @pytest.mark.parametrize('dtype_array', types)
501
+ @pytest.mark.parametrize('dtype_output', types)
502
+ def test_correlate25(self, dtype_array, dtype_output):
503
+ weights = np.array([1, 2, 1])
504
+ tcor = [[4, 8, 12], [5, 10, 15]]
505
+ tcov = [[7, 14, 21], [8, 16, 24]]
506
+ array = np.array([[1, 2, 3],
507
+ [2, 4, 6]], dtype_array)
508
+ output = np.zeros((2, 3), dtype_output)
509
+ ndimage.correlate1d(array, weights, axis=0,
510
+ mode='nearest', output=output, origin=1)
511
+ assert_array_almost_equal(output, tcor)
512
+ ndimage.convolve1d(array, weights, axis=0,
513
+ mode='nearest', output=output, origin=1)
514
+ assert_array_almost_equal(output, tcov)
515
+
516
+ def test_correlate26(self):
517
+ # test fix for gh-11661 (mirror extension of a length 1 signal)
518
+ y = ndimage.convolve1d(np.ones(1), np.ones(5), mode='mirror')
519
+ assert_array_equal(y, np.array(5.))
520
+
521
+ y = ndimage.correlate1d(np.ones(1), np.ones(5), mode='mirror')
522
+ assert_array_equal(y, np.array(5.))
523
+
524
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
525
+ @pytest.mark.parametrize('dtype_input', types)
526
+ @pytest.mark.parametrize('dtype_output', complex_types)
527
+ def test_correlate_complex_kernel(self, dtype_input, dtype_kernel,
528
+ dtype_output):
529
+ kernel = np.array([[1, 0],
530
+ [0, 1 + 1j]], dtype_kernel)
531
+ array = np.array([[1, 2, 3],
532
+ [4, 5, 6]], dtype_input)
533
+ self._validate_complex(array, kernel, dtype_output)
534
+
535
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
536
+ @pytest.mark.parametrize('dtype_input', types)
537
+ @pytest.mark.parametrize('dtype_output', complex_types)
538
+ @pytest.mark.parametrize('mode', ['grid-constant', 'constant'])
539
+ def test_correlate_complex_kernel_cval(self, dtype_input, dtype_kernel,
540
+ dtype_output, mode):
541
+ # test use of non-zero cval with complex inputs
542
+ # also verifies that mode 'grid-constant' does not segfault
543
+ kernel = np.array([[1, 0],
544
+ [0, 1 + 1j]], dtype_kernel)
545
+ array = np.array([[1, 2, 3],
546
+ [4, 5, 6]], dtype_input)
547
+ self._validate_complex(array, kernel, dtype_output, mode=mode,
548
+ cval=5.0)
549
+
550
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
551
+ @pytest.mark.parametrize('dtype_input', types)
552
+ def test_correlate_complex_kernel_invalid_cval(self, dtype_input,
553
+ dtype_kernel):
554
+ # cannot give complex cval with a real image
555
+ kernel = np.array([[1, 0],
556
+ [0, 1 + 1j]], dtype_kernel)
557
+ array = np.array([[1, 2, 3],
558
+ [4, 5, 6]], dtype_input)
559
+ for func in [ndimage.convolve, ndimage.correlate, ndimage.convolve1d,
560
+ ndimage.correlate1d]:
561
+ with pytest.raises(ValueError):
562
+ func(array, kernel, mode='constant', cval=5.0 + 1.0j,
563
+ output=np.complex64)
564
+
565
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
566
+ @pytest.mark.parametrize('dtype_input', types)
567
+ @pytest.mark.parametrize('dtype_output', complex_types)
568
+ def test_correlate1d_complex_kernel(self, dtype_input, dtype_kernel,
569
+ dtype_output):
570
+ kernel = np.array([1, 1 + 1j], dtype_kernel)
571
+ array = np.array([1, 2, 3, 4, 5, 6], dtype_input)
572
+ self._validate_complex(array, kernel, dtype_output)
573
+
574
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
575
+ @pytest.mark.parametrize('dtype_input', types)
576
+ @pytest.mark.parametrize('dtype_output', complex_types)
577
+ def test_correlate1d_complex_kernel_cval(self, dtype_input, dtype_kernel,
578
+ dtype_output):
579
+ kernel = np.array([1, 1 + 1j], dtype_kernel)
580
+ array = np.array([1, 2, 3, 4, 5, 6], dtype_input)
581
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
582
+ cval=5.0)
583
+
584
+ @pytest.mark.parametrize('dtype_kernel', types)
585
+ @pytest.mark.parametrize('dtype_input', complex_types)
586
+ @pytest.mark.parametrize('dtype_output', complex_types)
587
+ def test_correlate_complex_input(self, dtype_input, dtype_kernel,
588
+ dtype_output):
589
+ kernel = np.array([[1, 0],
590
+ [0, 1]], dtype_kernel)
591
+ array = np.array([[1, 2j, 3],
592
+ [1 + 4j, 5, 6j]], dtype_input)
593
+ self._validate_complex(array, kernel, dtype_output)
594
+
595
+ @pytest.mark.parametrize('dtype_kernel', types)
596
+ @pytest.mark.parametrize('dtype_input', complex_types)
597
+ @pytest.mark.parametrize('dtype_output', complex_types)
598
+ def test_correlate1d_complex_input(self, dtype_input, dtype_kernel,
599
+ dtype_output):
600
+ kernel = np.array([1, 0, 1], dtype_kernel)
601
+ array = np.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
602
+ self._validate_complex(array, kernel, dtype_output)
603
+
604
+ @pytest.mark.parametrize('dtype_kernel', types)
605
+ @pytest.mark.parametrize('dtype_input', complex_types)
606
+ @pytest.mark.parametrize('dtype_output', complex_types)
607
+ def test_correlate1d_complex_input_cval(self, dtype_input, dtype_kernel,
608
+ dtype_output):
609
+ kernel = np.array([1, 0, 1], dtype_kernel)
610
+ array = np.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
611
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
612
+ cval=5 - 3j)
613
+
614
+ @pytest.mark.parametrize('dtype', complex_types)
615
+ @pytest.mark.parametrize('dtype_output', complex_types)
616
+ def test_correlate_complex_input_and_kernel(self, dtype, dtype_output):
617
+ kernel = np.array([[1, 0],
618
+ [0, 1 + 1j]], dtype)
619
+ array = np.array([[1, 2j, 3],
620
+ [1 + 4j, 5, 6j]], dtype)
621
+ self._validate_complex(array, kernel, dtype_output)
622
+
623
+ @pytest.mark.parametrize('dtype', complex_types)
624
+ @pytest.mark.parametrize('dtype_output', complex_types)
625
+ def test_correlate_complex_input_and_kernel_cval(self, dtype,
626
+ dtype_output):
627
+ kernel = np.array([[1, 0],
628
+ [0, 1 + 1j]], dtype)
629
+ array = np.array([[1, 2, 3],
630
+ [4, 5, 6]], dtype)
631
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
632
+ cval=5.0 + 2.0j)
633
+
634
+ @pytest.mark.parametrize('dtype', complex_types)
635
+ @pytest.mark.parametrize('dtype_output', complex_types)
636
+ def test_correlate1d_complex_input_and_kernel(self, dtype, dtype_output):
637
+ kernel = np.array([1, 1 + 1j], dtype)
638
+ array = np.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
639
+ self._validate_complex(array, kernel, dtype_output)
640
+
641
+ @pytest.mark.parametrize('dtype', complex_types)
642
+ @pytest.mark.parametrize('dtype_output', complex_types)
643
+ def test_correlate1d_complex_input_and_kernel_cval(self, dtype,
644
+ dtype_output):
645
+ kernel = np.array([1, 1 + 1j], dtype)
646
+ array = np.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
647
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
648
+ cval=5.0 + 2.0j)
649
+
650
+ def test_gauss01(self):
651
+ input = np.array([[1, 2, 3],
652
+ [2, 4, 6]], np.float32)
653
+ output = ndimage.gaussian_filter(input, 0)
654
+ assert_array_almost_equal(output, input)
655
+
656
+ def test_gauss02(self):
657
+ input = np.array([[1, 2, 3],
658
+ [2, 4, 6]], np.float32)
659
+ output = ndimage.gaussian_filter(input, 1.0)
660
+ assert_equal(input.dtype, output.dtype)
661
+ assert_equal(input.shape, output.shape)
662
+
663
+ def test_gauss03(self):
664
+ # single precision data
665
+ input = np.arange(100 * 100).astype(np.float32)
666
+ input.shape = (100, 100)
667
+ output = ndimage.gaussian_filter(input, [1.0, 1.0])
668
+
669
+ assert_equal(input.dtype, output.dtype)
670
+ assert_equal(input.shape, output.shape)
671
+
672
+ # input.sum() is 49995000.0. With single precision floats, we can't
673
+ # expect more than 8 digits of accuracy, so use decimal=0 in this test.
674
+ assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'),
675
+ decimal=0)
676
+ assert_(sumsq(input, output) > 1.0)
677
+
678
+ def test_gauss04(self):
679
+ input = np.arange(100 * 100).astype(np.float32)
680
+ input.shape = (100, 100)
681
+ otype = np.float64
682
+ output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
683
+ assert_equal(output.dtype.type, np.float64)
684
+ assert_equal(input.shape, output.shape)
685
+ assert_(sumsq(input, output) > 1.0)
686
+
687
+ def test_gauss05(self):
688
+ input = np.arange(100 * 100).astype(np.float32)
689
+ input.shape = (100, 100)
690
+ otype = np.float64
691
+ output = ndimage.gaussian_filter(input, [1.0, 1.0],
692
+ order=1, output=otype)
693
+ assert_equal(output.dtype.type, np.float64)
694
+ assert_equal(input.shape, output.shape)
695
+ assert_(sumsq(input, output) > 1.0)
696
+
697
+ def test_gauss06(self):
698
+ input = np.arange(100 * 100).astype(np.float32)
699
+ input.shape = (100, 100)
700
+ otype = np.float64
701
+ output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
702
+ output2 = ndimage.gaussian_filter(input, 1.0, output=otype)
703
+ assert_array_almost_equal(output1, output2)
704
+
705
+ def test_gauss_memory_overlap(self):
706
+ input = np.arange(100 * 100).astype(np.float32)
707
+ input.shape = (100, 100)
708
+ output1 = ndimage.gaussian_filter(input, 1.0)
709
+ ndimage.gaussian_filter(input, 1.0, output=input)
710
+ assert_array_almost_equal(output1, input)
711
+
712
    @pytest.mark.parametrize(('filter_func', 'extra_args', 'size0', 'size'),
                             [(ndimage.gaussian_filter, (), 0, 1.0),
                              (ndimage.uniform_filter, (), 1, 3),
                              (ndimage.minimum_filter, (), 1, 3),
                              (ndimage.maximum_filter, (), 1, 3),
                              (ndimage.median_filter, (), 1, 3),
                              (ndimage.rank_filter, (1,), 1, 3),
                              (ndimage.percentile_filter, (40,), 1, 3)])
    @pytest.mark.parametrize(
        'axes',
        tuple(itertools.combinations(range(-3, 3), 1))
        + tuple(itertools.combinations(range(-3, 3), 2))
        + ((0, 1, 2),))
    def test_filter_axes(self, filter_func, extra_args, size0, size, axes):
        """Filtering a subset of axes equals filtering all axes with the
        neutral size (`size0`) on the untouched ones.

        Note: `size` is called `sigma` in `gaussian_filter`; `size0` is the
        value that makes the filter a no-op on an axis (0 for gaussian,
        1 for the size-based filters).
        """
        array = np.arange(6 * 8 * 12, dtype=np.float64).reshape(6, 8, 12)
        axes = np.array(axes)

        if len(set(axes % array.ndim)) != len(axes):
            # parametrized cases with duplicate axes raise an error
            with pytest.raises(ValueError, match="axes must be unique"):
                filter_func(array, *extra_args, size, axes=axes)
            return
        output = filter_func(array, *extra_args, size, axes=axes)

        # result should be equivalent to sigma=0.0/size=1 on unfiltered axes
        all_sizes = (size if ax in (axes % array.ndim) else size0
                     for ax in range(array.ndim))
        expected = filter_func(array, *extra_args, all_sizes)
        assert_allclose(output, expected)
742
+
743
    # Per-axis keyword fixtures shared by the `axes` tests below: one entry
    # per axis of the 3-D test array, so tests can slice them down to the
    # filtered axes.
    kwargs_gauss = dict(radius=[4, 2, 3], order=[0, 1, 2],
                        mode=['reflect', 'nearest', 'constant'])
    kwargs_other = dict(origin=(-1, 0, 1),
                        mode=['reflect', 'nearest', 'constant'])
    kwargs_rank = dict(origin=(-1, 0, 1))
748
+
749
    @pytest.mark.parametrize("filter_func, size0, size, kwargs",
                             [(ndimage.gaussian_filter, 0, 1.0, kwargs_gauss),
                              (ndimage.uniform_filter, 1, 3, kwargs_other),
                              (ndimage.maximum_filter, 1, 3, kwargs_other),
                              (ndimage.minimum_filter, 1, 3, kwargs_other),
                              (ndimage.median_filter, 1, 3, kwargs_rank),
                              (ndimage.rank_filter, 1, 3, kwargs_rank),
                              (ndimage.percentile_filter, 1, 3, kwargs_rank)])
    @pytest.mark.parametrize('axes', itertools.combinations(range(-3, 3), 2))
    def test_filter_axes_kwargs(self, filter_func, size0, size, kwargs, axes):
        """Per-axis keyword sequences restricted to `axes` must match the
        full 3-D call with neutral values on the unfiltered axes."""
        array = np.arange(6 * 8 * 12, dtype=np.float64).reshape(6, 8, 12)

        # arrays so they can be fancy-indexed by `axes` below
        kwargs = {key: np.array(val) for key, val in kwargs.items()}
        axes = np.array(axes)
        n_axes = axes.size

        # extra positional arguments some filters require
        if filter_func == ndimage.rank_filter:
            args = (2,)  # (rank,)
        elif filter_func == ndimage.percentile_filter:
            args = (30,)  # (percentile,)
        else:
            args = ()

        # form kwargs that specify only the axes in `axes`
        reduced_kwargs = {key: val[axes] for key, val in kwargs.items()}
        if len(set(axes % array.ndim)) != len(axes):
            # parametrized cases with duplicate axes raise an error
            with pytest.raises(ValueError, match="axes must be unique"):
                filter_func(array, *args, [size]*n_axes, axes=axes,
                            **reduced_kwargs)
            return

        output = filter_func(array, *args, [size]*n_axes, axes=axes,
                             **reduced_kwargs)

        # result should be equivalent to sigma=0.0/size=1 on unfiltered axes
        size_3d = np.full(array.ndim, fill_value=size0)
        size_3d[axes] = size
        if 'origin' in kwargs:
            # origin should be zero on the axis that has size 0
            origin = np.array([0, 0, 0])
            origin[axes] = reduced_kwargs['origin']
            kwargs['origin'] = origin
        expected = filter_func(array, *args, size_3d, **kwargs)
        assert_allclose(output, expected)
794
+
795
    @pytest.mark.parametrize("filter_func, kwargs",
                             [(ndimage.minimum_filter, {}),
                              (ndimage.maximum_filter, {}),
                              (ndimage.median_filter, {}),
                              (ndimage.rank_filter, {"rank": 1}),
                              (ndimage.percentile_filter, {"percentile": 30})])
    def test_filter_weights_subset_axes_origins(self, filter_func, kwargs):
        """A 2-D footprint on the last two axes with per-axis origins:
        a unit origin on the last axis shifts the result by one sample."""
        axes = (-2, -1)
        origins = (0, 1)
        array = np.arange(6 * 8 * 12, dtype=np.float64).reshape(6, 8, 12)
        axes = np.array(axes)

        # weights with ndim matching len(axes)
        footprint = np.ones((3, 5), dtype=bool)
        footprint[0, 1] = 0  # make non-separable

        output = filter_func(
            array, footprint=footprint, axes=axes, origin=origins, **kwargs)

        output0 = filter_func(
            array, footprint=footprint, axes=axes, origin=0, **kwargs)

        # output has origin shift on last axis relative to output0, so
        # expect shifted arrays to be equal.
        np.testing.assert_array_equal(output[:, :, 1:], output0[:, :, :-1])
820
+
821
    @pytest.mark.parametrize(
        'filter_func, args',
        [(ndimage.gaussian_filter, (1.0,)),  # args = (sigma,)
         (ndimage.uniform_filter, (3,)),  # args = (size,)
         (ndimage.minimum_filter, (3,)),  # args = (size,)
         (ndimage.maximum_filter, (3,)),  # args = (size,)
         (ndimage.median_filter, (3,)),  # args = (size,)
         (ndimage.rank_filter, (2, 3)),  # args = (rank, size)
         (ndimage.percentile_filter, (30, 3))])  # args = (percentile, size)
    @pytest.mark.parametrize(
        'axes', [(1.5,), (0, 1, 2, 3), (3,), (-4,)]
    )
    def test_filter_invalid_axes(self, filter_func, args, axes):
        """Non-integer axes raise TypeError; out-of-range axes ValueError."""
        array = np.arange(6 * 8 * 12, dtype=np.float64).reshape(6, 8, 12)
        if any(isinstance(ax, float) for ax in axes):
            error_class = TypeError
            match = "cannot be interpreted as an integer"
        else:
            error_class = ValueError
            match = "out of range"
        with pytest.raises(error_class, match=match):
            filter_func(array, *args, axes=axes)
843
+
844
    @pytest.mark.parametrize(
        'filter_func, kwargs',
        [(ndimage.minimum_filter, {}),
         (ndimage.maximum_filter, {}),
         (ndimage.median_filter, {}),
         (ndimage.rank_filter, dict(rank=3)),
         (ndimage.percentile_filter, dict(percentile=30))])
    @pytest.mark.parametrize(
        'axes', [(0, ), (1, 2), (0, 1, 2)]
    )
    @pytest.mark.parametrize('separable_footprint', [False, True])
    def test_filter_invalid_footprint_ndim(self, filter_func, kwargs, axes,
                                           separable_footprint):
        """A footprint with more dimensions than `axes` must be rejected."""
        array = np.arange(6 * 8 * 12, dtype=np.float64).reshape(6, 8, 12)
        # create a footprint with one too many dimensions
        footprint = np.ones((3,) * (len(axes) + 1))
        if not separable_footprint:
            footprint[(0,) * footprint.ndim] = 0
        # min/max filters take a separable (all-ones) footprint down a
        # different code path, which reports a different error message
        if (filter_func in [ndimage.minimum_filter, ndimage.maximum_filter]
                and separable_footprint):
            match = "sequence argument must have length equal to input rank"
        else:
            match = "footprint array has incorrect shape"
        with pytest.raises(RuntimeError, match=match):
            filter_func(array, **kwargs, footprint=footprint, axes=axes)
869
+
870
    @pytest.mark.parametrize('n_mismatch', [1, 3])
    @pytest.mark.parametrize('filter_func, kwargs, key, val',
                             _cases_axes_tuple_length_mismatch())
    def test_filter_tuple_length_mismatch(self, n_mismatch, filter_func,
                                          kwargs, key, val):
        # Test for the intended RuntimeError when a kwargs has an invalid size
        # (tuple length 1 or 3 while two axes are selected).
        array = np.arange(6 * 8 * 12, dtype=np.float64).reshape(6, 8, 12)
        kwargs = dict(**kwargs, axes=(0, 1))
        kwargs[key] = (val,) * n_mismatch
        err_msg = "sequence argument must have length equal to input rank"
        with pytest.raises(RuntimeError, match=err_msg):
            filter_func(array, **kwargs)
882
+
883
    @pytest.mark.parametrize('dtype', types + complex_types)
    def test_prewitt01(self, dtype):
        """prewitt along axis 0 equals derivative [-1, 0, 1] then smoothing
        [1, 1, 1] applied as two 1-D correlations."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
        t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
        output = ndimage.prewitt(array, 0)
        assert_array_almost_equal(t, output)

    @pytest.mark.parametrize('dtype', types + complex_types)
    def test_prewitt02(self, dtype):
        """Same as test_prewitt01 but writing into a preallocated output."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
        t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
        output = np.zeros(array.shape, dtype)
        ndimage.prewitt(array, 0, output)
        assert_array_almost_equal(t, output)

    @pytest.mark.parametrize('dtype', types + complex_types)
    def test_prewitt03(self, dtype):
        """prewitt along axis 1: derivative on axis 1, smoothing on axis 0."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
        t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
        output = ndimage.prewitt(array, 1)
        assert_array_almost_equal(t, output)

    @pytest.mark.parametrize('dtype', types + complex_types)
    def test_prewitt04(self, dtype):
        """Negative axis -1 is equivalent to the last axis (1) on 2-D input."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        t = ndimage.prewitt(array, -1)
        output = ndimage.prewitt(array, 1)
        assert_array_almost_equal(t, output)
922
+
923
+ @pytest.mark.parametrize('dtype', types + complex_types)
924
+ def test_sobel01(self, dtype):
925
+ array = np.array([[3, 2, 5, 1, 4],
926
+ [5, 8, 3, 7, 1],
927
+ [5, 6, 9, 3, 5]], dtype)
928
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
929
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
930
+ output = ndimage.sobel(array, 0)
931
+ assert_array_almost_equal(t, output)
932
+
933
+ @pytest.mark.parametrize('dtype', types + complex_types)
934
+ def test_sobel02(self, dtype):
935
+ array = np.array([[3, 2, 5, 1, 4],
936
+ [5, 8, 3, 7, 1],
937
+ [5, 6, 9, 3, 5]], dtype)
938
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
939
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
940
+ output = np.zeros(array.shape, dtype)
941
+ ndimage.sobel(array, 0, output)
942
+ assert_array_almost_equal(t, output)
943
+
944
+ @pytest.mark.parametrize('dtype', types + complex_types)
945
+ def test_sobel03(self, dtype):
946
+ array = np.array([[3, 2, 5, 1, 4],
947
+ [5, 8, 3, 7, 1],
948
+ [5, 6, 9, 3, 5]], dtype)
949
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
950
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
951
+ output = np.zeros(array.shape, dtype)
952
+ output = ndimage.sobel(array, 1)
953
+ assert_array_almost_equal(t, output)
954
+
955
+ @pytest.mark.parametrize('dtype', types + complex_types)
956
+ def test_sobel04(self, dtype):
957
+ array = np.array([[3, 2, 5, 1, 4],
958
+ [5, 8, 3, 7, 1],
959
+ [5, 6, 9, 3, 5]], dtype)
960
+ t = ndimage.sobel(array, -1)
961
+ output = ndimage.sobel(array, 1)
962
+ assert_array_almost_equal(t, output)
963
+
964
    @pytest.mark.parametrize('dtype',
                             [np.int32, np.float32, np.float64,
                              np.complex64, np.complex128])
    def test_laplace01(self, dtype):
        """laplace equals the sum of the second derivatives [1, -2, 1]
        taken along each axis."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype) * 100
        tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
        tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
        output = ndimage.laplace(array)
        assert_array_almost_equal(tmp1 + tmp2, output)

    @pytest.mark.parametrize('dtype',
                             [np.int32, np.float32, np.float64,
                              np.complex64, np.complex128])
    def test_laplace02(self, dtype):
        """Same as test_laplace01 but writing into a preallocated output."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype) * 100
        tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
        tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
        output = np.zeros(array.shape, dtype)
        ndimage.laplace(array, output=output)
        assert_array_almost_equal(tmp1 + tmp2, output)

    @pytest.mark.parametrize('dtype',
                             [np.int32, np.float32, np.float64,
                              np.complex64, np.complex128])
    def test_gaussian_laplace01(self, dtype):
        """gaussian_laplace equals the sum of second-order Gaussian
        derivatives along each axis."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype) * 100
        tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
        tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
        output = ndimage.gaussian_laplace(array, 1.0)
        assert_array_almost_equal(tmp1 + tmp2, output)

    @pytest.mark.parametrize('dtype',
                             [np.int32, np.float32, np.float64,
                              np.complex64, np.complex128])
    def test_gaussian_laplace02(self, dtype):
        """Same as test_gaussian_laplace01 but with a preallocated output."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype) * 100
        tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
        tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
        output = np.zeros(array.shape, dtype)
        ndimage.gaussian_laplace(array, 1.0, output)
        assert_array_almost_equal(tmp1 + tmp2, output)
1013
+
1014
    @pytest.mark.parametrize('dtype', types + complex_types)
    def test_generic_laplace01(self, dtype):
        """generic_laplace with a Gaussian-based second-derivative callback
        (receiving extra_arguments/extra_keywords) matches gaussian_laplace."""
        def derivative2(input, axis, output, mode, cval, a, b):
            # a=1.0 and b=2.0 arrive via extra_arguments/extra_keywords,
            # so sigma works out to [1.0, 1.0]
            sigma = [a, b / 2.0]
            input = np.asarray(input)
            order = [0] * input.ndim
            order[axis] = 2
            return ndimage.gaussian_filter(input, sigma, order,
                                           output, mode, cval)
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        output = np.zeros(array.shape, dtype)
        tmp = ndimage.generic_laplace(array, derivative2,
                                      extra_arguments=(1.0,),
                                      extra_keywords={'b': 2.0})
        ndimage.gaussian_laplace(array, 1.0, output)
        assert_array_almost_equal(tmp, output)
1032
+
1033
    @pytest.mark.parametrize('dtype',
                             [np.int32, np.float32, np.float64,
                              np.complex64, np.complex128])
    def test_gaussian_gradient_magnitude01(self, dtype):
        """gaussian_gradient_magnitude equals sqrt(gx**2 + gy**2) of the
        first-order Gaussian derivatives along each axis."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype) * 100
        tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
        tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
        output = ndimage.gaussian_gradient_magnitude(array, 1.0)
        expected = tmp1 * tmp1 + tmp2 * tmp2
        expected = np.sqrt(expected).astype(dtype)
        assert_array_almost_equal(expected, output)

    @pytest.mark.parametrize('dtype',
                             [np.int32, np.float32, np.float64,
                              np.complex64, np.complex128])
    def test_gaussian_gradient_magnitude02(self, dtype):
        """Same as the previous test but with a preallocated output array."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype) * 100
        tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
        tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
        output = np.zeros(array.shape, dtype)
        ndimage.gaussian_gradient_magnitude(array, 1.0, output)
        expected = tmp1 * tmp1 + tmp2 * tmp2
        expected = np.sqrt(expected).astype(dtype)
        assert_array_almost_equal(expected, output)

    def test_generic_gradient_magnitude01(self):
        """generic_gradient_magnitude with a Gaussian first-derivative
        callback matches gaussian_gradient_magnitude."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], np.float64)

        def derivative(input, axis, output, mode, cval, a, b):
            # a=1.0, b=2.0 arrive via extra_arguments/extra_keywords
            sigma = [a, b / 2.0]
            input = np.asarray(input)
            order = [0] * input.ndim
            order[axis] = 1
            return ndimage.gaussian_filter(input, sigma, order,
                                           output, mode, cval)
        tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
        tmp2 = ndimage.generic_gradient_magnitude(
            array, derivative, extra_arguments=(1.0,),
            extra_keywords={'b': 2.0})
        assert_array_almost_equal(tmp1, tmp2)
1079
+
1080
    def test_uniform01(self):
        """uniform_filter1d with size 2 and origin -1 averages each element
        with its right neighbor."""
        array = np.array([2, 4, 6])
        size = 2
        output = ndimage.uniform_filter1d(array, size, origin=-1)
        assert_array_almost_equal([3, 5, 6], output)

    def test_uniform01_complex(self):
        """Complex input: the filter is applied to real and imaginary parts."""
        array = np.array([2 + 1j, 4 + 2j, 6 + 3j], dtype=np.complex128)
        size = 2
        output = ndimage.uniform_filter1d(array, size, origin=-1)
        assert_array_almost_equal([3, 5, 6], output.real)
        assert_array_almost_equal([1.5, 2.5, 3], output.imag)

    def test_uniform02(self):
        """Size 0 leaves the input unchanged."""
        array = np.array([1, 2, 3])
        filter_shape = [0]
        output = ndimage.uniform_filter(array, filter_shape)
        assert_array_almost_equal(array, output)

    def test_uniform03(self):
        """Size 1 leaves the input unchanged."""
        array = np.array([1, 2, 3])
        filter_shape = [1]
        output = ndimage.uniform_filter(array, filter_shape)
        assert_array_almost_equal(array, output)

    def test_uniform04(self):
        """Size 2 with default origin averages each element with its left
        neighbor."""
        array = np.array([2, 4, 6])
        filter_shape = [2]
        output = ndimage.uniform_filter(array, filter_shape)
        assert_array_almost_equal([2, 3, 5], output)

    def test_uniform05(self):
        """Empty input produces empty output."""
        array = []
        filter_shape = [1]
        output = ndimage.uniform_filter(array, filter_shape)
        assert_array_almost_equal([], output)

    @pytest.mark.parametrize('dtype_array', types)
    @pytest.mark.parametrize('dtype_output', types)
    def test_uniform06(self, dtype_array, dtype_output):
        """2-D uniform filter honors the requested output dtype for every
        input/output dtype combination."""
        filter_shape = [2, 2]
        array = np.array([[4, 8, 12],
                          [16, 20, 24]], dtype_array)
        output = ndimage.uniform_filter(
            array, filter_shape, output=dtype_output)
        assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
        assert_equal(output.dtype.type, dtype_output)

    @pytest.mark.parametrize('dtype_array', complex_types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_uniform06_complex(self, dtype_array, dtype_output):
        """Complex analogue of test_uniform06 (real part only is checked)."""
        filter_shape = [2, 2]
        array = np.array([[4, 8 + 5j, 12],
                          [16, 20, 24]], dtype_array)
        output = ndimage.uniform_filter(
            array, filter_shape, output=dtype_output)
        assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output.real)
        assert_equal(output.dtype.type, dtype_output)
1138
+
1139
    def test_minimum_filter01(self):
        """1-D minimum filter, size 2, monotonic input."""
        array = np.array([1, 2, 3, 4, 5])
        filter_shape = np.array([2])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([1, 1, 2, 3, 4], output)

    def test_minimum_filter02(self):
        """1-D minimum filter, size 3, monotonic input."""
        array = np.array([1, 2, 3, 4, 5])
        filter_shape = np.array([3])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([1, 1, 2, 3, 4], output)

    def test_minimum_filter03(self):
        """1-D minimum filter, size 2, non-monotonic input."""
        array = np.array([3, 2, 5, 1, 4])
        filter_shape = np.array([2])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([3, 2, 2, 1, 1], output)

    def test_minimum_filter04(self):
        """1-D minimum filter, size 3, non-monotonic input."""
        array = np.array([3, 2, 5, 1, 4])
        filter_shape = np.array([3])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([2, 2, 1, 1, 1], output)

    def test_minimum_filter05(self):
        """2-D minimum filter with rectangular (2, 3) window."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        filter_shape = np.array([2, 3])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 2, 1, 1, 1],
                                   [5, 3, 3, 1, 1]], output)

    def test_minimum_filter05_overlap(self):
        """In-place operation (output is the input array) gives the same
        result as test_minimum_filter05."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        filter_shape = np.array([2, 3])
        ndimage.minimum_filter(array, filter_shape, output=array)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 2, 1, 1, 1],
                                   [5, 3, 3, 1, 1]], array)

    def test_minimum_filter06(self):
        """All-ones footprint behaves like size=(2, 3)."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        footprint = [[1, 1, 1], [1, 1, 1]]
        output = ndimage.minimum_filter(array, footprint=footprint)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 2, 1, 1, 1],
                                   [5, 3, 3, 1, 1]], output)
        # separable footprint should allow mode sequence
        output2 = ndimage.minimum_filter(array, footprint=footprint,
                                         mode=['reflect', 'reflect'])
        assert_array_almost_equal(output2, output)

    def test_minimum_filter07(self):
        """Non-separable footprint: result checked, and a per-axis mode
        sequence must raise RuntimeError."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array, footprint=footprint)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 3, 1, 3, 1],
                                   [5, 5, 3, 3, 1]], output)
        with assert_raises(RuntimeError):
            ndimage.minimum_filter(array, footprint=footprint,
                                   mode=['reflect', 'constant'])

    def test_minimum_filter08(self):
        """Scalar origin=-1 shifts the footprint on every axis."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array, footprint=footprint, origin=-1)
        assert_array_almost_equal([[3, 1, 3, 1, 1],
                                   [5, 3, 3, 1, 1],
                                   [3, 3, 1, 1, 1]], output)

    def test_minimum_filter09(self):
        """Per-axis origin=[-1, 0] shifts the footprint on axis 0 only."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array, footprint=footprint,
                                        origin=[-1, 0])
        assert_array_almost_equal([[2, 3, 1, 3, 1],
                                   [5, 5, 3, 3, 1],
                                   [5, 3, 3, 1, 1]], output)
1230
+
1231
    def test_maximum_filter01(self):
        """1-D maximum filter, size 2, monotonic input."""
        array = np.array([1, 2, 3, 4, 5])
        filter_shape = np.array([2])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([1, 2, 3, 4, 5], output)

    def test_maximum_filter02(self):
        """1-D maximum filter, size 3, monotonic input."""
        array = np.array([1, 2, 3, 4, 5])
        filter_shape = np.array([3])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([2, 3, 4, 5, 5], output)

    def test_maximum_filter03(self):
        """1-D maximum filter, size 2, non-monotonic input."""
        array = np.array([3, 2, 5, 1, 4])
        filter_shape = np.array([2])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([3, 3, 5, 5, 4], output)

    def test_maximum_filter04(self):
        """1-D maximum filter, size 3, non-monotonic input."""
        array = np.array([3, 2, 5, 1, 4])
        filter_shape = np.array([3])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([3, 5, 5, 5, 4], output)

    def test_maximum_filter05(self):
        """2-D maximum filter with rectangular (2, 3) window."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        filter_shape = np.array([2, 3])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 9, 9, 9, 5],
                                   [8, 9, 9, 9, 7]], output)

    def test_maximum_filter06(self):
        """All-ones footprint behaves like size=(2, 3)."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        footprint = [[1, 1, 1], [1, 1, 1]]
        output = ndimage.maximum_filter(array, footprint=footprint)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 9, 9, 9, 5],
                                   [8, 9, 9, 9, 7]], output)
        # separable footprint should allow mode sequence
        output2 = ndimage.maximum_filter(array, footprint=footprint,
                                         mode=['reflect', 'reflect'])
        assert_array_almost_equal(output2, output)

    def test_maximum_filter07(self):
        """Non-separable footprint: result checked; a per-axis mode sequence
        must raise RuntimeError even when both modes are identical."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array, footprint=footprint)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 7, 9, 9, 5],
                                   [7, 9, 8, 9, 7]], output)
        # non-separable footprint should not allow mode sequence
        with assert_raises(RuntimeError):
            ndimage.maximum_filter(array, footprint=footprint,
                                   mode=['reflect', 'reflect'])

    def test_maximum_filter08(self):
        """Scalar origin=-1 shifts the footprint on every axis."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array, footprint=footprint, origin=-1)
        assert_array_almost_equal([[7, 9, 9, 5, 5],
                                   [9, 8, 9, 7, 5],
                                   [8, 8, 7, 7, 7]], output)

    def test_maximum_filter09(self):
        """Per-axis origin=[-1, 0] shifts the footprint on axis 0 only."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array, footprint=footprint,
                                        origin=[-1, 0])
        assert_array_almost_equal([[7, 7, 9, 9, 5],
                                   [7, 9, 8, 9, 7],
                                   [8, 8, 8, 7, 7]], output)
1313
+
1314
    @pytest.mark.parametrize(
        'axes', tuple(itertools.combinations(range(-3, 3), 2))
    )
    @pytest.mark.parametrize(
        'filter_func, kwargs',
        [(ndimage.minimum_filter, {}),
         (ndimage.maximum_filter, {}),
         (ndimage.median_filter, {}),
         (ndimage.rank_filter, dict(rank=3)),
         (ndimage.percentile_filter, dict(percentile=60))]
    )
    def test_minmax_nonseparable_axes(self, filter_func, axes, kwargs):
        """Filtering a 3-D array over a 2-axis subset with a 2-D non-separable
        footprint matches filtering over all axes with the footprint expanded
        to 3-D along the missing axis; duplicate axes raise ValueError."""
        array = np.arange(6 * 8 * 12, dtype=np.float32).reshape(6, 8, 12)
        # use 2D triangular footprint because it is non-separable
        footprint = np.tri(5)
        axes = np.array(axes)

        if len(set(axes % array.ndim)) != len(axes):
            # parametrized cases with duplicate axes raise an error
            with pytest.raises(ValueError):
                filter_func(array, footprint=footprint, axes=axes, **kwargs)
            return
        output = filter_func(array, footprint=footprint, axes=axes, **kwargs)

        # expand the footprint with a singleton along the omitted axis and
        # compare against full-rank filtering
        missing_axis = tuple(set(range(3)) - set(axes % array.ndim))[0]
        footprint_3d = np.expand_dims(footprint, missing_axis)
        expected = filter_func(array, footprint=footprint_3d, **kwargs)
        assert_allclose(output, expected)
1342
+
1343
    def test_rank01(self):
        """For a size-2 window on monotonic input, rank 1 (max),
        percentile 100, and median all reproduce the input."""
        array = np.array([1, 2, 3, 4, 5])
        output = ndimage.rank_filter(array, 1, size=2)
        assert_array_almost_equal(array, output)
        output = ndimage.percentile_filter(array, 100, size=2)
        assert_array_almost_equal(array, output)
        output = ndimage.median_filter(array, 2)
        assert_array_almost_equal(array, output)

    def test_rank02(self):
        """Size-3 window on monotonic input: rank 1, percentile 50 and
        median all reproduce the input; size given as list/scalar/tuple."""
        array = np.array([1, 2, 3, 4, 5])
        output = ndimage.rank_filter(array, 1, size=[3])
        assert_array_almost_equal(array, output)
        output = ndimage.percentile_filter(array, 50, size=3)
        assert_array_almost_equal(array, output)
        output = ndimage.median_filter(array, (3,))
        assert_array_almost_equal(array, output)

    def test_rank03(self):
        """Size-2 window: rank 1 equals percentile 100 (both are the max)."""
        array = np.array([3, 2, 5, 1, 4])
        output = ndimage.rank_filter(array, 1, size=[2])
        assert_array_almost_equal([3, 3, 5, 5, 4], output)
        output = ndimage.percentile_filter(array, 100, size=2)
        assert_array_almost_equal([3, 3, 5, 5, 4], output)

    def test_rank04(self):
        """Size-3 window: rank 1 == percentile 50 == median."""
        array = np.array([3, 2, 5, 1, 4])
        expected = [3, 3, 2, 4, 4]
        output = ndimage.rank_filter(array, 1, size=3)
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 50, size=3)
        assert_array_almost_equal(expected, output)
        output = ndimage.median_filter(array, size=3)
        assert_array_almost_equal(expected, output)

    def test_rank05(self):
        """Negative rank counts from the end: rank -2 of size 3 equals
        rank 1 (the median)."""
        array = np.array([3, 2, 5, 1, 4])
        expected = [3, 3, 2, 4, 4]
        output = ndimage.rank_filter(array, -2, size=3)
        assert_array_almost_equal(expected, output)

    def test_rank06(self):
        """2-D (2, 3) window: rank 1 equals percentile 17."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]])
        expected = [[2, 2, 1, 1, 1],
                    [3, 3, 2, 1, 1],
                    [5, 5, 3, 3, 1]]
        output = ndimage.rank_filter(array, 1, size=[2, 3])
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 17, size=(2, 3))
        assert_array_almost_equal(expected, output)

    def test_rank06_overlap(self):
        """In-place operation (output is the input array) matches
        test_rank06 for both rank_filter and percentile_filter."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]])
        array_copy = array.copy()
        expected = [[2, 2, 1, 1, 1],
                    [3, 3, 2, 1, 1],
                    [5, 5, 3, 3, 1]]
        ndimage.rank_filter(array, 1, size=[2, 3], output=array)
        assert_array_almost_equal(expected, array)

        ndimage.percentile_filter(array_copy, 17, size=(2, 3),
                                  output=array_copy)
        assert_array_almost_equal(expected, array_copy)

    def test_rank07(self):
        """Negative rank (-2, i.e. second largest) on a (2, 3) window."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]])
        expected = [[3, 5, 5, 5, 4],
                    [5, 5, 7, 5, 4],
                    [6, 8, 8, 7, 5]]
        output = ndimage.rank_filter(array, -2, size=[2, 3])
        assert_array_almost_equal(expected, output)

    def test_rank08(self):
        """percentile 50 == rank 3 == median on a (2, 3) window; these
        non-separable filters reject a per-axis mode sequence."""
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]])
        expected = [[3, 3, 2, 4, 4],
                    [5, 5, 5, 4, 4],
                    [5, 6, 7, 5, 5]]
        output = ndimage.percentile_filter(array, 50.0, size=(2, 3))
        assert_array_almost_equal(expected, output)
        output = ndimage.rank_filter(array, 3, size=(2, 3))
        assert_array_almost_equal(expected, output)
        output = ndimage.median_filter(array, size=(2, 3))
        assert_array_almost_equal(expected, output)

        # non-separable: does not allow mode sequence
        with assert_raises(RuntimeError):
            ndimage.percentile_filter(array, 50.0, size=(2, 3),
                                      mode=['reflect', 'constant'])
        with assert_raises(RuntimeError):
            ndimage.rank_filter(array, 3, size=(2, 3), mode=['reflect']*2)
        with assert_raises(RuntimeError):
            ndimage.median_filter(array, size=(2, 3), mode=['reflect']*2)
1443
+
1444
    @pytest.mark.parametrize('dtype', types)
    def test_rank09(self, dtype):
        """Cross-shaped footprint: rank 1 equals percentile 35."""
        expected = [[3, 3, 2, 4, 4],
                    [3, 5, 2, 5, 1],
                    [5, 5, 8, 3, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        output = ndimage.rank_filter(array, 1, footprint=footprint)
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 35, footprint=footprint)
        assert_array_almost_equal(expected, output)

    def test_rank10(self):
        """Rank 0 (minimum) equals percentile 0 over the same footprint."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        expected = [[2, 2, 1, 1, 1],
                    [2, 3, 1, 3, 1],
                    [5, 5, 3, 3, 1]]
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.rank_filter(array, 0, footprint=footprint)
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 0.0, footprint=footprint)
        assert_array_almost_equal(expected, output)

    def test_rank11(self):
        """Rank -1 (maximum) equals percentile 100 over the same footprint."""
        array = np.array([[3, 2, 5, 1, 4],
                          [7, 6, 9, 3, 5],
                          [5, 8, 3, 7, 1]])
        expected = [[3, 5, 5, 5, 4],
                    [7, 7, 9, 9, 5],
                    [7, 9, 8, 9, 7]]
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.rank_filter(array, -1, footprint=footprint)
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 100.0, footprint=footprint)
        assert_array_almost_equal(expected, output)

    @pytest.mark.parametrize('dtype', types)
    def test_rank12(self, dtype):
        """Rank 1 == percentile 50 == median over a 3-element footprint."""
        expected = [[3, 3, 2, 4, 4],
                    [3, 5, 2, 5, 1],
                    [5, 5, 8, 3, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        output = ndimage.rank_filter(array, 1, footprint=footprint)
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 50.0,
                                           footprint=footprint)
        assert_array_almost_equal(expected, output)
        output = ndimage.median_filter(array, footprint=footprint)
        assert_array_almost_equal(expected, output)

    @pytest.mark.parametrize('dtype', types)
    def test_rank13(self, dtype):
        """Scalar origin=-1 shifts the footprint on every axis."""
        expected = [[5, 2, 5, 1, 1],
                    [5, 8, 3, 5, 5],
                    [6, 6, 5, 5, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        output = ndimage.rank_filter(array, 1, footprint=footprint,
                                     origin=-1)
        assert_array_almost_equal(expected, output)

    @pytest.mark.parametrize('dtype', types)
    def test_rank14(self, dtype):
        """Per-axis origin=[-1, 0] shifts the footprint on axis 0 only."""
        expected = [[3, 5, 2, 5, 1],
                    [5, 5, 8, 3, 5],
                    [5, 6, 6, 5, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        output = ndimage.rank_filter(array, 1, footprint=footprint,
                                     origin=[-1, 0])
        assert_array_almost_equal(expected, output)

    @pytest.mark.parametrize('dtype', types)
    def test_rank15(self, dtype):
        """Rank 0 (minimum) combined with per-axis origin=[-1, 0]."""
        expected = [[2, 3, 1, 4, 1],
                    [5, 3, 7, 1, 1],
                    [5, 5, 3, 3, 3]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        array = np.array([[3, 2, 5, 1, 4],
                          [5, 8, 3, 7, 1],
                          [5, 6, 9, 3, 5]], dtype)
        output = ndimage.rank_filter(array, 0, footprint=footprint,
                                     origin=[-1, 0])
        assert_array_almost_equal(expected, output)
1539
+
1540
    @pytest.mark.parametrize('dtype', types)
    def test_generic_filter1d01(self, dtype):
        """A hand-written 1-D callback (using extra_arguments and
        extra_keywords) must match the equivalent correlate1d call."""
        weights = np.array([1.1, 2.2, 3.3])

        def _filter_func(input, output, fltr, total):
            fltr = fltr / total
            # input has 2 extra (padded) samples; compute each output
            # element from 3 consecutive input samples
            for ii in range(input.shape[0] - 2):
                output[ii] = input[ii] * fltr[0]
                output[ii] += input[ii + 1] * fltr[1]
                output[ii] += input[ii + 2] * fltr[2]
        a = np.arange(12, dtype=dtype)
        a.shape = (3, 4)
        r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1)
        r2 = ndimage.generic_filter1d(
            a, _filter_func, 3, axis=0, origin=-1,
            extra_arguments=(weights,),
            extra_keywords={'total': weights.sum()})
        assert_array_almost_equal(r1, r2)

    @pytest.mark.parametrize('dtype', types)
    def test_generic_filter01(self, dtype):
        """A hand-written n-D callback over a footprint must match the
        equivalent correlate call; mode sequences are rejected."""
        filter_ = np.array([[1.0, 2.0], [3.0, 4.0]])
        footprint = np.array([[1, 0], [0, 1]])
        cf = np.array([1., 4.])

        def _filter_func(buffer, weights, total=1.0):
            # NOTE(review): the `weights` parameter is immediately
            # overwritten by `cf / total` — presumably intentional since
            # cf is what extra_arguments passes in; confirm if touched.
            weights = cf / total
            return (buffer * weights).sum()

        a = np.arange(12, dtype=dtype)
        a.shape = (3, 4)
        r1 = ndimage.correlate(a, filter_ * footprint)
        # match the callback's division by total (cf.sum() == 5)
        if dtype in float_types:
            r1 /= 5
        else:
            r1 //= 5
        r2 = ndimage.generic_filter(
            a, _filter_func, footprint=footprint, extra_arguments=(cf,),
            extra_keywords={'total': cf.sum()})
        assert_array_almost_equal(r1, r2)

        # generic_filter doesn't allow mode sequence
        with assert_raises(RuntimeError):
            r2 = ndimage.generic_filter(
                a, _filter_func, mode=['reflect', 'reflect'],
                footprint=footprint, extra_arguments=(cf,),
                extra_keywords={'total': cf.sum()})
1587
+
1588
    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [1, 1, 2]),
         ('wrap', [3, 1, 2]),
         ('reflect', [1, 1, 2]),
         ('mirror', [2, 1, 2]),
         ('constant', [0, 1, 2])]
    )
    def test_extend01(self, mode, expected_value):
        """Boundary modes for correlate1d with a 2-tap kernel reading one
        sample to the left."""
        array = np.array([1, 2, 3])
        weights = np.array([1, 0])
        output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [1, 1, 1]),
         ('wrap', [3, 1, 2]),
         ('reflect', [3, 3, 2]),
         ('mirror', [1, 2, 3]),
         ('constant', [0, 0, 0])]
    )
    def test_extend02(self, mode, expected_value):
        """Boundary modes with a kernel long enough to read entirely
        outside the input on the left."""
        array = np.array([1, 2, 3])
        weights = np.array([1, 0, 0, 0, 0, 0, 0, 0])
        output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [2, 3, 3]),
         ('wrap', [2, 3, 1]),
         ('reflect', [2, 3, 3]),
         ('mirror', [2, 3, 2]),
         ('constant', [2, 3, 0])]
    )
    def test_extend03(self, mode, expected_value):
        """Boundary modes with a kernel reading one sample to the right."""
        array = np.array([1, 2, 3])
        weights = np.array([0, 0, 1])
        output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [3, 3, 3]),
         ('wrap', [2, 3, 1]),
         ('reflect', [2, 1, 1]),
         ('mirror', [1, 2, 3]),
         ('constant', [0, 0, 0])]
    )
    def test_extend04(self, mode, expected_value):
        """Boundary modes with a long kernel reading far to the right."""
        array = np.array([1, 2, 3])
        weights = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
         ('wrap', [[9, 7, 8], [3, 1, 2], [6, 4, 5]]),
         ('reflect', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
         ('mirror', [[5, 4, 5], [2, 1, 2], [5, 4, 5]]),
         ('constant', [[0, 0, 0], [0, 1, 2], [0, 4, 5]])]
    )
    def test_extend05(self, mode, expected_value):
        """2-D boundary modes: kernel reads one sample up-left."""
        array = np.array([[1, 2, 3],
                          [4, 5, 6],
                          [7, 8, 9]])
        weights = np.array([[1, 0], [0, 0]])
        output = ndimage.correlate(array, weights, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
         ('wrap', [[5, 6, 4], [8, 9, 7], [2, 3, 1]]),
         ('reflect', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
         ('mirror', [[5, 6, 5], [8, 9, 8], [5, 6, 5]]),
         ('constant', [[5, 6, 0], [8, 9, 0], [0, 0, 0]])]
    )
    def test_extend06(self, mode, expected_value):
        """2-D boundary modes: kernel reads one sample down-right."""
        array = np.array([[1, 2, 3],
                          [4, 5, 6],
                          [7, 8, 9]])
        weights = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
        output = ndimage.correlate(array, weights, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [3, 3, 3]),
         ('wrap', [2, 3, 1]),
         ('reflect', [2, 1, 1]),
         ('mirror', [1, 2, 3]),
         ('constant', [0, 0, 0])]
    )
    def test_extend07(self, mode, expected_value):
        """Same long-kernel case as test_extend04 but via correlate."""
        array = np.array([1, 2, 3])
        weights = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        output = ndimage.correlate(array, weights, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [[3], [3], [3]]),
         ('wrap', [[2], [3], [1]]),
         ('reflect', [[2], [1], [1]]),
         ('mirror', [[1], [2], [3]]),
         ('constant', [[0], [0], [0]])]
    )
    def test_extend08(self, mode, expected_value):
        """Column-vector variant of the long-kernel boundary test."""
        array = np.array([[1], [2], [3]])
        weights = np.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
        output = ndimage.correlate(array, weights, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [3, 3, 3]),
         ('wrap', [2, 3, 1]),
         ('reflect', [2, 1, 1]),
         ('mirror', [1, 2, 3]),
         ('constant', [0, 0, 0])]
    )
    def test_extend09(self, mode, expected_value):
        """Duplicate of test_extend07 kept for coverage parity."""
        array = np.array([1, 2, 3])
        weights = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        output = ndimage.correlate(array, weights, mode=mode, cval=0)
        assert_array_equal(output, expected_value)

    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [[3], [3], [3]]),
         ('wrap', [[2], [3], [1]]),
         ('reflect', [[2], [1], [1]]),
         ('mirror', [[1], [2], [3]]),
         ('constant', [[0], [0], [0]])]
    )
    def test_extend10(self, mode, expected_value):
        """Duplicate of test_extend08 kept for coverage parity."""
        array = np.array([[1], [2], [3]])
        weights = np.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
        output = ndimage.correlate(array, weights, mode=mode, cval=0)
        assert_array_equal(output, expected_value)
1731
+
1732
+
1733
+ def test_ticket_701():
1734
+ # Test generic filter sizes
1735
+ arr = np.arange(4).reshape((2, 2))
1736
+ def func(x):
1737
+ return np.min(x)
1738
+ res = ndimage.generic_filter(arr, func, size=(1, 1))
1739
+ # The following raises an error unless ticket 701 is fixed
1740
+ res2 = ndimage.generic_filter(arr, func, size=1)
1741
+ assert_equal(res, res2)
1742
+
1743
+
1744
+ def test_gh_5430():
1745
+ # At least one of these raises an error unless gh-5430 is
1746
+ # fixed. In py2k an int is implemented using a C long, so
1747
+ # which one fails depends on your system. In py3k there is only
1748
+ # one arbitrary precision integer type, so both should fail.
1749
+ sigma = np.int32(1)
1750
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
1751
+ assert_equal(out, [sigma])
1752
+ sigma = np.int64(1)
1753
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
1754
+ assert_equal(out, [sigma])
1755
+ # This worked before; make sure it still works
1756
+ sigma = 1
1757
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
1758
+ assert_equal(out, [sigma])
1759
+ # This worked before; make sure it still works
1760
+ sigma = [1, 1]
1761
+ out = ndimage._ni_support._normalize_sequence(sigma, 2)
1762
+ assert_equal(out, sigma)
1763
+ # Also include the OPs original example to make sure we fixed the issue
1764
+ x = np.random.normal(size=(256, 256))
1765
+ perlin = np.zeros_like(x)
1766
+ for i in 2**np.arange(6):
1767
+ perlin += ndimage.gaussian_filter(x, i, mode="wrap") * i**2
1768
+ # This also fixes gh-4106, show that the OPs example now runs.
1769
+ x = np.int64(21)
1770
+ ndimage._ni_support._normalize_sequence(x, 0)
1771
+
1772
+
1773
+ def test_gaussian_kernel1d():
1774
+ radius = 10
1775
+ sigma = 2
1776
+ sigma2 = sigma * sigma
1777
+ x = np.arange(-radius, radius + 1, dtype=np.double)
1778
+ phi_x = np.exp(-0.5 * x * x / sigma2)
1779
+ phi_x /= phi_x.sum()
1780
+ assert_allclose(phi_x, _gaussian_kernel1d(sigma, 0, radius))
1781
+ assert_allclose(-phi_x * x / sigma2, _gaussian_kernel1d(sigma, 1, radius))
1782
+ assert_allclose(phi_x * (x * x / sigma2 - 1) / sigma2,
1783
+ _gaussian_kernel1d(sigma, 2, radius))
1784
+ assert_allclose(phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2),
1785
+ _gaussian_kernel1d(sigma, 3, radius))
1786
+
1787
+
1788
+ def test_orders_gauss():
1789
+ # Check order inputs to Gaussians
1790
+ arr = np.zeros((1,))
1791
+ assert_equal(0, ndimage.gaussian_filter(arr, 1, order=0))
1792
+ assert_equal(0, ndimage.gaussian_filter(arr, 1, order=3))
1793
+ assert_raises(ValueError, ndimage.gaussian_filter, arr, 1, -1)
1794
+ assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1, order=0))
1795
+ assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1, order=3))
1796
+ assert_raises(ValueError, ndimage.gaussian_filter1d, arr, 1, -1, -1)
1797
+
1798
+
1799
+ def test_valid_origins():
1800
+ """Regression test for #1311."""
1801
+ def func(x):
1802
+ return np.mean(x)
1803
+ data = np.array([1, 2, 3, 4, 5], dtype=np.float64)
1804
+ assert_raises(ValueError, ndimage.generic_filter, data, func, size=3,
1805
+ origin=2)
1806
+ assert_raises(ValueError, ndimage.generic_filter1d, data, func,
1807
+ filter_size=3, origin=2)
1808
+ assert_raises(ValueError, ndimage.percentile_filter, data, 0.2, size=3,
1809
+ origin=2)
1810
+
1811
+ for filter in [ndimage.uniform_filter, ndimage.minimum_filter,
1812
+ ndimage.maximum_filter, ndimage.maximum_filter1d,
1813
+ ndimage.median_filter, ndimage.minimum_filter1d]:
1814
+ # This should work, since for size == 3, the valid range for origin is
1815
+ # -1 to 1.
1816
+ list(filter(data, 3, origin=-1))
1817
+ list(filter(data, 3, origin=1))
1818
+ # Just check this raises an error instead of silently accepting or
1819
+ # segfaulting.
1820
+ assert_raises(ValueError, filter, data, 3, origin=2)
1821
+
1822
+
1823
+ def test_bad_convolve_and_correlate_origins():
1824
+ """Regression test for gh-822."""
1825
+ # Before gh-822 was fixed, these would generate seg. faults or
1826
+ # other crashes on many system.
1827
+ assert_raises(ValueError, ndimage.correlate1d,
1828
+ [0, 1, 2, 3, 4, 5], [1, 1, 2, 0], origin=2)
1829
+ assert_raises(ValueError, ndimage.correlate,
1830
+ [0, 1, 2, 3, 4, 5], [0, 1, 2], origin=[2])
1831
+ assert_raises(ValueError, ndimage.correlate,
1832
+ np.ones((3, 5)), np.ones((2, 2)), origin=[0, 1])
1833
+
1834
+ assert_raises(ValueError, ndimage.convolve1d,
1835
+ np.arange(10), np.ones(3), origin=-2)
1836
+ assert_raises(ValueError, ndimage.convolve,
1837
+ np.arange(10), np.ones(3), origin=[-2])
1838
+ assert_raises(ValueError, ndimage.convolve,
1839
+ np.ones((3, 5)), np.ones((2, 2)), origin=[0, -2])
1840
+
1841
+
1842
+ def test_multiple_modes():
1843
+ # Test that the filters with multiple mode cababilities for different
1844
+ # dimensions give the same result as applying a single mode.
1845
+ arr = np.array([[1., 0., 0.],
1846
+ [1., 1., 0.],
1847
+ [0., 0., 0.]])
1848
+
1849
+ mode1 = 'reflect'
1850
+ mode2 = ['reflect', 'reflect']
1851
+
1852
+ assert_equal(ndimage.gaussian_filter(arr, 1, mode=mode1),
1853
+ ndimage.gaussian_filter(arr, 1, mode=mode2))
1854
+ assert_equal(ndimage.prewitt(arr, mode=mode1),
1855
+ ndimage.prewitt(arr, mode=mode2))
1856
+ assert_equal(ndimage.sobel(arr, mode=mode1),
1857
+ ndimage.sobel(arr, mode=mode2))
1858
+ assert_equal(ndimage.laplace(arr, mode=mode1),
1859
+ ndimage.laplace(arr, mode=mode2))
1860
+ assert_equal(ndimage.gaussian_laplace(arr, 1, mode=mode1),
1861
+ ndimage.gaussian_laplace(arr, 1, mode=mode2))
1862
+ assert_equal(ndimage.maximum_filter(arr, size=5, mode=mode1),
1863
+ ndimage.maximum_filter(arr, size=5, mode=mode2))
1864
+ assert_equal(ndimage.minimum_filter(arr, size=5, mode=mode1),
1865
+ ndimage.minimum_filter(arr, size=5, mode=mode2))
1866
+ assert_equal(ndimage.gaussian_gradient_magnitude(arr, 1, mode=mode1),
1867
+ ndimage.gaussian_gradient_magnitude(arr, 1, mode=mode2))
1868
+ assert_equal(ndimage.uniform_filter(arr, 5, mode=mode1),
1869
+ ndimage.uniform_filter(arr, 5, mode=mode2))
1870
+
1871
+
1872
+ def test_multiple_modes_sequentially():
1873
+ # Test that the filters with multiple mode cababilities for different
1874
+ # dimensions give the same result as applying the filters with
1875
+ # different modes sequentially
1876
+ arr = np.array([[1., 0., 0.],
1877
+ [1., 1., 0.],
1878
+ [0., 0., 0.]])
1879
+
1880
+ modes = ['reflect', 'wrap']
1881
+
1882
+ expected = ndimage.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
1883
+ expected = ndimage.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
1884
+ assert_equal(expected,
1885
+ ndimage.gaussian_filter(arr, 1, mode=modes))
1886
+
1887
+ expected = ndimage.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
1888
+ expected = ndimage.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
1889
+ assert_equal(expected,
1890
+ ndimage.uniform_filter(arr, 5, mode=modes))
1891
+
1892
+ expected = ndimage.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
1893
+ expected = ndimage.maximum_filter1d(expected, size=5, axis=1,
1894
+ mode=modes[1])
1895
+ assert_equal(expected,
1896
+ ndimage.maximum_filter(arr, size=5, mode=modes))
1897
+
1898
+ expected = ndimage.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
1899
+ expected = ndimage.minimum_filter1d(expected, size=5, axis=1,
1900
+ mode=modes[1])
1901
+ assert_equal(expected,
1902
+ ndimage.minimum_filter(arr, size=5, mode=modes))
1903
+
1904
+
1905
+ def test_multiple_modes_prewitt():
1906
+ # Test prewitt filter for multiple extrapolation modes
1907
+ arr = np.array([[1., 0., 0.],
1908
+ [1., 1., 0.],
1909
+ [0., 0., 0.]])
1910
+
1911
+ expected = np.array([[1., -3., 2.],
1912
+ [1., -2., 1.],
1913
+ [1., -1., 0.]])
1914
+
1915
+ modes = ['reflect', 'wrap']
1916
+
1917
+ assert_equal(expected,
1918
+ ndimage.prewitt(arr, mode=modes))
1919
+
1920
+
1921
+ def test_multiple_modes_sobel():
1922
+ # Test sobel filter for multiple extrapolation modes
1923
+ arr = np.array([[1., 0., 0.],
1924
+ [1., 1., 0.],
1925
+ [0., 0., 0.]])
1926
+
1927
+ expected = np.array([[1., -4., 3.],
1928
+ [2., -3., 1.],
1929
+ [1., -1., 0.]])
1930
+
1931
+ modes = ['reflect', 'wrap']
1932
+
1933
+ assert_equal(expected,
1934
+ ndimage.sobel(arr, mode=modes))
1935
+
1936
+
1937
+ def test_multiple_modes_laplace():
1938
+ # Test laplace filter for multiple extrapolation modes
1939
+ arr = np.array([[1., 0., 0.],
1940
+ [1., 1., 0.],
1941
+ [0., 0., 0.]])
1942
+
1943
+ expected = np.array([[-2., 2., 1.],
1944
+ [-2., -3., 2.],
1945
+ [1., 1., 0.]])
1946
+
1947
+ modes = ['reflect', 'wrap']
1948
+
1949
+ assert_equal(expected,
1950
+ ndimage.laplace(arr, mode=modes))
1951
+
1952
+
1953
+ def test_multiple_modes_gaussian_laplace():
1954
+ # Test gaussian_laplace filter for multiple extrapolation modes
1955
+ arr = np.array([[1., 0., 0.],
1956
+ [1., 1., 0.],
1957
+ [0., 0., 0.]])
1958
+
1959
+ expected = np.array([[-0.28438687, 0.01559809, 0.19773499],
1960
+ [-0.36630503, -0.20069774, 0.07483620],
1961
+ [0.15849176, 0.18495566, 0.21934094]])
1962
+
1963
+ modes = ['reflect', 'wrap']
1964
+
1965
+ assert_almost_equal(expected,
1966
+ ndimage.gaussian_laplace(arr, 1, mode=modes))
1967
+
1968
+
1969
+ def test_multiple_modes_gaussian_gradient_magnitude():
1970
+ # Test gaussian_gradient_magnitude filter for multiple
1971
+ # extrapolation modes
1972
+ arr = np.array([[1., 0., 0.],
1973
+ [1., 1., 0.],
1974
+ [0., 0., 0.]])
1975
+
1976
+ expected = np.array([[0.04928965, 0.09745625, 0.06405368],
1977
+ [0.23056905, 0.14025305, 0.04550846],
1978
+ [0.19894369, 0.14950060, 0.06796850]])
1979
+
1980
+ modes = ['reflect', 'wrap']
1981
+
1982
+ calculated = ndimage.gaussian_gradient_magnitude(arr, 1, mode=modes)
1983
+
1984
+ assert_almost_equal(expected, calculated)
1985
+
1986
+
1987
+ def test_multiple_modes_uniform():
1988
+ # Test uniform filter for multiple extrapolation modes
1989
+ arr = np.array([[1., 0., 0.],
1990
+ [1., 1., 0.],
1991
+ [0., 0., 0.]])
1992
+
1993
+ expected = np.array([[0.32, 0.40, 0.48],
1994
+ [0.20, 0.28, 0.32],
1995
+ [0.28, 0.32, 0.40]])
1996
+
1997
+ modes = ['reflect', 'wrap']
1998
+
1999
+ assert_almost_equal(expected,
2000
+ ndimage.uniform_filter(arr, 5, mode=modes))
2001
+
2002
+
2003
+ def test_gaussian_truncate():
2004
+ # Test that Gaussian filters can be truncated at different widths.
2005
+ # These tests only check that the result has the expected number
2006
+ # of nonzero elements.
2007
+ arr = np.zeros((100, 100), float)
2008
+ arr[50, 50] = 1
2009
+ num_nonzeros_2 = (ndimage.gaussian_filter(arr, 5, truncate=2) > 0).sum()
2010
+ assert_equal(num_nonzeros_2, 21**2)
2011
+ num_nonzeros_5 = (ndimage.gaussian_filter(arr, 5, truncate=5) > 0).sum()
2012
+ assert_equal(num_nonzeros_5, 51**2)
2013
+
2014
+ # Test truncate when sigma is a sequence.
2015
+ f = ndimage.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
2016
+ fpos = f > 0
2017
+ n0 = fpos.any(axis=0).sum()
2018
+ # n0 should be 2*int(2.5*3.5 + 0.5) + 1
2019
+ assert_equal(n0, 19)
2020
+ n1 = fpos.any(axis=1).sum()
2021
+ # n1 should be 2*int(0.5*3.5 + 0.5) + 1
2022
+ assert_equal(n1, 5)
2023
+
2024
+ # Test gaussian_filter1d.
2025
+ x = np.zeros(51)
2026
+ x[25] = 1
2027
+ f = ndimage.gaussian_filter1d(x, sigma=2, truncate=3.5)
2028
+ n = (f > 0).sum()
2029
+ assert_equal(n, 15)
2030
+
2031
+ # Test gaussian_laplace
2032
+ y = ndimage.gaussian_laplace(x, sigma=2, truncate=3.5)
2033
+ nonzero_indices = np.nonzero(y != 0)[0]
2034
+ n = np.ptp(nonzero_indices) + 1
2035
+ assert_equal(n, 15)
2036
+
2037
+ # Test gaussian_gradient_magnitude
2038
+ y = ndimage.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
2039
+ nonzero_indices = np.nonzero(y != 0)[0]
2040
+ n = np.ptp(nonzero_indices) + 1
2041
+ assert_equal(n, 15)
2042
+
2043
+
2044
+ def test_gaussian_radius():
2045
+ # Test that Gaussian filters with radius argument produce the same
2046
+ # results as the filters with corresponding truncate argument.
2047
+ # radius = int(truncate * sigma + 0.5)
2048
+ # Test gaussian_filter1d
2049
+ x = np.zeros(7)
2050
+ x[3] = 1
2051
+ f1 = ndimage.gaussian_filter1d(x, sigma=2, truncate=1.5)
2052
+ f2 = ndimage.gaussian_filter1d(x, sigma=2, radius=3)
2053
+ assert_equal(f1, f2)
2054
+
2055
+ # Test gaussian_filter when sigma is a number.
2056
+ a = np.zeros((9, 9))
2057
+ a[4, 4] = 1
2058
+ f1 = ndimage.gaussian_filter(a, sigma=0.5, truncate=3.5)
2059
+ f2 = ndimage.gaussian_filter(a, sigma=0.5, radius=2)
2060
+ assert_equal(f1, f2)
2061
+
2062
+ # Test gaussian_filter when sigma is a sequence.
2063
+ a = np.zeros((50, 50))
2064
+ a[25, 25] = 1
2065
+ f1 = ndimage.gaussian_filter(a, sigma=[0.5, 2.5], truncate=3.5)
2066
+ f2 = ndimage.gaussian_filter(a, sigma=[0.5, 2.5], radius=[2, 9])
2067
+ assert_equal(f1, f2)
2068
+
2069
+
2070
+ def test_gaussian_radius_invalid():
2071
+ # radius must be a nonnegative integer
2072
+ with assert_raises(ValueError):
2073
+ ndimage.gaussian_filter1d(np.zeros(8), sigma=1, radius=-1)
2074
+ with assert_raises(ValueError):
2075
+ ndimage.gaussian_filter1d(np.zeros(8), sigma=1, radius=1.1)
2076
+
2077
+
2078
+ class TestThreading:
2079
+ def check_func_thread(self, n, fun, args, out):
2080
+ from threading import Thread
2081
+ thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]})
2082
+ for x in range(n)]
2083
+ [t.start() for t in thrds]
2084
+ [t.join() for t in thrds]
2085
+
2086
+ def check_func_serial(self, n, fun, args, out):
2087
+ for i in range(n):
2088
+ fun(*args, output=out[i])
2089
+
2090
+ def test_correlate1d(self):
2091
+ d = np.random.randn(5000)
2092
+ os = np.empty((4, d.size))
2093
+ ot = np.empty_like(os)
2094
+ k = np.arange(5)
2095
+ self.check_func_serial(4, ndimage.correlate1d, (d, k), os)
2096
+ self.check_func_thread(4, ndimage.correlate1d, (d, k), ot)
2097
+ assert_array_equal(os, ot)
2098
+
2099
+ def test_correlate(self):
2100
+ d = np.random.randn(500, 500)
2101
+ k = np.random.randn(10, 10)
2102
+ os = np.empty([4] + list(d.shape))
2103
+ ot = np.empty_like(os)
2104
+ self.check_func_serial(4, ndimage.correlate, (d, k), os)
2105
+ self.check_func_thread(4, ndimage.correlate, (d, k), ot)
2106
+ assert_array_equal(os, ot)
2107
+
2108
+ def test_median_filter(self):
2109
+ d = np.random.randn(500, 500)
2110
+ os = np.empty([4] + list(d.shape))
2111
+ ot = np.empty_like(os)
2112
+ self.check_func_serial(4, ndimage.median_filter, (d, 3), os)
2113
+ self.check_func_thread(4, ndimage.median_filter, (d, 3), ot)
2114
+ assert_array_equal(os, ot)
2115
+
2116
+ def test_uniform_filter1d(self):
2117
+ d = np.random.randn(5000)
2118
+ os = np.empty((4, d.size))
2119
+ ot = np.empty_like(os)
2120
+ self.check_func_serial(4, ndimage.uniform_filter1d, (d, 5), os)
2121
+ self.check_func_thread(4, ndimage.uniform_filter1d, (d, 5), ot)
2122
+ assert_array_equal(os, ot)
2123
+
2124
+ def test_minmax_filter(self):
2125
+ d = np.random.randn(500, 500)
2126
+ os = np.empty([4] + list(d.shape))
2127
+ ot = np.empty_like(os)
2128
+ self.check_func_serial(4, ndimage.maximum_filter, (d, 3), os)
2129
+ self.check_func_thread(4, ndimage.maximum_filter, (d, 3), ot)
2130
+ assert_array_equal(os, ot)
2131
+ self.check_func_serial(4, ndimage.minimum_filter, (d, 3), os)
2132
+ self.check_func_thread(4, ndimage.minimum_filter, (d, 3), ot)
2133
+ assert_array_equal(os, ot)
2134
+
2135
+
2136
+ def test_minmaximum_filter1d():
2137
+ # Regression gh-3898
2138
+ in_ = np.arange(10)
2139
+ out = ndimage.minimum_filter1d(in_, 1)
2140
+ assert_equal(in_, out)
2141
+ out = ndimage.maximum_filter1d(in_, 1)
2142
+ assert_equal(in_, out)
2143
+ # Test reflect
2144
+ out = ndimage.minimum_filter1d(in_, 5, mode='reflect')
2145
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
2146
+ out = ndimage.maximum_filter1d(in_, 5, mode='reflect')
2147
+ assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
2148
+ # Test constant
2149
+ out = ndimage.minimum_filter1d(in_, 5, mode='constant', cval=-1)
2150
+ assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
2151
+ out = ndimage.maximum_filter1d(in_, 5, mode='constant', cval=10)
2152
+ assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
2153
+ # Test nearest
2154
+ out = ndimage.minimum_filter1d(in_, 5, mode='nearest')
2155
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
2156
+ out = ndimage.maximum_filter1d(in_, 5, mode='nearest')
2157
+ assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
2158
+ # Test wrap
2159
+ out = ndimage.minimum_filter1d(in_, 5, mode='wrap')
2160
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
2161
+ out = ndimage.maximum_filter1d(in_, 5, mode='wrap')
2162
+ assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
2163
+
2164
+
2165
+ def test_uniform_filter1d_roundoff_errors():
2166
+ # gh-6930
2167
+ in_ = np.repeat([0, 1, 0], [9, 9, 9])
2168
+ for filter_size in range(3, 10):
2169
+ out = ndimage.uniform_filter1d(in_, filter_size)
2170
+ assert_equal(out.sum(), 10 - filter_size)
2171
+
2172
+
2173
+ def test_footprint_all_zeros():
2174
+ # regression test for gh-6876: footprint of all zeros segfaults
2175
+ arr = np.random.randint(0, 100, (100, 100))
2176
+ kernel = np.zeros((3, 3), bool)
2177
+ with assert_raises(ValueError):
2178
+ ndimage.maximum_filter(arr, footprint=kernel)
2179
+
2180
+
2181
+ def test_gaussian_filter():
2182
+ # Test gaussian filter with np.float16
2183
+ # gh-8207
2184
+ data = np.array([1], dtype=np.float16)
2185
+ sigma = 1.0
2186
+ with assert_raises(RuntimeError):
2187
+ ndimage.gaussian_filter(data, sigma)
2188
+
2189
+
2190
+ def test_rank_filter_noninteger_rank():
2191
+ # regression test for issue 9388: ValueError for
2192
+ # non integer rank when performing rank_filter
2193
+ arr = np.random.random((10, 20, 30))
2194
+ assert_raises(TypeError, ndimage.rank_filter, arr, 0.5,
2195
+ footprint=np.ones((1, 1, 10), dtype=bool))
2196
+
2197
+
2198
+ def test_size_footprint_both_set():
2199
+ # test for input validation, expect user warning when
2200
+ # size and footprint is set
2201
+ with suppress_warnings() as sup:
2202
+ sup.filter(UserWarning,
2203
+ "ignoring size because footprint is set")
2204
+ arr = np.random.random((10, 20, 30))
2205
+ ndimage.rank_filter(arr, 5, size=2, footprint=np.ones((1, 1, 10), dtype=bool))
2206
+
2207
+
2208
+ def test_byte_order_median():
2209
+ """Regression test for #413: median_filter does not handle bytes orders."""
2210
+ a = np.arange(9, dtype='<f4').reshape(3, 3)
2211
+ ref = ndimage.median_filter(a, (3, 3))
2212
+ b = np.arange(9, dtype='>f4').reshape(3, 3)
2213
+ t = ndimage.median_filter(b, (3, 3))
2214
+ assert_array_almost_equal(ref, t)
parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc ADDED
Binary file (1.29 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_odrpack.cpython-310.pyc ADDED
Binary file (37.3 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/models.cpython-310.pyc ADDED
Binary file (637 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc ADDED
Binary file (19.2 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50ea55d6b09b350feff4f762d9fc7b052836298983aa3396de825159ebca1539
3
+ size 277968
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/batch_mm.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void BatchMM(std::shared_ptr<Graph>& graph);
9
+
10
+ }
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void CanonicalizeOps(const std::shared_ptr<Graph>& graph);
9
+
10
+ }
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_profiling.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ TORCH_API void unprofileGraphInputs(const std::shared_ptr<Graph>& graph);
13
+ TORCH_API void unprofileBlock(Block* start_block);
14
+ // Unprofiles all the node outputs in a block.
15
+
16
+ TORCH_API void ClearProfilingInformation(const std::shared_ptr<Graph>& graph);
17
+
18
+ } // namespace jit
19
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Eliminates common inputs among `aten::cat` ops.
9
+ TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr<Graph>& graph);
10
+
11
+ // Expands `aten::cat` ops into `aten::copy` ops and eliminates redudancies
12
+ // in the buffers used for concatenation if possible.
13
+ TORCH_API void ExpandConcatAndEliminateRedundancy(
14
+ const std::shared_ptr<Graph>& graph);
15
+
16
+ TORCH_API bool CombineConcats(const std::shared_ptr<Graph>& graph);
17
+
18
+ } // namespace jit
19
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Runs constant propagation on all objects unless ignore_custom_classes is
9
+ // specified as true, in which case user defined classes are skipped. This is
10
+ // useful to prevent early fusion of packing operations, which end up lowering
11
+ // away information about their constructors (e.g. packed::linear_clamp_prepack
12
+ // and prepacked::conv2d_clamp_prepack)
13
+ // Returns True if the pass made a change to the graph
14
+ TORCH_API bool ConstantPropagation(
15
+ std::shared_ptr<Graph>& graph,
16
+ bool ignore_custom_classes = false);
17
+
18
+ // runs constant propagation only on ops that have non-aliasing inputs & outputs
19
+ // Returns True if the pass made a change to the graph
20
+ TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr<Graph>& graph);
21
+
22
+ // Runs the node if its inputs are constants. Callers of this function must
23
+ // make their own determination if constant prop is appropriate - for example
24
+ // non-deterministic ops or ops with side effects. If ignore_custom_classes is
25
+ // specified, nodes that output user defined classes are not run.
26
+ TORCH_API c10::optional<Stack> runNodeIfInputsAreConstant(
27
+ const Node* node,
28
+ bool ignore_custom_classes = false,
29
+ AliasDb* db = nullptr);
30
+
31
+ } // namespace jit
32
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_autodiff_subgraphs.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ #include <cstddef>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ // insert GraphExecutor nodes that group together
12
+ // subgraphs that are differentiable by the jit's autodiff passes
13
+ // threshold - minimum number of nodes that will appear in a block
14
+ // returns all differentiable blocks that have been found
15
+ TORCH_API std::vector<Node*> CreateAutodiffSubgraphs(
16
+ const std::shared_ptr<Graph>& graph,
17
+ size_t threshold = 2);
18
+ } // namespace jit
19
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void CreateFunctionalGraphs(const std::shared_ptr<Graph>& graph);
10
+
11
+ TORCH_API void InlineFunctionalGraphs(const std::shared_ptr<Graph>& graph);
12
+
13
+ } // namespace jit
14
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // If given a top-level graph, DCE will construct do alias analysis that allows
9
+ // for "smarter" dead code elimination (we will eliminate mutable ops if we can
10
+ // prove the mutated values are not used). Otherwise, we will not allow DCE to
11
+ // eliminate mutable ops.
12
+ //
13
+ // So, prefer to use the graph version if you can.
14
+ enum class DCESideEffectPolicy : uint8_t {
15
+ // default behavior: dead code elimination will check if a node has side
16
+ // effects
17
+ // and not delete it if it does.
18
+ DONT_DELETE_NODES_WITH_SIDE_EFFECTS,
19
+ // with this flag, dead code elimination will not check if a node has side
20
+ // effects and treat nodes with side effects like any other node,
21
+ // i.e. delete them if their outputs aren't used anywhere.
22
+ ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS
23
+ };
24
+
25
+ TORCH_API void EliminateDeadCode(
26
+ const std::shared_ptr<Graph>& graph,
27
+ DCESideEffectPolicy sideEffectPolicy =
28
+ DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
29
+ TORCH_API void EliminateDeadCode(
30
+ Block* block,
31
+ bool recurse = true,
32
+ DCESideEffectPolicy sideEffectPolicy =
33
+ DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
34
+
35
+ // Invoke the user-provided callback on all live values before deleting anything
36
+ TORCH_API void EliminateDeadCode(
37
+ Block* block,
38
+ std::function<void(const std::unordered_set<const Value*>&)> cb,
39
+ DCESideEffectPolicy sideEffectPolicy =
40
+ DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
41
+ } // namespace jit
42
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void DecomposeOps(std::shared_ptr<Graph>& graph);
9
+
10
+ }
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/eliminate_no_ops.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Remove ops that do nothing on the forward pass (like aten::detach).
9
+ // This pass is invoked as a part of freeze_module.
10
+ // This function also takes a set of custom ops to eliminate. All ops in this
11
+ // set must take their output as their first input, i.e. x = f(x, ...)
12
+ TORCH_API bool EliminateNoOps(
13
+ std::shared_ptr<Graph>& graph,
14
+ std::unordered_set<c10::Symbol> custom_ops = {});
15
+
16
+ } // namespace jit
17
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Erase NumberType information. This is necessary for and only used in
9
+ // exporting to ONNX. This pass ensures that no remaining Values have
10
+ // NumberType types, replacing them with tensors.
11
+ // The following things are done to erase NumberType info:
12
+ // - NumberType outputs are changed to DynamicType.
13
+ // - prim::Constant nodes which are numbers get changed into 0-dim tensors of
14
+ // the corresponding type
15
+ // - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes
16
+ // are erased.
17
+ //
18
+ // The pass assumes that DCE will be called sometime after.
19
+ TORCH_API void EraseNumberTypes(const std::shared_ptr<Graph>& graph);
20
+ TORCH_API void EraseNumberTypesOnBlock(Block* block);
21
+
22
+ } // namespace jit
23
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ // Directly after tracing, we have an ill-formed graph with blocks inserted.
10
+ // Example:
11
+ //
12
+ // graph(%self : ClassType<Module>,
13
+ // %input.1 : Float(3, 4)):
14
+ // %1 : ClassType<Module> = prim::GetAttr[name="relu1"](%self)
15
+ // %2 : ClassType<Module> = prim::GetAttr[name="relu2"](%self)
16
+ // %3 : ClassType<Module> = prim::GetAttr[name="rrr"](%2)
17
+ // = prim::TracedModuleForward[scope="__module.relu1"]()
18
+ // block0():
19
+ // %input : Float(3, 4) = aten::relu(%input.1),
20
+ // -> ()
21
+ // = prim::TracedModuleForward[scope="__module.relu2"](),
22
+ // block0():
23
+ // = prim::TracedModuleForward[scope="__module.relu2.rrr"](),
24
+ // block0():
25
+ // %6 : Float(3, 4) = aten::relu(%input),
26
+ // -> ()
27
+ // -> ()
28
+ // return (%6)
29
+ //
30
+ // In this pass, we:
31
+ // 1) Lift Value defs to as high of a scope as needed to ensure that
32
+ // they dominate all their uses. For example, `input` in the above
33
+ // graph needs to be lifted to the top-level block so that its use
34
+ // in the second `relu` operator is dominated.
35
+ // 2) Lambda lift the blocks. This ensures that all values used within
36
+ // each scope have their defs captured.
37
+ // 3) Convert the scope blocks into methods on their respective Modules,
38
+ // and convert TracedModuleForward nodes to CallMethod nodes into those
39
+ // methods.
40
+ //
41
+ // Then, we'll have a well-formed graph with proper method calls.
42
+ TORCH_API void FixupTraceScopeBlocks(
43
+ std::shared_ptr<Graph>& graph,
44
+ Module* self);
45
+
46
+ } // namespace jit
47
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_conv_bn.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ /** \brief Fold Conv2d-BatchNorm2d into Conv2d in all methods of this
9
+ * module and all its submodules, forward is included by default.
10
+ *
11
+ * The weight and bias of the Conv2d are correspondingly updated. Should only be
12
+ * used on modules in eval mode.
13
+ */
14
+ TORCH_API Module FoldConvBatchNorm(const Module& module);
15
+
16
+ struct TORCH_API ConvBNParameters {
17
+ at::Tensor conv_w;
18
+ at::Tensor conv_b;
19
+ at::Tensor bn_rm;
20
+ at::Tensor bn_rv;
21
+ double bn_eps = 0.0;
22
+ at::Tensor bn_w;
23
+ at::Tensor bn_b;
24
+ };
25
+
26
+ /**
27
+ * Given the current weight and bias tensors of a Conv module and parameters
28
+ * of the BatchNorm module we're folding with, compute the updated values
29
+ * for the weight and bias.
30
+ *
31
+ * The function is basically copied from torch/nn/utils/fusion.py
32
+ */
33
+ TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedConvWeightAndBias(
34
+ const ConvBNParameters& p);
35
+
36
+ } // namespace jit
37
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_concat_linear.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Concats multiple linear ops with the same Tensor input
9
+ // into a single linear op.
10
+ TORCH_API bool FrozenConcatLinear(std::shared_ptr<Graph>& graph);
11
+
12
+ } // namespace jit
13
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API extern std::function<void(std::shared_ptr<Graph>&)>&
10
+ getFuseFrozenConvAddReluImpl();
11
+
12
+ TORCH_API void FuseFrozenConvAddRelu(std::shared_ptr<Graph>& graph);
13
+
14
+ } // namespace jit
15
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_folding.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Fuses Linear -> BatchNormNd into a single Linear by
9
+ // folding batchnorm weights into linear weights.
10
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
11
+ TORCH_API bool FoldFrozenLinearBatchnorm(std::shared_ptr<Graph>& graph);
12
+
13
+ } // namespace jit
14
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Transposes the weight matrix of frozen linear modules
9
+ // and converts the linear op into a matmul.
10
+ TORCH_API bool FrozenLinearTranspose(std::shared_ptr<Graph>& graph);
11
+
12
+ } // namespace jit
13
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Converts operators & their parameters to mkldnn if it is profitable
9
+ // Currently encompassing Conv2d and Conv3d, and Linear
10
+ // Op must be in float32 and mkldnn must be built
11
+ // This pass only works on frozen graph
12
+ TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr<Graph>& graph);
13
+
14
+ } // namespace jit
15
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /** \brief Fusing linear patterns as single at::linear for easier pattern
2
+ * matching in later passes
3
+ */
4
+ #pragma once
5
+
6
+ #include <torch/csrc/jit/ir/ir.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+
11
+ /** \brief Match the at::linear pattern and fuse it into a single at::linear
12
+ * This pass fuse the addmm or matmul + add generated by JIT back to linear
13
+ * This pass can be deleted once the JIT can emit the aten::linear in the future
14
+ */
15
+ TORCH_API void FuseLinear(std::shared_ptr<Graph>& graph);
16
+
17
+ /** Swap functional linear CallFunctions to aten::linear
18
+ */
19
+ TORCH_API void SwapFunctionalLinear(std::shared_ptr<Graph>& graph);
20
+ /** Swap all functional linear CallFunctions in module
21
+ */
22
+ TORCH_API void SwapFunctionalLinear(Module& module);
23
+ } // namespace jit
24
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_fuser.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API bool canFuseOnCPULegacy();
9
+ TORCH_API void overrideCanFuseOnCPULegacy(bool value);
10
+
11
+ // NB: Be sure to run DCE before fusion, because dead instructions
12
+ // can prevent fusion opportunities from being exploited.
13
+ // On Windows will noop, NYI
14
+ TORCH_API void FuseGraph(
15
+ std::shared_ptr<Graph>& graph,
16
+ bool strict_fuser_check = false);
17
+
18
+ // \brief Custom fusion pass using a node-level callback to
19
+ // determine the inclusion of nodes in a subgraph.
20
+ //
21
+ // This helper omits aliased inputs and fusion across control flow
22
+ // boundaries.
23
+ //
24
+ // \arg graph The graph to be modified in-place
25
+ // \arg is_fusable A callback run on each fusable node in the graph.
26
+ // \arg kind The label given to the resultant fused subgraph
27
+ // \arg arg_limit The maximum number of args the resultant fused subgraph
28
+ // should have. Note: This will likely develop into a general
29
+ // post condition on the fused subgraph.
30
+ TORCH_API void CustomFuseGraph(
31
+ std::shared_ptr<Graph>& graph,
32
+ const std::function<bool(Node*)>& is_fusable,
33
+ Symbol kind,
34
+ size_t arg_limit = std::numeric_limits<size_t>::max());
35
+
36
+ } // namespace jit
37
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/ir/irparser.h>
5
+ #include <torch/csrc/jit/ir/subgraph_matcher.h>
6
+ #include <torch/csrc/jit/passes/subgraph_rewrite.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace graph_rewrite_helper {
11
+
12
+ std::string getFuncName(Value* func_value);
13
+ Value* getValue(
14
+ const std::string& name,
15
+ const std::unordered_map<const Value*, Value*>& match_vmap,
16
+ const std::unordered_map<std::string, Value*>& vmap);
17
+ c10::optional<IValue> getIValue(
18
+ const std::string& name,
19
+ const std::unordered_map<const Value*, Value*>& match_vmap,
20
+ const std::unordered_map<std::string, Value*>& vmap);
21
+ TORCH_API void replaceConvolutionWithAtenConv(std::shared_ptr<Graph>& graph);
22
+
23
+ bool isClampFusable(
24
+ const Match& match,
25
+ const std::unordered_map<std::string, Value*>& vmap);
26
+
27
+ // This struct contains a compiled IR patterns slated for use in the
28
+ // findPatternMatches function. The struct encapsulates the common
29
+ // information from parseIR that is used in conjunction with the
30
+ // pattern matching facility. A const instance of this struct can
31
+ // also be stored away to cache the compiled IR pattern and reduce
32
+ // runtime cost
33
+ // NOTE: parse_from_str aggregate-initializes the members in declaration
+ // order; keep that ordering in sync if fields are added.
+ struct PatternInfo {
34
+ std::string pattern_string; // IR text the pattern graph was parsed from
35
+ std::unique_ptr<Graph> pattern_graph; // compiled form of pattern_string
36
+ std::unordered_map<std::string, Value*> vmap; // pattern value name -> Value*
37
+ std::vector<MatchFilter> filters; // extra predicates a candidate match must satisfy
38
+
39
+ // Parses `pattern_string` once, caching the resulting graph and value map
+ // in the returned struct so repeated matching does not re-run the IR
+ // parser. `pattern_string` is taken by value and moved into the result.
+ static PatternInfo parse_from_str(
40
+ std::string pattern_string,
41
+ const std::vector<MatchFilter>& filters = {}) {
42
+ PatternInfo rv{
43
+ std::move(pattern_string),
44
+ std::make_unique<Graph>(),
45
+ decltype(vmap){},
46
+ filters};
47
+ parseIR(rv.pattern_string, rv.pattern_graph.get(), rv.vmap);
48
+ return rv;
49
+ }
50
+ };
51
+
52
+ } // namespace graph_rewrite_helper
53
+ } // namespace jit
54
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/hoist_conv_packed_params.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ void HoistConvPackedParams(script::Module& m);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_fork_wait.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Inline Fork and Wait calls. This is used, for example, in ONNX export, where
9
+ // we do not support the explicit parallelism structures and would rather
10
+ // just have a flat graph. This inlines the forked section in the fork()
11
+ // callsite and replaces uses of the result of wait() calls with the values
12
+ // produced from the (now-inlined) forked section.
13
+ TORCH_API void InlineForkWait(const std::shared_ptr<Graph>& graph);
14
+
15
+ } // namespace jit
16
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inliner.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Inline function and method calls.
9
+ TORCH_API void Inline(Graph& graph);
10
+
11
+ TORCH_API GraphFunction* tryToGraphFunction(Node* n);
12
+
13
+ } // namespace jit
14
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/insert_guards.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/ir/ir.h>
9
+
10
+ #include <list>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ TORCH_API void InsertGuards(std::shared_ptr<Graph> graph);
17
+
18
+ TORCH_API void RemoveProfilingNodes(const std::shared_ptr<Graph>& graph);
19
+
20
+ } // namespace jit
21
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void liftClosures(const std::shared_ptr<Graph>& graph);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/liveness.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <c10/util/sparse_bitset.h>
8
+ #include <torch/csrc/Export.h>
9
+ #include <torch/csrc/jit/ir/ir.h>
10
+ #include <list>
11
+ #include <unordered_map>
12
+ #include <vector>
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ using SparseBitVector = ::c10::SparseBitVector<256>;
17
+
18
+ // BuildLivenessSets computes "bailout" liveness which is equivalent to
19
+ // "{LIVE_IN} or {GEN}" or "{LIVE_OUT} - {KILL}"
20
+ TORCH_API std::unordered_map<Node*, std::vector<Value*>> BuildLivenessSets(
21
+ std::shared_ptr<Graph> graph);
22
+ } // namespace jit
23
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // This pass removes 'grad_of' nodes, replacing them with conditionals of
9
+ // the form:
10
+ // if any_defined(inputs):
11
+ // outputs = <original_computation>
12
+ // else:
13
+ // outputs = undefineds
14
+ TORCH_API void LowerGradOf(Graph& g);
15
+
16
+ } // namespace jit
17
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
9
+
10
+ // Given the graph of a method whose first argument is %self, lower it to a
11
+ // graph where all attributes accesses are replaced with explicit inputs of the
12
+ // graph (rather than results of prim::GetAttr executed on %self).
13
+ //
14
+ // Returns a tuple (graph, parameters) where the last module.parameters.size()
15
+ // inputs to the graph are the trainable parameters used in this method. The
16
+ // remaining inputs are the true inputs to the function.
17
+ TORCH_API std::pair<std::shared_ptr<Graph>, std::vector<IValue>> LowerGraph(
18
+ Graph& graph,
19
+ const ModulePtr& self);
20
+
21
+ } // namespace jit
22
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_tuples.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // removes tuples where TupleConstruct and TupleUnpack are matched
9
+ // but leaves tuples in place across if statements, loops, and as inputs/outputs
10
+ TORCH_API void LowerSimpleTuples(const std::shared_ptr<Graph>& graph);
11
+
12
+ // removes _all_ tuples and raises an error if some cannot be removed
13
+ // this is used by ONNX to ensure there are not tuples before conversion,
14
+ // but will not work on graphs whose inputs contain tuples.
15
+ TORCH_API void LowerAllTuples(const std::shared_ptr<Graph>& graph);
16
+
17
+ TORCH_API void LowerSimpleTuples(Block* block);
18
+
19
+ } // namespace jit
20
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mobile_optimizer_type.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+
5
+ // Identifiers for the individual optimization passes applied during mobile
+ // model optimization — presumably used by callers to select or blocklist
+ // passes; TODO(review): confirm against the optimize_for_mobile call sites.
+ // int8_t backing keeps the enum compact when stored in containers.
+ enum class MobileOptimizerType : int8_t {
7
+ CONV_BN_FUSION,
8
+ INSERT_FOLD_PREPACK_OPS,
9
+ REMOVE_DROPOUT,
10
+ FUSE_ADD_RELU,
11
+ HOIST_CONV_PACKED_PARAMS,
12
+ CONV_1D_TO_2D,
13
+ VULKAN_AUTOMATIC_GPU_TRANSFER,
14
+ };
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onednn_graph_fuser.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <torch/csrc/jit/passes/pass_manager.h>
5
+
6
+ #include <ATen/Config.h>
7
+
8
+ namespace torch {
9
+ namespace jit {
10
+ namespace fuser {
11
+ namespace onednn {
12
+
13
+ static std::atomic<bool> onednn_enabled{true};
14
+
15
+ static std::atomic<bool>& getLlgaEnabled() {
16
+ return onednn_enabled;
17
+ }
18
+
19
+ TORCH_API void fuseGraph(std::shared_ptr<Graph>& g);
20
+
21
+ } // namespace onednn
22
+ } // namespace fuser
23
+
24
+ // CRTP helper that toggles the oneDNN Graph (LLGA) fuser by registering or
+ // clearing fuser::onednn::fuseGraph as a JIT pre-pass. Registration
+ // bookkeeping (passID / isRegistered) lives in the PassManager<> base.
+ struct C10_EXPORT RegisterLlgaFuseGraph
26
+ : public PassManager<RegisterLlgaFuseGraph> {
27
+ // Flips the global enable flag and (un)registers the pre-pass.
+ // Returns the *previous* enabled state. Throws unless built with MKLDNN.
+ static bool setEnabled(bool enabled) {
28
+ TORCH_CHECK(
29
+ AT_MKLDNN_ENABLED(),
30
+ "Running oneDNN Graph fuser is only supported with MKLDNN builds.");
31
+ bool oldState = fuser::onednn::getLlgaEnabled();
32
+ fuser::onednn::getLlgaEnabled() = enabled;
33
+ if (enabled) {
34
+ registerPass(fuser::onednn::fuseGraph);
35
+ } else {
36
+ clearPass();
37
+ }
38
+ return oldState;
39
+ }
40
+
41
+ // Reads the enable flag; does not touch pass registration.
+ static bool isEnabled() {
42
+ return fuser::onednn::getLlgaEnabled();
43
+ }
44
+
45
+ // override PassManager::registerPass to register pre-pass
46
+ // Registers at most once; returns true if a pass was already registered
+ // (in which case this call is a no-op).
+ static bool registerPass(GraphPass p) {
47
+ if (!isRegistered()) {
48
+ passID(registerPrePass(std::move(p)), true);
49
+ isRegistered(true);
50
+ return false;
51
+ }
52
+ return true;
53
+ }
54
+
55
+ // override PassManager::clearPass to clear pre-pass
56
+ // NOTE(review): `isRegistered(true)` after clearing appears to rely on a
+ // PassManager convention where passing `true` *flips* the registered bit
+ // (the same call marks registration in registerPass above) — confirm
+ // against PassManager; a plain boolean setter would receive `false` here.
+ static void clearPass() {
57
+ if (isRegistered()) {
58
+ clearPrePass(passID());
59
+ isRegistered(true);
60
+ }
61
+ }
62
+ };
62
+
63
+ } // namespace jit
64
+ } // namespace torch