ZTWHHH committed on
Commit
e2fb210
·
verified ·
1 Parent(s): bbdcbb7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h +35 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_meta.h +27 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h +24 -0
  5. vllm/lib/python3.10/site-packages/cupyx/scipy/__pycache__/__init__.cpython-310.pyc +0 -0
  6. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__init__.py +73 -0
  7. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/__init__.cpython-310.pyc +0 -0
  8. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_distance_transform.cpython-310.pyc +0 -0
  9. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters.cpython-310.pyc +0 -0
  10. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_core.cpython-310.pyc +0 -0
  11. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_generic.cpython-310.pyc +0 -0
  12. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc +0 -0
  13. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interp_kernels.cpython-310.pyc +0 -0
  14. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc +0 -0
  15. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc +0 -0
  16. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc +0 -0
  17. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_2d.cpython-310.pyc +0 -0
  18. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_3d.cpython-310.pyc +0 -0
  19. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_kernel_weights.cpython-310.pyc +0 -0
  20. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_prefilter_core.cpython-310.pyc +0 -0
  21. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_util.cpython-310.pyc +0 -0
  22. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_distance_transform.py +181 -0
  23. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters.py +1255 -0
  24. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_core.py +308 -0
  25. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_generic.py +272 -0
  26. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_fourier.py +253 -0
  27. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interp_kernels.py +598 -0
  28. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interpolation.py +780 -0
  29. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_measurements.py +1380 -0
  30. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_morphology.py +1017 -0
  31. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_2d.py +503 -0
  32. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_3d.py +491 -0
  33. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_kernel_weights.py +73 -0
  34. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_prefilter_core.py +261 -0
  35. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_util.py +160 -0
  36. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/LICENSE +21 -0
  37. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_2d.h +695 -0
  38. vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_3d.h +387 -0
  39. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  40. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_base.cpython-310.pyc +0 -0
  41. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_compressed.cpython-310.pyc +0 -0
  42. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_coo.cpython-310.pyc +0 -0
  43. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_csr.cpython-310.pyc +0 -0
  44. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_data.cpython-310.pyc +0 -0
  45. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_dia.cpython-310.pyc +0 -0
  46. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_extract.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_index.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_sputils.cpython-310.pyc +0 -0
  49. vllm/lib/python3.10/site-packages/packaging/__init__.py +15 -0
  50. vllm/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1441,3 +1441,5 @@ vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.
1441
  vglm/bin/python filter=lfs diff=lfs merge=lfs -text
1442
  vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1443
  parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
1441
  vglm/bin/python filter=lfs diff=lfs merge=lfs -text
1442
  vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1443
  parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1444
+ vllm/lib/python3.10/site-packages/pyzmq.libs/libzmq-a430b4ce.so.5.2.5 filter=lfs diff=lfs merge=lfs -text
1445
+ vllm/lib/python3.10/site-packages/pyzmq.libs/libsodium-1b1f72d5.so.26.1.0 filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/lstm_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
27
+ return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
28
+ }
29
+
30
+ // aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
31
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
32
+ return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
33
+ }
34
+
35
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_reciprocal : public TensorIteratorBase {
21
+
22
+
23
+ void meta(const at::Tensor & self);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & view_as_real_copy_out(at::Tensor & out, const at::Tensor & self);
21
+ TORCH_API at::Tensor & view_as_real_copy_outf(const at::Tensor & self, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
vllm/lib/python3.10/site-packages/cupyx/scipy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__init__.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from cupyx.scipy.ndimage._filters import correlate # NOQA
2
+ from cupyx.scipy.ndimage._filters import convolve # NOQA
3
+ from cupyx.scipy.ndimage._filters import correlate1d # NOQA
4
+ from cupyx.scipy.ndimage._filters import convolve1d # NOQA
5
+ from cupyx.scipy.ndimage._filters import uniform_filter1d # NOQA
6
+ from cupyx.scipy.ndimage._filters import uniform_filter # NOQA
7
+ from cupyx.scipy.ndimage._filters import gaussian_filter1d # NOQA
8
+ from cupyx.scipy.ndimage._filters import gaussian_filter # NOQA
9
+ from cupyx.scipy.ndimage._filters import prewitt # NOQA
10
+ from cupyx.scipy.ndimage._filters import sobel # NOQA
11
+ from cupyx.scipy.ndimage._filters import generic_laplace # NOQA
12
+ from cupyx.scipy.ndimage._filters import laplace # NOQA
13
+ from cupyx.scipy.ndimage._filters import gaussian_laplace # NOQA
14
+ from cupyx.scipy.ndimage._filters import generic_gradient_magnitude # NOQA
15
+ from cupyx.scipy.ndimage._filters import gaussian_gradient_magnitude # NOQA
16
+ from cupyx.scipy.ndimage._filters import minimum_filter # NOQA
17
+ from cupyx.scipy.ndimage._filters import maximum_filter # NOQA
18
+ from cupyx.scipy.ndimage._filters import minimum_filter1d # NOQA
19
+ from cupyx.scipy.ndimage._filters import maximum_filter1d # NOQA
20
+ from cupyx.scipy.ndimage._filters import median_filter # NOQA
21
+ from cupyx.scipy.ndimage._filters import rank_filter # NOQA
22
+ from cupyx.scipy.ndimage._filters import percentile_filter # NOQA
23
+ from cupyx.scipy.ndimage._filters import generic_filter # NOQA
24
+ from cupyx.scipy.ndimage._filters import generic_filter1d # NOQA
25
+
26
+ from cupyx.scipy.ndimage._fourier import fourier_ellipsoid # NOQA
27
+ from cupyx.scipy.ndimage._fourier import fourier_gaussian # NOQA
28
+ from cupyx.scipy.ndimage._fourier import fourier_shift # NOQA
29
+ from cupyx.scipy.ndimage._fourier import fourier_uniform # NOQA
30
+
31
+ from cupyx.scipy.ndimage._interpolation import affine_transform # NOQA
32
+ from cupyx.scipy.ndimage._interpolation import map_coordinates # NOQA
33
+ from cupyx.scipy.ndimage._interpolation import rotate # NOQA
34
+ from cupyx.scipy.ndimage._interpolation import shift # NOQA
35
+ from cupyx.scipy.ndimage._interpolation import spline_filter # NOQA
36
+ from cupyx.scipy.ndimage._interpolation import spline_filter1d # NOQA
37
+ from cupyx.scipy.ndimage._interpolation import zoom # NOQA
38
+
39
+ from cupyx.scipy.ndimage._measurements import label # NOQA
40
+ from cupyx.scipy.ndimage._measurements import sum # NOQA
41
+ from cupyx.scipy.ndimage._measurements import sum_labels # NOQA
42
+ from cupyx.scipy.ndimage._measurements import mean # NOQA
43
+ from cupyx.scipy.ndimage._measurements import variance # NOQA
44
+ from cupyx.scipy.ndimage._measurements import standard_deviation # NOQA
45
+ from cupyx.scipy.ndimage._measurements import minimum # NOQA
46
+ from cupyx.scipy.ndimage._measurements import maximum # NOQA
47
+ from cupyx.scipy.ndimage._measurements import minimum_position # NOQA
48
+ from cupyx.scipy.ndimage._measurements import maximum_position # NOQA
49
+ from cupyx.scipy.ndimage._measurements import median # NOQA
50
+ from cupyx.scipy.ndimage._measurements import extrema # NOQA
51
+ from cupyx.scipy.ndimage._measurements import center_of_mass # NOQA
52
+ from cupyx.scipy.ndimage._measurements import histogram # NOQA
53
+ from cupyx.scipy.ndimage._measurements import labeled_comprehension # NOQA
54
+ from cupyx.scipy.ndimage._measurements import value_indices # NOQA
55
+
56
+ from cupyx.scipy.ndimage._morphology import generate_binary_structure # NOQA
57
+ from cupyx.scipy.ndimage._morphology import iterate_structure # NOQA
58
+ from cupyx.scipy.ndimage._morphology import binary_erosion # NOQA
59
+ from cupyx.scipy.ndimage._morphology import binary_dilation # NOQA
60
+ from cupyx.scipy.ndimage._morphology import binary_opening # NOQA
61
+ from cupyx.scipy.ndimage._morphology import binary_closing # NOQA
62
+ from cupyx.scipy.ndimage._morphology import binary_hit_or_miss # NOQA
63
+ from cupyx.scipy.ndimage._morphology import binary_fill_holes # NOQA
64
+ from cupyx.scipy.ndimage._morphology import binary_propagation # NOQA
65
+ from cupyx.scipy.ndimage._morphology import grey_erosion # NOQA
66
+ from cupyx.scipy.ndimage._morphology import grey_dilation # NOQA
67
+ from cupyx.scipy.ndimage._morphology import grey_closing # NOQA
68
+ from cupyx.scipy.ndimage._morphology import grey_opening # NOQA
69
+ from cupyx.scipy.ndimage._morphology import morphological_gradient # NOQA
70
+ from cupyx.scipy.ndimage._morphology import morphological_laplace # NOQA
71
+ from cupyx.scipy.ndimage._morphology import white_tophat # NOQA
72
+ from cupyx.scipy.ndimage._morphology import black_tophat # NOQA
73
+ from cupyx.scipy.ndimage._distance_transform import distance_transform_edt # NOQA
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.88 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_distance_transform.cpython-310.pyc ADDED
Binary file (6.8 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters.cpython-310.pyc ADDED
Binary file (49.5 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_core.cpython-310.pyc ADDED
Binary file (9.55 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_generic.cpython-310.pyc ADDED
Binary file (8.16 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc ADDED
Binary file (8.05 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interp_kernels.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc ADDED
Binary file (24.7 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc ADDED
Binary file (42.2 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc ADDED
Binary file (36.6 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_2d.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_3d.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_kernel_weights.cpython-310.pyc ADDED
Binary file (2.49 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_prefilter_core.cpython-310.pyc ADDED
Binary file (7.11 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_util.cpython-310.pyc ADDED
Binary file (5.63 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_distance_transform.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numbers
2
+
3
+ from ._pba_2d import _pba_2d
4
+ from ._pba_3d import _pba_3d
5
+
6
+
7
+ def distance_transform_edt(image, sampling=None, return_distances=True,
8
+ return_indices=False, distances=None, indices=None,
9
+ *, block_params=None, float64_distances=True):
10
+ r"""Exact Euclidean distance transform.
11
+
12
+ This function calculates the distance transform of the `input`, by
13
+ replacing each foreground (non-zero) element, with its shortest distance to
14
+ the background (any zero-valued element).
15
+
16
+ In addition to the distance transform, the feature transform can be
17
+ calculated. In this case the index of the closest background element to
18
+ each foreground element is returned in a separate array.
19
+
20
+ Parameters
21
+ ----------
22
+ image : array_like
23
+ Input data to transform. Can be any type but will be converted into
24
+ binary: 1 wherever image equates to True, 0 elsewhere.
25
+ sampling : float, or sequence of float, optional
26
+ Spacing of elements along each dimension. If a sequence, must be of
27
+ length equal to the image rank; if a single number, this is used for
28
+ all axes. If not specified, a grid spacing of unity is implied.
29
+ return_distances : bool, optional
30
+ Whether to calculate the distance transform.
31
+ return_indices : bool, optional
32
+ Whether to calculate the feature transform.
33
+ distances : cupy.ndarray, optional
34
+ An output array to store the calculated distance transform, instead of
35
+ returning it. `return_distances` must be ``True``. It must be the same
36
+ shape as `image`. Should have dtype ``cp.float32`` if
37
+ `float64_distances` is ``False``, otherwise it should be
38
+ ``cp.float64``.
39
+ indices : cupy.ndarray, optional
40
+ An output array to store the calculated feature transform, instead of
41
+ returning it. `return_indicies` must be ``True``. Its shape must be
42
+ ``(image.ndim,) + image.shape``. Its dtype must be a signed or unsigned
43
+ integer type of at least 16-bits in 2D or 32-bits in 3D.
44
+
45
+ Other Parameters
46
+ ----------------
47
+ block_params : 3-tuple of int
48
+ The m1, m2, m3 algorithm parameters as described in [2]_. If None,
49
+ suitable defaults will be chosen. Note: This parameter is specific to
50
+ cuCIM and does not exist in SciPy.
51
+ float64_distances : bool, optional
52
+ If True, use double precision in the distance computation (to match
53
+ SciPy behavior). Otherwise, single precision will be used for
54
+ efficiency. Note: This parameter is specific to cuCIM and does not
55
+ exist in SciPy.
56
+
57
+ Returns
58
+ -------
59
+ distances : cupy.ndarray, optional
60
+ The calculated distance transform. Returned only when
61
+ `return_distances` is ``True`` and `distances` is not supplied. It will
62
+ have the same shape as `image`. Will have dtype `cp.float64` if
63
+ `float64_distances` is ``True``, otherwise it will have dtype
64
+ ``cp.float32``.
65
+ indices : ndarray, optional
66
+ The calculated feature transform. It has an image-shaped array for each
67
+ dimension of the image. See example below. Returned only when
68
+ `return_indices` is ``True`` and `indices` is not supplied.
69
+
70
+ Notes
71
+ -----
72
+ The Euclidean distance transform gives values of the Euclidean distance.
73
+
74
+ .. math::
75
+
76
+ y_i = \sqrt{\sum_{i}^{n} (x[i] - b[i])^2}
77
+
78
+ where :math:`b[i]` is the background point (value 0) with the smallest
79
+ Euclidean distance to input points :math:`x[i]`, and :math:`n` is the
80
+ number of dimensions.
81
+
82
+ Note that the `indices` output may differ from the one given by
83
+ :func:`scipy.ndimage.distance_transform_edt` in the case of input pixels
84
+ that are equidistant from multiple background points.
85
+
86
+ The parallel banding algorithm implemented here was originally described in
87
+ [1]_. The kernels used here correspond to the revised PBA+ implementation
88
+ that is described on the author's website [2]_. The source code of the
89
+ author's PBA+ implementation is available at [3]_.
90
+
91
+ References
92
+ ----------
93
+ .. [1] Thanh-Tung Cao, Ke Tang, Anis Mohamed, and Tiow-Seng Tan. 2010.
94
+ Parallel Banding Algorithm to compute exact distance transform with the
95
+ GPU. In Proceedings of the 2010 ACM SIGGRAPH symposium on Interactive
96
+ 3D Graphics and Games (I3D ’10). Association for Computing Machinery,
97
+ New York, NY, USA, 83–90.
98
+ DOI:https://doi.org/10.1145/1730804.1730818
99
+ .. [2] https://www.comp.nus.edu.sg/~tants/pba.html
100
+ .. [3] https://github.com/orzzzjq/Parallel-Banding-Algorithm-plus
101
+
102
+ Examples
103
+ --------
104
+ >>> import cupy as cp
105
+ >>> from cucim.core.operations import morphology
106
+ >>> a = cp.array(([0,1,1,1,1],
107
+ ... [0,0,1,1,1],
108
+ ... [0,1,1,1,1],
109
+ ... [0,1,1,1,0],
110
+ ... [0,1,1,0,0]))
111
+ >>> morphology.distance_transform_edt(a)
112
+ array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
113
+ [ 0. , 0. , 1. , 2. , 2. ],
114
+ [ 0. , 1. , 1.4142, 1.4142, 1. ],
115
+ [ 0. , 1. , 1.4142, 1. , 0. ],
116
+ [ 0. , 1. , 1. , 0. , 0. ]])
117
+
118
+ With a sampling of 2 units along x, 1 along y:
119
+
120
+ >>> morphology.distance_transform_edt(a, sampling=[2,1])
121
+ array([[ 0. , 1. , 2. , 2.8284, 3.6056],
122
+ [ 0. , 0. , 1. , 2. , 3. ],
123
+ [ 0. , 1. , 2. , 2.2361, 2. ],
124
+ [ 0. , 1. , 2. , 1. , 0. ],
125
+ [ 0. , 1. , 1. , 0. , 0. ]])
126
+
127
+ Asking for indices as well:
128
+
129
+ >>> edt, inds = morphology.distance_transform_edt(a, return_indices=True)
130
+ >>> inds
131
+ array([[[0, 0, 1, 1, 3],
132
+ [1, 1, 1, 1, 3],
133
+ [2, 2, 1, 3, 3],
134
+ [3, 3, 4, 4, 3],
135
+ [4, 4, 4, 4, 4]],
136
+ [[0, 0, 1, 1, 4],
137
+ [0, 1, 1, 1, 4],
138
+ [0, 0, 1, 4, 4],
139
+ [0, 0, 3, 3, 4],
140
+ [0, 0, 3, 3, 4]]])
141
+
142
+ """
143
+ scalar_sampling = None
144
+ if sampling is not None:
145
+ if isinstance(sampling, numbers.Number):
146
+ sampling = (sampling,)
147
+ if len(set(sampling)) == 1:
148
+ # In the isotropic case, can use the kernels without sample scaling
149
+ # and just adjust the final distance accordingly.
150
+ scalar_sampling = float(sampling[0])
151
+ sampling = None
152
+
153
+ if image.ndim == 3:
154
+ pba_func = _pba_3d
155
+ elif image.ndim == 2:
156
+ pba_func = _pba_2d
157
+ else:
158
+ raise NotImplementedError(
159
+ "Only 2D and 3D distance transforms are supported.")
160
+
161
+ vals = pba_func(
162
+ image,
163
+ sampling=sampling,
164
+ return_distances=return_distances,
165
+ return_indices=return_indices,
166
+ block_params=block_params,
167
+ distances=distances,
168
+ indices=indices,
169
+ float64_distances=float64_distances,
170
+ )
171
+
172
+ if return_distances and scalar_sampling is not None:
173
+ # inplace multiply in case distance != None
174
+ vals = list(vals)
175
+ vals[0] *= scalar_sampling
176
+ vals = tuple(vals)
177
+
178
+ if len(vals) == 1:
179
+ vals = vals[0]
180
+
181
+ return vals
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters.py ADDED
@@ -0,0 +1,1255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy
2
+
3
+ import cupy
4
+
5
+ from cupy import _core
6
+ from cupy._core import internal
7
+ from cupyx.scipy.ndimage import _util
8
+ from cupyx.scipy.ndimage import _filters_core
9
+ from cupyx.scipy.ndimage import _filters_generic
10
+
11
+
12
+ def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0):
13
+ """Multi-dimensional correlate.
14
+
15
+ The array is correlated with the given kernel.
16
+
17
+ Args:
18
+ input (cupy.ndarray): The input array.
19
+ weights (cupy.ndarray): Array of weights, same number of dimensions as
20
+ input
21
+ output (cupy.ndarray, dtype or None): The array in which to place the
22
+ output.
23
+ mode (str): The array borders are handled according to the given mode
24
+ (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
25
+ ``'wrap'``). Default is ``'reflect'``.
26
+ cval (scalar): Value to fill past edges of input if mode is
27
+ ``constant``. Default is ``0.0``.
28
+ origin (scalar or tuple of scalar): The origin parameter controls the
29
+ placement of the filter, relative to the center of the current
30
+ element of the input. Default of 0 is equivalent to
31
+ ``(0,)*input.ndim``.
32
+
33
+ Returns:
34
+ cupy.ndarray: The result of correlate.
35
+
36
+ .. seealso:: :func:`scipy.ndimage.correlate`
37
+
38
+ .. note::
39
+ When the output data type is integral (or when no output is provided
40
+ and input is integral) the results may not perfectly match the results
41
+ from SciPy due to floating-point rounding of intermediate results.
42
+ """
43
+ return _correlate_or_convolve(input, weights, output, mode, cval, origin)
44
+
45
+
46
+ def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0):
47
+ """Multi-dimensional convolution.
48
+
49
+ The array is convolved with the given kernel.
50
+
51
+ Args:
52
+ input (cupy.ndarray): The input array.
53
+ weights (cupy.ndarray): Array of weights, same number of dimensions as
54
+ input
55
+ output (cupy.ndarray, dtype or None): The array in which to place the
56
+ output.
57
+ mode (str): The array borders are handled according to the given mode
58
+ (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
59
+ ``'wrap'``). Default is ``'reflect'``.
60
+ cval (scalar): Value to fill past edges of input if mode is
61
+ ``constant``. Default is ``0.0``.
62
+ origin (scalar or tuple of scalar): The origin parameter controls the
63
+ placement of the filter, relative to the center of the current
64
+ element of the input. Default of 0 is equivalent to
65
+ ``(0,)*input.ndim``.
66
+
67
+ Returns:
68
+ cupy.ndarray: The result of convolution.
69
+
70
+ .. seealso:: :func:`scipy.ndimage.convolve`
71
+
72
+ .. note::
73
+ When the output data type is integral (or when no output is provided
74
+ and input is integral) the results may not perfectly match the results
75
+ from SciPy due to floating-point rounding of intermediate results.
76
+ """
77
+ return _correlate_or_convolve(input, weights, output, mode, cval, origin,
78
+ True)
79
+
80
+
81
+ def correlate1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0,
82
+ origin=0):
83
+ """One-dimensional correlate.
84
+
85
+ The array is correlated with the given kernel.
86
+
87
+ Args:
88
+ input (cupy.ndarray): The input array.
89
+ weights (cupy.ndarray): One-dimensional array of weights
90
+ axis (int): The axis of input along which to calculate. Default is -1.
91
+ output (cupy.ndarray, dtype or None): The array in which to place the
92
+ output. Default is is same dtype as the input.
93
+ mode (str): The array borders are handled according to the given mode
94
+ (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
95
+ ``'wrap'``). Default is ``'reflect'``.
96
+ cval (scalar): Value to fill past edges of input if mode is
97
+ ``'constant'``. Default is ``0.0``.
98
+ origin (int): The origin parameter controls the placement of the
99
+ filter, relative to the center of the current element of the
100
+ input. Default is ``0``.
101
+
102
+ Returns:
103
+ cupy.ndarray: The result of the 1D correlation.
104
+
105
+ .. seealso:: :func:`scipy.ndimage.correlate1d`
106
+
107
+ .. note::
108
+ When the output data type is integral (or when no output is provided
109
+ and input is integral) the results may not perfectly match the results
110
+ from SciPy due to floating-point rounding of intermediate results.
111
+ """
112
+ weights, origins = _filters_core._convert_1d_args(input.ndim, weights,
113
+ origin, axis)
114
+ return _correlate_or_convolve(input, weights, output, mode, cval, origins)
115
+
116
+
117
def convolve1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0,
               origin=0):
    """One-dimensional convolution.

    Convolves the lines of the array along ``axis`` with the given
    one-dimensional kernel.

    Args:
        input (cupy.ndarray): The input array.
        weights (cupy.ndarray): One-dimensional array of weights
        axis (int): The axis of input along which to calculate. Default is -1.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int): The origin parameter controls the placement of the
            filter, relative to the center of the current element of the
            input. Default is ``0``.

    Returns:
        cupy.ndarray: The result of the 1D convolution.

    .. seealso:: :func:`scipy.ndimage.convolve1d`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    # Same plumbing as correlate1d; the final ``True`` flag requests
    # convolution (kernel flipped) instead of correlation.
    nd_weights, nd_origins = _filters_core._convert_1d_args(
        input.ndim, weights, origin, axis)
    return _correlate_or_convolve(
        input, nd_weights, output, mode, cval, nd_origins, True)
151
+
152
+
153
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution=False):
    """Shared n-dimensional backend for correlate/convolve (and 1D variants).

    When ``convolution`` is True the weights are flipped so the correlation
    kernel computes a convolution instead.
    """
    origins, int_type = _filters_core._check_nd_args(input, weights,
                                                     mode, origin)
    # An empty kernel correlates to all zeros.
    if weights.size == 0:
        return cupy.zeros_like(input)

    _util._check_cval(mode, cval, _util._is_integer_output(output, input))

    if convolution:
        # Convolution == correlation with the kernel reversed on every axis.
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        # Origins must be mirrored as well; even-sized axes need an extra
        # shift of one so the center lines up the same way SciPy defines it.
        origins = list(origins)
        for i, wsize in enumerate(weights.shape):
            origins[i] = -origins[i]
            if wsize % 2 == 0:
                origins[i] -= 1
        origins = tuple(origins)
    elif weights.dtype.kind == "c":
        # numpy.correlate conjugates weights rather than input.
        weights = weights.conj()
    weights_dtype = _util._get_weights_dtype(input, weights)
    offsets = _filters_core._origins_to_offsets(origins, weights.shape)
    # Kernel generation is memoized per device (see _get_correlate_kernel).
    kernel = _get_correlate_kernel(mode, weights.shape, int_type,
                                   offsets, cval)
    output = _filters_core._call_kernel(kernel, input, weights, output,
                                        weights_dtype=weights_dtype)
    return output
180
+
181
+
182
@cupy._util.memoize(for_each_device=True)
def _get_correlate_kernel(mode, w_shape, int_type, offsets, cval):
    """Build (memoized per device) the elementwise n-d correlation kernel.

    The three code fragments are, in order: the per-pixel initializer, the
    per-weight accumulation step, and the final store into the output.
    """
    return _filters_core._generate_nd_kernel(
        'correlate',
        'W sum = (W)0;',
        'sum += cast<W>({value}) * wval;',
        'y = cast<Y>(sum);',
        mode, w_shape, int_type, offsets, cval, ctype='W')
190
+
191
+
192
def _run_1d_correlates(input, params, get_weights, output, mode, cval,
                       origin=0):
    """
    Enhanced version of _run_1d_filters that uses correlate1d as the filter
    function. The params are a list of values to pass to the get_weights
    callable given. If duplicate param values are found, the weights are
    reused from the first invocation of get_weights. The get_weights callable
    must return a 1D array of weights to give to correlate1d.
    """
    # Build each distinct weight array only once, keyed by its param value.
    cache = {}
    for param in params:
        if param not in cache:
            cache[param] = get_weights(param)
    weight_list = [cache[param] for param in params]
    # A None weight array means "skip this axis" (no filter applied).
    filter_list = [None if w is None else correlate1d for w in weight_list]
    return _filters_core._run_1d_filters(
        filter_list, input, weight_list, output, mode, cval, origin)
209
+
210
+
211
def uniform_filter1d(input, size, axis=-1, output=None, mode="reflect",
                     cval=0.0, origin=0):
    """One-dimensional uniform filter along the given axis.

    The lines of the array along the given axis are filtered with a uniform
    filter of the given size.

    Args:
        input (cupy.ndarray): The input array.
        size (int): Length of the uniform filter.
        axis (int): The axis of input along which to calculate. Default is -1.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int): The origin parameter controls the placement of the
            filter, relative to the center of the current element of the
            input. Default is ``0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.uniform_filter1d`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    # A uniform filter is just correlation with a constant 1/size kernel.
    kernel = cupy.full(size, 1 / size, dtype=_util._init_weights_dtype(input))
    return correlate1d(input, kernel, axis, output, mode, cval, origin)
246
+
247
+
248
def uniform_filter(input, size=3, output=None, mode="reflect", cval=0.0,
                   origin=0):
    """Multi-dimensional uniform filter.

    Args:
        input (cupy.ndarray): The input array.
        size (int or sequence of int): Lengths of the uniform filter for each
            dimension. A single value applies to all axes.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int or sequence of int): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of ``0`` is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.uniform_filter`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    sizes = _util._fix_sequence_arg(size, input.ndim, 'size', int)
    dtype = _util._init_weights_dtype(input)

    def _box_kernel(length):
        # A length <= 1 axis is a no-op; None tells the 1D runner to skip it.
        if length <= 1:
            return None
        return cupy.full(length, 1 / length, dtype=dtype)

    # Separable filter: one 1D correlation per axis.
    return _run_1d_correlates(input, sizes, _box_kernel, output, mode, cval,
                              origin)
285
+
286
+
287
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
                      mode="reflect", cval=0.0, truncate=4.0):
    """One-dimensional Gaussian filter along the given axis.

    The lines of the array along the given axis are filtered with a Gaussian
    filter of the given standard deviation.

    Args:
        input (cupy.ndarray): The input array.
        sigma (scalar): Standard deviation for Gaussian kernel.
        axis (int): The axis of input along which to calculate. Default is -1.
        order (int): An order of ``0``, the default, corresponds to convolution
            with a Gaussian kernel. A positive order corresponds to convolution
            with that derivative of a Gaussian.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        truncate (float): Truncate the filter at this many standard deviations.
            Default is ``4.0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.gaussian_filter1d`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    # The kernel spans ``truncate`` standard deviations on each side.
    half_width = int(float(truncate) * float(sigma) + 0.5)
    kernel = _gaussian_kernel1d(
        sigma, int(order), half_width,
        dtype=_util._init_weights_dtype(input))
    return correlate1d(input, kernel, axis, output, mode, cval)
327
+
328
+
329
def gaussian_filter(input, sigma, order=0, output=None, mode="reflect",
                    cval=0.0, truncate=4.0):
    """Multi-dimensional Gaussian filter.

    Args:
        input (cupy.ndarray): The input array.
        sigma (scalar or sequence of scalar): Standard deviations for each axis
            of Gaussian kernel. A single value applies to all axes.
        order (int or sequence of scalar): An order of ``0``, the default,
            corresponds to convolution with a Gaussian kernel. A positive order
            corresponds to convolution with that derivative of a Gaussian. A
            single value applies to all axes.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        truncate (float): Truncate the filter at this many standard deviations.
            Default is ``4.0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.gaussian_filter`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    sigmas = _util._fix_sequence_arg(sigma, input.ndim, 'sigma', float)
    orders = _util._fix_sequence_arg(order, input.ndim, 'order', int)
    truncate = float(truncate)
    weights_dtype = _util._init_weights_dtype(input)

    def get(param):
        # param is a (sigma, order) pair for one axis; a non-positive radius
        # means the axis is not filtered (None skips it in the 1D runner).
        sigma, order = param
        radius = int(truncate * float(sigma) + 0.5)
        if radius <= 0:
            return None
        return _gaussian_kernel1d(sigma, order, radius, dtype=weights_dtype)

    # Separable filter: one Gaussian 1D correlation per axis.
    return _run_1d_correlates(input, list(zip(sigmas, orders)), get, output,
                              mode, cval, 0)
375
+
376
+
377
def _gaussian_kernel1d(sigma, order, radius, dtype=cupy.float64):
    """
    Computes a 1-D Gaussian correlation kernel.

    The kernel has length ``2 * radius + 1``. ``order`` selects the
    ``order``-th derivative of the Gaussian; ``order == 0`` is the plain
    (normalized) Gaussian. The result is returned as a cupy array of the
    given dtype; intermediate math is done in NumPy on the host.
    """
    if order < 0:
        raise ValueError('order must be non-negative')
    sigma2 = sigma * sigma
    x = numpy.arange(-radius, radius+1)
    phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
    # Normalize so the base Gaussian sums to 1.
    phi_x /= phi_x.sum()

    if order == 0:
        return cupy.asarray(phi_x)

    # f(x) = q(x) * phi(x) = q(x) * exp(p(x))
    # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
    # p'(x) = -1 / sigma ** 2
    # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
    # coefficients of q(x)
    exponent_range = numpy.arange(order + 1)
    q = numpy.zeros(order + 1)
    q[0] = 1
    D = numpy.diag(exponent_range[1:], 1)  # D @ q(x) = q'(x)
    P = numpy.diag(numpy.ones(order)/-sigma2, -1)  # P @ q(x) = q(x) * p'(x)
    Q_deriv = D + P
    for _ in range(order):
        q = Q_deriv.dot(q)
    # Evaluate the polynomial q at each x (Vandermonde-style product).
    q = (x[:, None] ** exponent_range).dot(q)
    # Reverse so the kernel is oriented for correlation (not convolution).
    return cupy.asarray((q * phi_x)[::-1], dtype=dtype)
406
+
407
+
408
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Compute a Prewitt filter along the given axis.

    Args:
        input (cupy.ndarray): The input array.
        axis (int): The axis of input along which to calculate. Default is -1.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.prewitt`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    # Prewitt smooths the non-derivative axes with a flat [1, 1, 1] kernel.
    smoothing = cupy.ones(3, dtype=_util._init_weights_dtype(input))
    return _prewitt_or_sobel(input, axis, output, mode, cval, smoothing)
435
+
436
+
437
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Compute a Sobel filter along the given axis.

    Args:
        input (cupy.ndarray): The input array.
        axis (int): The axis of input along which to calculate. Default is -1.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.sobel`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    # Sobel smooths the non-derivative axes with a triangular [1, 2, 1]
    # kernel (Prewitt uses a flat one).
    smoothing = cupy.array([1, 2, 1], dtype=_util._init_weights_dtype(input))
    return _prewitt_or_sobel(input, axis, output, mode, cval, smoothing)
464
+
465
+
466
def _prewitt_or_sobel(input, axis, output, mode, cval, weights):
    """Separable derivative filter: difference along ``axis``, ``weights``
    smoothing along every other axis."""
    axis = internal._normalize_axis_index(axis, input.ndim)

    def _pick(is_derivative_axis):
        if is_derivative_axis:
            return cupy.array([-1, 0, 1], dtype=weights.dtype)
        return weights

    # One boolean per axis: True selects the central-difference kernel.
    flags = [ax == axis for ax in range(input.ndim)]
    return _run_1d_correlates(input, flags, _pick, output, mode, cval)
474
+
475
+
476
def generic_laplace(input, derivative2, output=None, mode="reflect",
                    cval=0.0, extra_arguments=(), extra_keywords=None):
    """Multi-dimensional Laplace filter using a provided second derivative
    function.

    Args:
        input (cupy.ndarray): The input array.
        derivative2 (callable): Function or other callable with the following
            signature that is called once per axis::

                derivative2(input, axis, output, mode, cval,
                            *extra_arguments, **extra_keywords)

            where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an
            ``int`` from ``0`` to the number of dimensions, and ``mode``,
            ``cval``, ``extra_arguments``, ``extra_keywords`` are the values
            given to this function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        extra_arguments (sequence, optional):
            Sequence of extra positional arguments to pass to ``derivative2``.
        extra_keywords (dict, optional):
            dict of extra keyword arguments to pass to ``derivative2``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.generic_laplace`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    if extra_keywords is None:
        extra_keywords = {}
    ndim = input.ndim
    # One border mode per axis (a single mode is broadcast to all axes).
    modes = _util._fix_sequence_arg(mode, ndim, 'mode',
                                    _util._check_mode)
    output = _util._get_output(output, input)
    if ndim == 0:
        # A 0-d array has no axes to differentiate; the Laplacian is a copy.
        _core.elementwise_copy(input, output)
        return output
    # Axis 0 writes directly into output; remaining axes accumulate via tmp.
    derivative2(input, 0, output, modes[0], cval,
                *extra_arguments, **extra_keywords)
    if ndim > 1:
        tmp = _util._get_output(output.dtype, input)
        for i in range(1, ndim):
            derivative2(input, i, tmp, modes[i], cval,
                        *extra_arguments, **extra_keywords)
            output += tmp
    return output
533
+
534
+
535
def laplace(input, output=None, mode="reflect", cval=0.0):
    """Multi-dimensional Laplace filter based on approximate second
    derivatives.

    Args:
        input (cupy.ndarray): The input array.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.laplace`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    # Discrete second derivative stencil, shared by every axis.
    stencil = cupy.array([1, -2, 1], dtype=_util._init_weights_dtype(input))

    def _second_difference(input, axis, output, mode, cval):
        return correlate1d(input, stencil, axis, output, mode, cval)

    return generic_laplace(input, _second_difference, output, mode, cval)
565
+
566
+
567
def gaussian_laplace(input, sigma, output=None, mode="reflect",
                     cval=0.0, **kwargs):
    """Multi-dimensional Laplace filter using Gaussian second derivatives.

    Args:
        input (cupy.ndarray): The input array.
        sigma (scalar or sequence of scalar): Standard deviations for each axis
            of Gaussian kernel. A single value applies to all axes.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        kwargs (dict, optional):
            dict of extra keyword arguments to pass ``gaussian_filter()``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.gaussian_laplace`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    def _axis_second_derivative(input, axis, output, mode, cval):
        # Second-order Gaussian derivative along one axis, order 0 elsewhere.
        orders = [0] * input.ndim
        orders[axis] = 2
        return gaussian_filter(input, sigma, orders, output, mode, cval,
                               **kwargs)

    return generic_laplace(input, _axis_second_derivative, output, mode, cval)
601
+
602
+
603
def generic_gradient_magnitude(input, derivative, output=None,
                               mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None):
    """Multi-dimensional gradient magnitude filter using a provided derivative
    function.

    Args:
        input (cupy.ndarray): The input array.
        derivative (callable): Function or other callable with the following
            signature that is called once per axis::

                derivative(input, axis, output, mode, cval,
                           *extra_arguments, **extra_keywords)

            where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an
            ``int`` from ``0`` to the number of dimensions, and ``mode``,
            ``cval``, ``extra_arguments``, ``extra_keywords`` are the values
            given to this function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        extra_arguments (sequence, optional):
            Sequence of extra positional arguments to pass to ``derivative``.
        extra_keywords (dict, optional):
            dict of extra keyword arguments to pass to ``derivative``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.generic_gradient_magnitude`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    if extra_keywords is None:
        extra_keywords = {}
    ndim = input.ndim
    # One border mode per axis (a single mode is broadcast to all axes).
    modes = _util._fix_sequence_arg(mode, ndim, 'mode',
                                    _util._check_mode)
    output = _util._get_output(output, input)
    if ndim == 0:
        # A 0-d array has no gradient axes; result is a copy of the input.
        _core.elementwise_copy(input, output)
        return output
    # Accumulate sum of squared per-axis derivatives, then take the root.
    derivative(input, 0, output, modes[0], cval,
               *extra_arguments, **extra_keywords)
    output *= output
    if ndim > 1:
        tmp = _util._get_output(output.dtype, input)
        for i in range(1, ndim):
            derivative(input, i, tmp, modes[i], cval,
                       *extra_arguments, **extra_keywords)
            tmp *= tmp
            output += tmp
    # In-place sqrt; 'unsafe' casting permits integral output dtypes.
    return cupy.sqrt(output, output, casting='unsafe')
663
+
664
+
665
def gaussian_gradient_magnitude(input, sigma, output=None, mode="reflect",
                                cval=0.0, **kwargs):
    """Multi-dimensional gradient magnitude using Gaussian derivatives.

    Args:
        input (cupy.ndarray): The input array.
        sigma (scalar or sequence of scalar): Standard deviations for each axis
            of Gaussian kernel. A single value applies to all axes.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        kwargs (dict, optional):
            dict of extra keyword arguments to pass ``gaussian_filter()``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.gaussian_gradient_magnitude`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match the results
        from SciPy due to floating-point rounding of intermediate results.
    """
    def _axis_first_derivative(input, axis, output, mode, cval):
        # First-order Gaussian derivative along one axis, order 0 elsewhere.
        orders = [0] * input.ndim
        orders[axis] = 1
        return gaussian_filter(input, sigma, orders, output, mode, cval,
                               **kwargs)

    return generic_gradient_magnitude(input, _axis_first_derivative, output,
                                      mode, cval)
699
+
700
+
701
def minimum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Multi-dimensional minimum filter.

    Args:
        input (cupy.ndarray): The input array.
        size (int or sequence of int): One of ``size`` or ``footprint`` must be
            provided. If ``footprint`` is given, ``size`` is ignored. Otherwise
            ``footprint = cupy.ones(size)`` with ``size`` automatically made to
            match the number of dimensions in ``input``.
        footprint (cupy.ndarray): a boolean array which specifies which of the
            elements within this shape will get passed to the filter function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int or sequence of int): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.minimum_filter`
    """
    # Structure argument is None: only grey-morphology paths use it.
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, func='min')
732
+
733
+
734
def maximum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Multi-dimensional maximum filter.

    Args:
        input (cupy.ndarray): The input array.
        size (int or sequence of int): One of ``size`` or ``footprint`` must be
            provided. If ``footprint`` is given, ``size`` is ignored. Otherwise
            ``footprint = cupy.ones(size)`` with ``size`` automatically made to
            match the number of dimensions in ``input``.
        footprint (cupy.ndarray): a boolean array which specifies which of the
            elements within this shape will get passed to the filter function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int or sequence of int): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.maximum_filter`
    """
    # Structure argument is None: only grey-morphology paths use it.
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, func='max')
765
+
766
+
767
def _min_or_max_filter(input, size, ftprnt, structure, output, mode, cval,
                       origin, func):
    """Shared backend for min/max filters and grey erosion/dilation.

    ``func`` is ``'min'`` or ``'max'``.
    """
    # structure is used by morphology.grey_erosion() and grey_dilation()
    # and not by the regular min/max filters

    sizes, ftprnt, structure = _filters_core._check_size_footprint_structure(
        input.ndim, size, ftprnt, structure)
    # NOTE(review): this is an identity check against cupy.nan, so it only
    # catches callers passing that exact object — verify intent.
    if cval is cupy.nan:
        raise NotImplementedError("NaN cval is unsupported")

    if sizes is not None:
        # Separable filter, run as a series of 1D filters
        fltr = minimum_filter1d if func == 'min' else maximum_filter1d
        return _filters_core._run_1d_filters(
            [fltr if size > 1 else None for size in sizes],
            input, sizes, output, mode, cval, origin)

    origins, int_type = _filters_core._check_nd_args(input, ftprnt,
                                                     mode, origin, 'footprint')
    if structure is not None and structure.ndim != input.ndim:
        raise RuntimeError('structure array has incorrect shape')

    # An empty footprint selects nothing; result is all zeros.
    if ftprnt.size == 0:
        return cupy.zeros_like(input)
    offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape)
    # has_central_value tells the kernel whether x[i] itself is part of the
    # footprint and can seed the running min/max.
    kernel = _get_min_or_max_kernel(mode, ftprnt.shape, func,
                                    offsets, float(cval), int_type,
                                    has_structure=structure is not None,
                                    has_central_value=bool(ftprnt[offsets]))
    return _filters_core._call_kernel(kernel, input, ftprnt, output,
                                      structure, weights_dtype=bool)
798
+
799
+
800
def minimum_filter1d(input, size, axis=-1, output=None, mode="reflect",
                     cval=0.0, origin=0):
    """Compute the minimum filter along a single axis.

    Args:
        input (cupy.ndarray): The input array.
        size (int): Length of the minimum filter.
        axis (int): The axis of input along which to calculate. Default is -1.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int): The origin parameter controls the placement of the
            filter, relative to the center of the current element of the
            input. Default is ``0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.minimum_filter1d`
    """
    return _min_or_max_1d(input, size, axis, output, mode, cval, origin,
                          func='min')
825
+
826
+
827
def maximum_filter1d(input, size, axis=-1, output=None, mode="reflect",
                     cval=0.0, origin=0):
    """Compute the maximum filter along a single axis.

    Args:
        input (cupy.ndarray): The input array.
        size (int): Length of the maximum filter.
        axis (int): The axis of input along which to calculate. Default is -1.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int): The origin parameter controls the placement of the
            filter, relative to the center of the current element of the
            input. Default is ``0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.maximum_filter1d`
    """
    return _min_or_max_1d(input, size, axis, output, mode, cval, origin,
                          func='max')
852
+
853
+
854
def _min_or_max_1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0,
                   origin=0, func='min'):
    """1D min/max filter: builds a flat footprint of ``size`` along ``axis``
    and runs the weight-less min/max kernel."""
    ftprnt = cupy.ones(size, dtype=bool)
    ftprnt, origin = _filters_core._convert_1d_args(input.ndim, ftprnt,
                                                    origin, axis)
    origins, int_type = _filters_core._check_nd_args(input, ftprnt,
                                                     mode, origin, 'footprint')
    offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape)
    # has_weights=False: the footprint is flat, so the kernel skips it and
    # accumulates intermediate results as doubles for SciPy consistency.
    kernel = _get_min_or_max_kernel(mode, ftprnt.shape, func, offsets,
                                    float(cval), int_type, has_weights=False)
    return _filters_core._call_kernel(kernel, input, None, output,
                                      weights_dtype=bool)
866
+
867
+
868
@cupy._util.memoize(for_each_device=True)
def _get_min_or_max_kernel(mode, w_shape, func, offsets, cval, int_type,
                           has_weights=True, has_structure=False,
                           has_central_value=True):
    """Build (and memoize per device) the elementwise min/max filter kernel.

    Args:
        mode (str): Boundary mode (already validated by the caller).
        w_shape (tuple of int): Shape of the footprint window.
        func (str): ``'min'`` or ``'max'``; used directly as the CUDA
            function applied pairwise to candidate values.
        offsets (tuple of int): Per-axis window offsets (center + origin).
        cval (float): Fill value used when ``mode`` is ``'constant'``.
        int_type (str): C integer type used for indexing.
        has_weights (bool): Whether a footprint array is passed to the kernel.
        has_structure (bool): Whether a structuring element additively biases
            the values (grayscale erosion/dilation).
        has_central_value (bool): Whether the center pixel is guaranteed to
            be included in the footprint.

    Returns:
        cupy.ElementwiseKernel: The generated kernel.
    """
    # When there are no 'weights' (the footprint, for the 1D variants) then
    # we need to make sure intermediate results are stored as doubles for
    # consistent results with scipy.
    ctype = 'X' if has_weights else 'double'
    value = '{value}'
    if not has_weights:
        value = 'cast<double>({})'.format(value)

    # Having a non-flat structure biases the values
    if has_structure:
        value += ('-' if func == 'min' else '+') + 'cast<X>(sval)'

    if has_central_value:
        # Safe to seed the running extreme with the center pixel itself.
        pre = '{} value = x[i];'
        found = 'value = {func}({value}, value);'
    else:
        # If the central pixel is not included in the footprint we cannot
        # assume `x[i]` is not below the min or above the max and thus cannot
        # seed with that value. Instead we keep track of having set `value`.
        pre = '{} value; bool set = false;'
        found = 'value = set ? {func}({value}, value) : {value}; set=true;'

    return _filters_core._generate_nd_kernel(
        func, pre.format(ctype),
        found.format(func=func, value=value), 'y = cast<Y>(value);',
        mode, w_shape, int_type, offsets, cval, ctype=ctype,
        has_weights=has_weights, has_structure=has_structure)
899
+
900
+
901
def rank_filter(input, rank, size=None, footprint=None, output=None,
                mode="reflect", cval=0.0, origin=0):
    """Multi-dimensional rank filter.

    Args:
        input (cupy.ndarray): The input array.
        rank (int): The rank of the element to get. Can be negative to count
            from the largest value, e.g. ``-1`` indicates the largest value.
        size (int or sequence of int): One of ``size`` or ``footprint`` must
            be provided. If ``footprint`` is given, ``size`` is ignored.
            Otherwise ``footprint = cupy.ones(size)`` with ``size``
            automatically made to match the number of dimensions in ``input``.
        footprint (cupy.ndarray): a boolean array which specifies which of the
            elements within this shape will get passed to the filter function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int or sequence of int): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.rank_filter`
    """
    rank = int(rank)

    def resolve_rank(fs):
        # Negative ranks count backwards from the largest window element.
        return fs + rank if rank < 0 else rank

    return _rank_filter(input, resolve_rank,
                        size, footprint, output, mode, cval, origin)
935
+
936
+
937
def median_filter(input, size=None, footprint=None, output=None,
                  mode="reflect", cval=0.0, origin=0):
    """Multi-dimensional median filter.

    Args:
        input (cupy.ndarray): The input array.
        size (int or sequence of int): One of ``size`` or ``footprint`` must
            be provided. If ``footprint`` is given, ``size`` is ignored.
            Otherwise ``footprint = cupy.ones(size)`` with ``size``
            automatically made to match the number of dimensions in ``input``.
        footprint (cupy.ndarray): a boolean array which specifies which of the
            elements within this shape will get passed to the filter function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int or sequence of int): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.median_filter`
    """
    def middle_rank(fs):
        # The median of ``fs`` sorted window values sits at index fs // 2.
        return fs // 2

    return _rank_filter(input, middle_rank,
                        size, footprint, output, mode, cval, origin)
968
+
969
+
970
def percentile_filter(input, percentile, size=None, footprint=None,
                      output=None, mode="reflect", cval=0.0, origin=0):
    """Multi-dimensional percentile filter.

    Args:
        input (cupy.ndarray): The input array.
        percentile (scalar): The percentile of the element to get (from ``0``
            to ``100``). Can be negative, thus ``-20`` equals ``80``.
        size (int or sequence of int): One of ``size`` or ``footprint`` must
            be provided. If ``footprint`` is given, ``size`` is ignored.
            Otherwise ``footprint = cupy.ones(size)`` with ``size``
            automatically made to match the number of dimensions in ``input``.
        footprint (cupy.ndarray): a boolean array which specifies which of the
            elements within this shape will get passed to the filter function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int or sequence of int): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.percentile_filter`
    """
    percentile = float(percentile)
    if percentile < 0.0:
        # A negative percentile counts down from 100.
        percentile += 100.0
    if not 0.0 <= percentile <= 100.0:
        raise RuntimeError('invalid percentile')

    def get_rank(fs):
        if percentile == 100.0:
            # The 100th percentile is the maximum: the last ranked element.
            return fs - 1
        return int(float(fs) * percentile / 100.0)

    return _rank_filter(input, get_rank,
                        size, footprint, output, mode, cval, origin)
1014
+
1015
+
1016
def _rank_filter(input, get_rank, size=None, footprint=None, output=None,
                 mode="reflect", cval=0.0, origin=0):
    """Shared implementation of rank, median and percentile filters.

    ``get_rank`` maps the runtime footprint size (number of True elements)
    to the index of the element to select from each sorted window.
    """
    _, footprint, _ = _filters_core._check_size_footprint_structure(
        input.ndim, size, footprint, None, force_footprint=True)
    # NOTE(review): ``cval is cupy.nan`` is an identity check, so it only
    # catches the exact ``cupy.nan`` object, not every NaN value — confirm
    # this matches the intended guard.
    if cval is cupy.nan:
        raise NotImplementedError("NaN cval is unsupported")
    origins, int_type = _filters_core._check_nd_args(input, footprint,
                                                     mode, origin, 'footprint')
    if footprint.size == 0:
        return cupy.zeros_like(input)
    filter_size = int(footprint.sum())
    rank = get_rank(filter_size)
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    # Rank 0 and rank filter_size-1 are exactly the minimum and maximum
    # filters, which have a cheaper dedicated kernel.
    if rank == 0:
        return _min_or_max_filter(input, None, footprint, None, output, mode,
                                  cval, origins, 'min')
    if rank == filter_size - 1:
        return _min_or_max_filter(input, None, footprint, None, output, mode,
                                  cval, origins, 'max')
    offsets = _filters_core._origins_to_offsets(origins, footprint.shape)
    kernel = _get_rank_kernel(filter_size, rank, mode, footprint.shape,
                              offsets, float(cval), int_type)
    return _filters_core._call_kernel(kernel, input, footprint, output,
                                      weights_dtype=bool)
1041
+
1042
+
1043
# CUDA source template for an in-place shell sort over a thread-local array.
# ``{gap}`` is filled by ``str.format`` with a precomputed gap from
# ``_get_shell_gap``; the doubled braces survive the format call as single
# braces in the emitted CUDA code.
__SHELL_SORT = '''
__device__ void sort(X *array, int size) {{
    int gap = {gap};
    while (gap > 1) {{
        gap /= 3;
        for (int i = gap; i < size; ++i) {{
            X value = array[i];
            int j = i - gap;
            while (j >= 0 && value < array[j]) {{
                array[j + gap] = array[j];
                j -= gap;
            }}
            array[j + gap] = value;
        }}
    }}
}}'''
1059
+
1060
+
1061
@cupy._util.memoize()
def _get_shell_gap(filter_size):
    """Return the smallest gap from the sequence 1, 4, 13, 40, ... (3g+1)
    that is at least ``filter_size``, used to seed the shell sort kernel."""
    candidate = 1
    while True:
        if candidate >= filter_size:
            return candidate
        candidate = 3 * candidate + 1
1067
+
1068
+
1069
@cupy._util.memoize(for_each_device=True)
def _get_rank_kernel(filter_size, rank, mode, w_shape, offsets, cval,
                     int_type):
    """Build (and memoize per device) the elementwise kernel for rank_filter.

    Depending on how close ``rank`` is to either end of the sorted window,
    either a bounded partial selection (small ``s_rank``) or a full shell
    sort of the collected window values is generated.
    """
    # Distance of the requested rank from the nearer end of the window; a
    # small value means only a few extreme values need to be tracked.
    s_rank = min(rank, filter_size - rank - 1)
    # The threshold was set based on the measurements on a V100
    # TODO(leofang, anaruse): Use Optuna to automatically tune the threshold,
    # as it may vary depending on the GPU in use, compiler version, dtype,
    # filter size, etc.
    if s_rank <= 80:
        # When s_rank is small and register usage is low, this partial
        # selection sort approach is faster than general sorting approach
        # using shell sort.
        if s_rank == rank:
            comp_op = '<'
        else:
            comp_op = '>'
        array_size = s_rank + 2
        # The quadruple braces below survive two rounds of str.format (once
        # here, once in _generate_nd_kernel) as single braces in CUDA code.
        found_post = '''
            if (iv > {rank} + 1) {{{{
                int target_iv = 0;
                X target_val = values[0];
                for (int jv = 1; jv <= {rank} + 1; jv++) {{{{
                    if (target_val {comp_op} values[jv]) {{{{
                        target_val = values[jv];
                        target_iv = jv;
                    }}}}
                }}}}
                if (target_iv <= {rank}) {{{{
                    values[target_iv] = values[{rank} + 1];
                }}}}
                iv = {rank} + 1;
            }}}}'''.format(rank=s_rank, comp_op=comp_op)
        post = '''
            X target_val = values[0];
            for (int jv = 1; jv <= {rank}; jv++) {{
                if (target_val {comp_op} values[jv]) {{
                    target_val = values[jv];
                }}
            }}
            y=cast<Y>(target_val);'''.format(rank=s_rank, comp_op=comp_op)
        sorter = ''
    else:
        # General case: gather the whole window and shell-sort it.
        array_size = filter_size
        found_post = ''
        post = 'sort(values,{});\ny=cast<Y>(values[{}]);'.format(
            filter_size, rank)
        sorter = __SHELL_SORT.format(gap=_get_shell_gap(filter_size))

    return _filters_core._generate_nd_kernel(
        'rank_{}_{}'.format(filter_size, rank),
        'int iv = 0;\nX values[{}];'.format(array_size),
        'values[iv++] = {value};' + found_post, post,
        mode, w_shape, int_type, offsets, cval, preamble=sorter)
1122
+
1123
+
1124
def generic_filter(input, function, size=None, footprint=None,
                   output=None, mode="reflect", cval=0.0, origin=0):
    """Compute a multi-dimensional filter using the provided raw kernel or
    reduction kernel.

    Unlike the scipy.ndimage function, this does not support the
    ``extra_arguments`` or ``extra_keywords`` arguments and has significant
    restrictions on the ``function`` provided.

    Args:
        input (cupy.ndarray): The input array.
        function (cupy.ReductionKernel or cupy.RawKernel):
            The kernel or function to apply to each region.
        size (int or sequence of int): One of ``size`` or ``footprint`` must
            be provided. If ``footprint`` is given, ``size`` is ignored.
            Otherwise ``footprint = cupy.ones(size)`` with ``size``
            automatically made to match the number of dimensions in ``input``.
        footprint (cupy.ndarray): a boolean array which specifies which of the
            elements within this shape will get passed to the filter function.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. note::
        If the `function` is a :class:`cupy.RawKernel` then it must be for a
        function that has the following signature. Unlike most functions, this
        should not utilize `blockDim`/`blockIdx`/`threadIdx`::

            __global__ void func(double *buffer, int filter_size,
                                 double *return_value)

        If the `function` is a :class:`cupy.ReductionKernel` then it must be
        for a kernel that takes 1 array input and produces 1 'scalar' output.

    .. seealso:: :func:`scipy.ndimage.generic_filter`
    """
    _, footprint, _ = _filters_core._check_size_footprint_structure(
        input.ndim, size, footprint, None, 2, True)
    filter_size = int(footprint.sum())
    origins, int_type = _filters_core._check_nd_args(input, footprint,
                                                     mode, origin, 'footprint')
    in_dtype = input.dtype
    # Validates the function type (raises TypeError for anything other than
    # RawKernel/ReductionKernel) before the empty-footprint early return.
    sub = _filters_generic._get_sub_kernel(function)
    if footprint.size == 0:
        return cupy.zeros_like(input)
    output = _util._get_output(output, input)
    offsets = _filters_core._origins_to_offsets(origins, footprint.shape)
    args = (filter_size, mode, footprint.shape, offsets, float(cval), int_type)
    if isinstance(sub, cupy.RawKernel):
        kernel = _filters_generic._get_generic_filter_raw(sub, *args)
    elif isinstance(sub, cupy.ReductionKernel):
        kernel = _filters_generic._get_generic_filter_red(
            sub, in_dtype, output.dtype, *args)
    return _filters_core._call_kernel(kernel, input, footprint, output,
                                      weights_dtype=bool)
1190
+
1191
+
1192
def generic_filter1d(input, function, filter_size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Compute a 1D filter along the given axis using the provided raw kernel.

    Unlike the scipy.ndimage function, this does not support the
    ``extra_arguments`` or ``extra_keywords`` arguments and has significant
    restrictions on the ``function`` provided.

    Args:
        input (cupy.ndarray): The input array.
        function (cupy.RawKernel): The kernel to apply along each axis.
        filter_size (int): Length of the filter.
        axis (int): The axis of input along which to calculate. Default is -1.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        origin (int): The origin parameter controls the placement of the
            filter, relative to the center of the current element of the
            input. Default is ``0``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. note::
        The provided function (as a RawKernel) must have the following
        signature. Unlike most functions, this should not utilize
        `blockDim`/`blockIdx`/`threadIdx`::

            __global__ void func(double *input_line, ptrdiff_t input_length,
                                 double *output_line, ptrdiff_t output_length)

    .. seealso:: :func:`scipy.ndimage.generic_filter1d`
    """
    # This filter is very different than all other filters (including
    # generic_filter and all 1d filters) and it has a customized solution.
    # It is also likely fairly terrible, but only so much can be done when
    # matching the scipy interface of having the sub-kernel work on entire
    # lines of data.
    if input.dtype.kind == 'c':
        raise TypeError('Complex type not supported')
    if not isinstance(function, cupy.RawKernel):
        raise TypeError('bad function type')
    if filter_size < 1:
        raise RuntimeError('invalid filter size')
    axis = internal._normalize_axis_index(axis, input.ndim)
    origin = _util._check_origin(origin, filter_size)
    _util._check_mode(mode)
    output = _util._get_output(output, input)
    in_ctype = cupy._core._scalar.get_typename(input.dtype)
    out_ctype = cupy._core._scalar.get_typename(output.dtype)
    int_type = _util._get_inttype(input)
    # One generated thread processes one full line along ``axis``.
    n_lines = input.size // input.shape[axis]
    kernel = _filters_generic._get_generic_filter1d(
        function, input.shape[axis], n_lines, filter_size,
        origin, mode, float(cval), in_ctype, out_ctype, int_type)
    # Pack axis, rank, shape and strides into a single device array so the
    # kernel can compute line addresses itself.
    data = cupy.array(
        (axis, input.ndim) + input.shape + input.strides + output.strides,
        dtype=cupy.int32 if int_type == 'int' else cupy.int64)
    # Explicit launch: one thread per line, 128 threads per block.
    kernel(((n_lines+128-1) // 128,), (128,), (input, output, data))
    return output
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_core.py ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ import numpy
4
+ import cupy
5
+
6
+ from cupy_backends.cuda.api import runtime
7
+ from cupy import _core
8
+ from cupy._core import internal
9
+ from cupyx.scipy.ndimage import _util
10
+
11
+
12
+ def _origins_to_offsets(origins, w_shape):
13
+ return tuple(x//2+o for x, o in zip(w_shape, origins))
14
+
15
+
16
def _check_size_footprint_structure(ndim, size, footprint, structure,
                                    stacklevel=3, force_footprint=False):
    """Normalize the size/footprint/structure arguments of the nd filters.

    Returns a ``(sizes, footprint, structure)`` triple in which only the
    relevant entries are non-None:

    * only ``size`` given: ``(sizes, None, None)``, or
      ``(None, all-True footprint, None)`` when ``force_footprint`` is set.
    * ``footprint`` given, no ``structure``: an all-True footprint is
      collapsed back to just its shape (unless ``force_footprint``).
    * ``structure`` given: the footprint defaults to all-True of the same
      shape as the structure.
    """
    if structure is None and footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _util._fix_sequence_arg(size, ndim, 'size', int)
        if force_footprint:
            return None, cupy.ones(sizes, bool), None
        return sizes, None, None
    if size is not None:
        # size is redundant once a footprint or structure is supplied
        warnings.warn("ignoring size because {} is set".format(
            'structure' if footprint is None else 'footprint'),
            UserWarning, stacklevel=stacklevel+1)

    if footprint is not None:
        footprint = cupy.array(footprint, bool, True, 'C')
        if not footprint.any():
            raise ValueError("all-zero footprint is not supported")

    if structure is None:
        if not force_footprint and footprint.all():
            # A full footprint carries no more information than its shape,
            # which enables a faster weights-free code path downstream.
            if footprint.ndim != ndim:
                raise RuntimeError("size must have length equal to input rank")
            return footprint.shape, None, None
        return None, footprint, None

    structure = cupy.ascontiguousarray(structure)
    if footprint is None:
        footprint = cupy.ones(structure.shape, bool)
    return None, footprint, structure
46
+
47
+
48
def _convert_1d_args(ndim, weights, origin, axis):
    """Expand a 1D weights array and scalar origin to their nd equivalents
    (singleton dimensions everywhere except ``axis``)."""
    if weights.ndim != 1 or weights.size < 1:
        raise RuntimeError('incorrect filter size')
    axis = internal._normalize_axis_index(axis, ndim)
    length = weights.size
    # Place the filter length on ``axis`` and 1 on every other dimension.
    shape = [length if dim == axis else 1 for dim in range(ndim)]
    origins = [0] * ndim
    origins[axis] = _util._check_origin(origin, length)
    return weights.reshape(shape), tuple(origins)
58
+
59
+
60
def _check_nd_args(input, weights, mode, origin, wghts_name='filter weights'):
    """Validate mode, weights shape and origins for an nd filter.

    Returns the normalized per-axis origins and the C integer type to use
    for index arithmetic in the generated kernel.
    """
    _util._check_mode(mode)
    # Weights must always be less than 2 GiB
    if weights.nbytes >= (1 << 31):
        raise RuntimeError('weights must be 2 GiB or less, use FFTs instead')
    # Zero-length axes are ignored when comparing against the input rank.
    weight_dims = [dim for dim in weights.shape if dim != 0]
    if len(weight_dims) != input.ndim:
        raise RuntimeError('{} array has incorrect shape'.format(wghts_name))
    origins = _util._fix_sequence_arg(origin, len(weight_dims), 'origin', int)
    for axis_origin, width in zip(origins, weight_dims):
        _util._check_origin(axis_origin, width)
    return tuple(origins), _util._get_inttype(input)
72
+
73
+
74
def _run_1d_filters(filters, input, args, output, mode, cval, origin=0):
    """
    Runs a series of 1D filters forming an nd filter. The filters must be a
    list of callables that take input, arg, axis, output, mode, cval, origin.
    The args is a list of values that are passed for the arg value to the
    filter. Individual filters can be None causing that axis to be skipped.
    """
    output = _util._get_output(output, input)
    modes = _util._fix_sequence_arg(mode, input.ndim, 'mode',
                                    _util._check_mode)
    # for filters, "wrap" is a synonym for "grid-wrap".
    modes = ['grid-wrap' if m == 'wrap' else m for m in modes]
    origins = _util._fix_sequence_arg(origin, input.ndim, 'origin', int)
    n_filters = sum(filter is not None for filter in filters)
    if n_filters == 0:
        _core.elementwise_copy(input, output)
        return output
    # We can't operate in-place efficiently, so use a 2-buffer system
    temp = _util._get_output(output.dtype, input) if n_filters > 1 else None
    # NOTE: the same iterator is shared by both loops below, so the second
    # loop resumes where the first one stopped (after the first non-None
    # filter).
    iterator = zip(filters, args, modes, origins)
    # skip any axes where the filter is None
    for axis, (fltr, arg, mode, origin) in enumerate(iterator):
        if fltr is not None:
            break
    # To avoid need for any additional copies, we have to start with a
    # different output array depending on whether the total number of filters
    # is odd or even.
    if n_filters % 2 == 0:
        fltr(input, arg, axis, temp, mode, cval, origin)
        input = temp
    else:
        fltr(input, arg, axis, output, mode, cval, origin)
        input, output = output, temp
    for axis, (fltr, arg, mode, origin) in enumerate(iterator, start=axis + 1):
        if fltr is None:
            continue
        fltr(input, arg, axis, output, mode, cval, origin)
        # ping-pong between the two buffers after every applied filter
        input, output = output, input
    # after the buffer swaps, ``input`` holds the final result
    return input
113
+
114
+
115
def _call_kernel(kernel, input, weights, output, structure=None,
                 weights_dtype=numpy.float64, structure_dtype=numpy.float64):
    """
    Calls a constructed ElementwiseKernel. The kernel must take an input image,
    an optional array of weights, an optional array for the structure, and an
    output array.

    weights and structure can be given as None (structure defaults to None) in
    which case they are not passed to the kernel at all. If the output is given
    as None then it will be allocated in this function.

    This function deals with making sure that the weights and structure are
    contiguous and float64 (or bool for weights that are footprints)*, that the
    output is allocated and appropriately shaped. This also deals with the
    situation that the input and output arrays overlap in memory.

    * weights is always cast to float64 or bool in order to get an output
    compatible with SciPy, though float32 might be sufficient when input dtype
    is low precision. If weights_dtype is passed as weights.dtype then no
    dtype conversion will occur. The input and output are never converted.
    """
    args = [input]
    complex_output = input.dtype.kind == 'c'
    if weights is not None:
        weights = cupy.ascontiguousarray(weights, weights_dtype)
        complex_output = complex_output or weights.dtype.kind == 'c'
        args.append(weights)
    if structure is not None:
        structure = cupy.ascontiguousarray(structure, structure_dtype)
        args.append(structure)
    output = _util._get_output(output, input, None, complex_output)
    # If the caller's output buffer overlaps the input, run the kernel into a
    # freshly-allocated array and copy back afterwards.
    needs_temp = cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS')
    if needs_temp:
        output, temp = _util._get_output(output.dtype, input), output
    args.append(output)
    kernel(*args)
    if needs_temp:
        _core.elementwise_copy(temp, output)
        output = temp
    return output
155
+
156
+
157
# Preamble headers included in every generated filter kernel. The HIP build
# cannot take the C++ std:: coverage header, so it only gets the math
# constants header.
if runtime.is_hip:
    includes = r'''
// workaround for HIP: line begins with #include
#include <cupy/math_constants.h>\n
'''
else:
    includes = r'''
#include <cupy/cuda_workaround.h>  // provide C++ std:: coverage
#include <cupy/math_constants.h>

template<> struct std::is_floating_point<float16> : std::true_type {};
template<> struct std::is_signed<float16> : std::true_type {};
'''
170
+
171
+
172
# Device-side helpers injected into every generated kernel: a SciPy-
# compatible ``cast<B>(a)`` (negative floats converted to an unsigned type
# go through ``-(B)(-a)``) and a ``nonzero`` predicate used for footprint
# weights.
_CAST_FUNCTION = """
// Implements a casting function to make it compatible with scipy
// Use like cast<to_type>(value)
template <class B, class A>
__device__ __forceinline__
typename std::enable_if<(!std::is_floating_point<A>::value
                         || std::is_signed<B>::value), B>::type
cast(A a) { return (B)a; }

template <class B, class A>
__device__ __forceinline__
typename std::enable_if<(std::is_floating_point<A>::value
                         && (!std::is_signed<B>::value)), B>::type
cast(A a) { return (a >= 0) ? (B)a : -(B)(-a); }

template <class T>
__device__ __forceinline__ bool nonzero(T x) { return x != static_cast<T>(0); }
"""
190
+
191
+
192
def _generate_nd_kernel(name, pre, found, post, mode, w_shape, int_type,
                        offsets, cval, ctype='X', preamble='', options=(),
                        has_weights=True, has_structure=False, has_mask=False,
                        binary_morphology=False, all_weights_nonzero=False):
    """Generate the ElementwiseKernel shared by all nd filters.

    ``pre`` is CUDA code run before the window loops, ``found`` is run for
    each (non-zero-weighted) window element with ``{value}`` substituted by
    the element expression, and ``post`` is run after the loops to produce
    ``y``.  ``binary_morphology`` additionally substitutes ``{cond}`` (the
    out-of-bounds test) into ``found``; in that case the caller is expected
    to use ``mode='constant'`` — ``cond`` is only defined for that mode.
    """
    # Currently this code uses CArray for weights but avoids using CArray for
    # the input data and instead does the indexing itself since it is faster.
    # If CArray becomes faster than follow the comments that start with
    # CArray: to switch over to using CArray for the input data as well.

    ndim = len(w_shape)
    in_params = 'raw X x'
    if has_weights:
        in_params += ', raw W w'
    if has_structure:
        in_params += ', raw S s'
    if has_mask:
        in_params += ', raw M mask'
    out_params = 'Y y'

    # for filters, "wrap" is a synonym for "grid-wrap"
    mode = 'grid-wrap' if mode == 'wrap' else mode

    # CArray: remove xstride_{j}=... from string
    size = ('%s xsize_{j}=x.shape()[{j}], ysize_{j} = _raw_y.shape()[{j}]'
            ', xstride_{j}=x.strides()[{j}];' % int_type)
    sizes = [size.format(j=j) for j in range(ndim)]
    inds = _util._generate_indices_ops(ndim, int_type, offsets)
    # CArray: remove expr entirely
    expr = ' + '.join(['ix_{}'.format(j) for j in range(ndim)])

    ws_init = ws_pre = ws_post = ''
    if has_weights or has_structure:
        # iws walks the flattened weights/structure arrays in window order
        ws_init = 'int iws = 0;'
        if has_structure:
            ws_pre = 'S sval = s[iws];\n'
        if has_weights:
            ws_pre += 'W wval = w[iws];\n'
            if not all_weights_nonzero:
                # skip zero-weighted (masked-out) footprint entries
                ws_pre += 'if (nonzero(wval))'
        ws_post = 'iws++;'

    loops = []
    for j in range(ndim):
        if w_shape[j] == 1:
            # Singleton axis: no loop needed, just compute the byte offset.
            # CArray: string becomes 'inds[{j}] = ind_{j};', remove (int_)type
            loops.append('{{ {type} ix_{j} = ind_{j} * xstride_{j};'.
                         format(j=j, type=int_type))
        else:
            boundary = _util._generate_boundary_condition_ops(
                mode, 'ix_{}'.format(j), 'xsize_{}'.format(j), int_type)
            # CArray: last line of string becomes inds[{j}] = ix_{j};
            loops.append('''
    for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++)
    {{
        {type} ix_{j} = ind_{j} + iw_{j};
        {boundary}
        ix_{j} *= xstride_{j};
        '''.format(j=j, wsize=w_shape[j], boundary=boundary, type=int_type))

    # CArray: string becomes 'x[inds]', no format call needed
    value = '(*(X*)&data[{expr}])'.format(expr=expr)
    if mode == 'constant':
        # out-of-bounds test: the boundary ops leave ix_j negative when the
        # window element falls outside the input
        cond = ' || '.join(['(ix_{} < 0)'.format(j) for j in range(ndim)])

    # Translate special float fill values to their CUDA constant names.
    if cval is numpy.nan:
        cval = 'CUDART_NAN'
    elif cval == numpy.inf:
        cval = 'CUDART_INF'
    elif cval == -numpy.inf:
        cval = '-CUDART_INF'

    if binary_morphology:
        found = found.format(cond=cond, value=value)
    else:
        if mode == 'constant':
            value = '(({cond}) ? cast<{ctype}>({cval}) : {value})'.format(
                cond=cond, ctype=ctype, cval=cval, value=value)
        found = found.format(value=value)

    # CArray: replace comment and next line in string with
    #     {type} inds[{ndim}] = {{0}};
    # and add ndim=ndim, type=int_type to format call
    operation = '''
    {sizes}
    {inds}
    // don't use a CArray for indexing (faster to deal with indexing ourselves)
    const unsigned char* data = (const unsigned char*)&x[0];
    {ws_init}
    {pre}
    {loops}
        // inner-most loop
        {ws_pre} {{
            {found}
        }}
        {ws_post}
    {end_loops}
    {post}
    '''.format(sizes='\n'.join(sizes), inds=inds, pre=pre, post=post,
               ws_init=ws_init, ws_pre=ws_pre, ws_post=ws_post,
               loops='\n'.join(loops), found=found, end_loops='}'*ndim)

    mode_str = mode.replace('-', '_')  # avoid potential hyphen in kernel name
    name = 'cupyx_scipy_ndimage_{}_{}d_{}_w{}'.format(
        name, ndim, mode_str, '_'.join(['{}'.format(x) for x in w_shape]))
    if all_weights_nonzero:
        name += '_all_nonzero'
    if int_type == 'ptrdiff_t':
        name += '_i64'
    if has_structure:
        name += '_with_structure'
    if has_mask:
        name += '_with_mask'
    preamble = includes + _CAST_FUNCTION + preamble
    options += ('--std=c++11', )
    return cupy.ElementwiseKernel(in_params, out_params, operation, name,
                                  reduce_dims=False, preamble=preamble,
                                  options=options)
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_generic.py ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cupy
2
+ from cupy_backends.cuda.api import runtime
3
+ from cupy import _util
4
+ from cupyx.scipy.ndimage import _filters_core
5
+
6
+
7
+ def _get_sub_kernel(f):
8
+ """
9
+ Takes the "function" given to generic_filter and returns the "sub-kernel"
10
+ that will be called, one of RawKernel or ReductionKernel.
11
+
12
+ This supports:
13
+ * cupy.RawKernel
14
+ no checks are possible
15
+ * cupy.ReductionKernel
16
+ checks that there is a single input and output
17
+ """
18
+ if isinstance(f, cupy.RawKernel):
19
+ # We will assume that it has the correct API
20
+ return f
21
+ elif isinstance(f, cupy.ReductionKernel):
22
+ if f.nin != 1 or f.nout != 1:
23
+ raise TypeError('ReductionKernel must have 1 input and output')
24
+ return f
25
+ elif isinstance(f, cupy.ElementwiseKernel):
26
+ # special error message for ElementwiseKernels
27
+ raise TypeError('only ReductionKernel allowed (not ElementwiseKernel)')
28
+ else:
29
+ raise TypeError('bad function type')
30
+
31
+
32
+ @_util.memoize(for_each_device=True)
33
+ def _get_generic_filter_red(rk, in_dtype, out_dtype, filter_size, mode,
34
+ wshape, offsets, cval, int_type):
35
+ """Generic filter implementation based on a reduction kernel."""
36
+ # Get the temporary output c type
37
+ in_param, out_param = rk.in_params[0], rk.out_params[0]
38
+ out_ctype = out_param.ctype
39
+ if out_param.dtype is None: # resolve template
40
+ out_ctype = cupy._core._scalar.get_typename(
41
+ in_dtype if out_param.ctype == in_param.ctype else out_dtype)
42
+
43
+ # Get code chunks
44
+ setup = '''
45
+ int iv = 0;
46
+ X values[{size}];
47
+ CArray<X, 1, true, true> sub_in(values, {{{size}}});
48
+ {out_ctype} val_out;
49
+ CArray<{out_ctype}, 1, true, true> sub_out(&val_out, {{1}});
50
+ '''.format(size=filter_size, out_ctype=out_ctype)
51
+
52
+ sub_call = '''reduction_kernel::{}(sub_in, sub_out);
53
+ y = cast<Y>(val_out);'''.format(rk.name)
54
+
55
+ sub_kernel = _reduction_kernel_code(rk, filter_size, out_dtype, in_dtype)
56
+
57
+ # Get the final kernel
58
+ return _filters_core._generate_nd_kernel(
59
+ 'generic_{}_{}'.format(filter_size, rk.name),
60
+ setup, 'values[iv++] = {value};', sub_call,
61
+ mode, wshape, int_type, offsets, cval, preamble=sub_kernel,
62
+ options=getattr(rk, 'options', ()))
63
+
64
+
65
+ def _reduction_kernel_code(rk, filter_size, out_dtype, in_dtype):
66
+ # NOTE: differences from the code generated for real reduction kernels:
67
+ # * input is always 1D and always less than 2^31 elements
68
+ # * output is always 1D with a single element
69
+ # * never across threads (no _block_stride, _sdata, _sdata_raw, _REDUCE,
70
+ # _tid, _J, _i, _i_base, _j_offset, _J_offset, _j_stride, _J_stride)
71
+ # Also, the code is moved into a namespace so that clashes are minimized
72
+ # between the typedefs for the "template" variables.
73
+
74
+ # figure out the types
75
+ types = {}
76
+ in_param, out_param = rk.in_params[0], rk.out_params[0]
77
+ in_ctype = _get_type_info(in_param, in_dtype, types)
78
+ out_ctype = _get_type_info(out_param, out_dtype, types)
79
+ types = '\n'.join('typedef {} {};'.format(typ, name)
80
+ for name, typ in types.items())
81
+
82
+ return '''namespace reduction_kernel {{
83
+ {type_preamble}
84
+ {preamble}
85
+ __device__
86
+ void {name}({in_const} CArray<{in_ctype}, 1, true, true>& _raw_{in_name},
87
+ CArray<{out_ctype}, 1, true, true>& _raw_{out_name}) {{
88
+ // these are just provided so if they are available for the RK
89
+ CIndexer<1> _in_ind({{{size}}});
90
+ CIndexer<0> _out_ind;
91
+
92
+ #define REDUCE(a, b) ({reduce_expr})
93
+ #define POST_MAP(a) ({post_map_expr})
94
+ typedef {reduce_type} _type_reduce;
95
+ _type_reduce _s = _type_reduce({identity});
96
+ for (int _j = 0; _j < {size}; ++_j) {{
97
+ _in_ind.set(_j);
98
+ {in_const} {in_ctype}& {in_name} = _raw_{in_name}[_j];
99
+ _type_reduce _a = static_cast<_type_reduce>({pre_map_expr});
100
+ _s = REDUCE(_s, _a);
101
+ }}
102
+ _out_ind.set(0);
103
+ {out_ctype} &{out_name} = _raw_{out_name}[0];
104
+ POST_MAP(_s);
105
+ #undef REDUCE
106
+ #undef POST_MAP
107
+ }}
108
+ }}'''.format(
109
+ name=rk.name, type_preamble=types, preamble=rk.preamble,
110
+ in_const='const' if in_param.is_const else '',
111
+ in_ctype=in_ctype, in_name=in_param.name,
112
+ out_ctype=out_ctype, out_name=out_param.name,
113
+
114
+ pre_map_expr=rk.map_expr,
115
+ identity='' if rk.identity is None else rk.identity,
116
+ size=filter_size,
117
+ reduce_type=rk.reduce_type, reduce_expr=rk.reduce_expr,
118
+ post_map_expr=rk.post_map_expr,
119
+ )
120
+
121
+
122
+ def _get_type_info(param, dtype, types):
123
+ if param.dtype is not None:
124
+ return param.ctype
125
+ # Template type -> map to actual output type
126
+ ctype = cupy._core._scalar.get_typename(dtype)
127
+ types.setdefault(param.ctype, ctype)
128
+ return ctype
129
+
130
+
131
+ @_util.memoize(for_each_device=True)
132
+ def _get_generic_filter_raw(rk, filter_size, mode, wshape, offsets, cval,
133
+ int_type):
134
+ """Generic filter implementation based on a raw kernel."""
135
+ setup = '''
136
+ int iv = 0;
137
+ double values[{}];
138
+ double val_out;'''.format(filter_size)
139
+
140
+ sub_call = '''raw_kernel::{}(values, {}, &val_out);
141
+ y = cast<Y>(val_out);'''.format(rk.name, filter_size)
142
+
143
+ return _filters_core._generate_nd_kernel(
144
+ 'generic_{}_{}'.format(filter_size, rk.name),
145
+ setup, 'values[iv++] = cast<double>({value});', sub_call,
146
+ mode, wshape, int_type, offsets, cval,
147
+ preamble='namespace raw_kernel {{\n{}\n}}'.format(
148
+ # Users can test RawKernel independently, but when passed to here
149
+ # it must be used as a device function here. In fact, RawKernel
150
+ # wouldn't compile if code only contains device functions, so this
151
+ # is necessary.
152
+ rk.code.replace('__global__', '__device__')),
153
+ options=rk.options)
154
+
155
+
156
+ @_util.memoize(for_each_device=True)
157
+ def _get_generic_filter1d(rk, length, n_lines, filter_size, origin, mode, cval,
158
+ in_ctype, out_ctype, int_type):
159
+ """
160
+ The generic 1d filter is different than all other filters and thus is the
161
+ only filter that doesn't use _generate_nd_kernel() and has a completely
162
+ custom raw kernel.
163
+ """
164
+ in_length = length + filter_size - 1
165
+ start = filter_size // 2 + origin
166
+ end = start + length
167
+
168
+ if mode == 'constant':
169
+ boundary, boundary_early = '', '''
170
+ for (idx_t j = 0; j < {start}; ++j) {{ input_line[j] = {cval}; }}
171
+ for (idx_t j = {end}; j<{in_length}; ++j) {{ input_line[j] = {cval}; }}
172
+ '''.format(start=start, end=end, in_length=in_length, cval=cval)
173
+ else:
174
+ if length == 1:
175
+ a = b = 'j_ = 0;'
176
+ elif mode == 'reflect':
177
+ j = ('j_ = ({j}) % ({length} * 2);\n'
178
+ 'j_ = min(j_, 2 * {length} - 1 - j_);')
179
+ a = j.format(j='-1 - j_', length=length)
180
+ b = j.format(j='j_', length=length)
181
+ elif mode == 'mirror':
182
+ j = ('j_ = 1 + (({j}) - 1) % (({length} - 1) * 2);\n'
183
+ 'j_ = min(j_, 2 * {length} - 2 - j_);')
184
+ a = j.format(j='-j_', length=length)
185
+ b = j.format(j='j_', length=length)
186
+ elif mode == 'nearest':
187
+ a, b = 'j_ = 0;', 'j_ = {length}-1;'.format(length=length)
188
+ elif mode == 'wrap':
189
+ a = 'j_ = j_ % {length} + {length};'.format(length=length)
190
+ b = 'j_ = j_ % {length};'.format(length=length)
191
+ loop = '''for (idx_t j = {{}}; j < {{}}; ++j) {{{{
192
+ idx_t j_ = j - {start};
193
+ {{}}
194
+ input_line[j] = input_line[j_ + {start}];
195
+ }}}}'''.format(start=start)
196
+ boundary_early = ''
197
+ boundary = (loop.format(0, start, a) + '\n' +
198
+ loop.format(end, in_length, b))
199
+
200
+ name = 'generic1d_{}_{}_{}'.format(length, filter_size, rk.name)
201
+ if runtime.is_hip:
202
+ include_type_traits = ''
203
+ else:
204
+ include_type_traits = '''
205
+ #include <cupy/cuda_workaround.h> // provide C++ std:: coverage
206
+ '''
207
+ code = '''#include "cupy/carray.cuh"
208
+ #include "cupy/complex.cuh"
209
+ {include_type_traits}
210
+
211
+ namespace raw_kernel {{\n{rk_code}\n}}
212
+
213
+ {CAST}
214
+
215
+ typedef unsigned char byte;
216
+ typedef {in_ctype} X;
217
+ typedef {out_ctype} Y;
218
+ typedef {int_type} idx_t;
219
+
220
+ __device__ idx_t offset(idx_t i, idx_t axis, idx_t ndim,
221
+ const idx_t* shape, const idx_t* strides) {{
222
+ idx_t index = 0;
223
+ for (idx_t a = ndim; --a > 0; ) {{
224
+ if (a == axis) {{ continue; }}
225
+ index += (i % shape[a]) * strides[a];
226
+ i /= shape[a];
227
+ }}
228
+ return index + strides[0] * i;
229
+ }}
230
+
231
+ extern "C" __global__
232
+ void {name}(const byte* input, byte* output, const idx_t* x) {{
233
+ const idx_t axis = x[0], ndim = x[1],
234
+ *shape = x+2, *in_strides = x+2+ndim, *out_strides = x+2+2*ndim;
235
+
236
+ const idx_t in_elem_stride = in_strides[axis];
237
+ const idx_t out_elem_stride = out_strides[axis];
238
+
239
+ double input_line[{in_length}];
240
+ double output_line[{length}];
241
+ {boundary_early}
242
+
243
+ for (idx_t i = ((idx_t)blockIdx.x) * blockDim.x + threadIdx.x;
244
+ i < {n_lines};
245
+ i += ((idx_t)blockDim.x) * gridDim.x) {{
246
+ // Copy line from input (with boundary filling)
247
+ const byte* input_ = input + offset(i, axis, ndim, shape, in_strides);
248
+ for (idx_t j = 0; j < {length}; ++j) {{
249
+ input_line[j+{start}] = (double)*(X*)(input_+j*in_elem_stride);
250
+ }}
251
+ {boundary}
252
+
253
+ raw_kernel::{rk_name}(input_line, {in_length}, output_line, {length});
254
+
255
+ // Copy line to output
256
+ byte* output_ = output + offset(i, axis, ndim, shape, out_strides);
257
+ for (idx_t j = 0; j < {length}; ++j) {{
258
+ *(Y*)(output_+j*out_elem_stride) = cast<Y>(output_line[j]);
259
+ }}
260
+ }}
261
+ }}'''.format(n_lines=n_lines, length=length, in_length=in_length, start=start,
262
+ in_ctype=in_ctype, out_ctype=out_ctype, int_type=int_type,
263
+ boundary_early=boundary_early, boundary=boundary,
264
+ name=name, rk_name=rk.name,
265
+ # Users can test RawKernel independently, but when passed to here
266
+ # it must be used as a device function here. In fact, RawKernel
267
+ # wouldn't compile if code only contains device functions, so this
268
+ # is necessary.
269
+ rk_code=rk.code.replace('__global__', '__device__'),
270
+ include_type_traits=include_type_traits,
271
+ CAST=_filters_core._CAST_FUNCTION)
272
+ return cupy.RawKernel(code, name, ('--std=c++11',) + rk.options)
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_fourier.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy
2
+
3
+ import cupy
4
+ from cupy import _core
5
+ from cupy._core import internal
6
+ from cupyx.scipy.ndimage import _util
7
+ from cupyx.scipy import special
8
+
9
+
10
+ def _get_output_fourier(output, input, complex_only=False):
11
+ types = [cupy.complex64, cupy.complex128]
12
+ if not complex_only:
13
+ types += [cupy.float32, cupy.float64]
14
+
15
+ if output is None:
16
+ if input.dtype in types:
17
+ output = cupy.empty(input.shape, dtype=input.dtype)
18
+ else:
19
+ output = cupy.empty(input.shape, dtype=types[-1])
20
+ elif type(output) is type:
21
+ if output not in types:
22
+ raise RuntimeError('output type not supported')
23
+ output = cupy.empty(input.shape, dtype=output)
24
+ elif output.shape != input.shape:
25
+ raise RuntimeError('output shape not correct')
26
+ return output
27
+
28
+
29
+ def _reshape_nd(arr, ndim, axis):
30
+ """Promote a 1d array to ndim with non-singleton size along axis."""
31
+ nd_shape = (1,) * axis + (arr.size,) + (1,) * (ndim - axis - 1)
32
+ return arr.reshape(nd_shape)
33
+
34
+
35
+ def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
36
+ """Multidimensional Gaussian shift filter.
37
+
38
+ The array is multiplied with the Fourier transform of a (separable)
39
+ Gaussian kernel.
40
+
41
+ Args:
42
+ input (cupy.ndarray): The input array.
43
+ sigma (float or sequence of float): The sigma of the Gaussian kernel.
44
+ If a float, `sigma` is the same for all axes. If a sequence,
45
+ `sigma` has to contain one value for each axis.
46
+ n (int, optional): If `n` is negative (default), then the input is
47
+ assumed to be the result of a complex fft. If `n` is larger than or
48
+ equal to zero, the input is assumed to be the result of a real fft,
49
+ and `n` gives the length of the array before transformation along
50
+ the real transform direction.
51
+ axis (int, optional): The axis of the real transform (only used when
52
+ ``n > -1``).
53
+ output (cupy.ndarray, optional):
54
+ If given, the result of shifting the input is placed in this array.
55
+
56
+ Returns:
57
+ output (cupy.ndarray): The filtered output.
58
+ """
59
+ ndim = input.ndim
60
+ output = _get_output_fourier(output, input)
61
+ axis = internal._normalize_axis_index(axis, ndim)
62
+ sigmas = _util._fix_sequence_arg(sigma, ndim, 'sigma')
63
+
64
+ _core.elementwise_copy(input, output)
65
+ for ax, (sigmak, ax_size) in enumerate(zip(sigmas, output.shape)):
66
+
67
+ # compute the frequency grid in Hz
68
+ if ax == axis and n > 0:
69
+ arr = cupy.arange(ax_size, dtype=output.real.dtype)
70
+ arr /= n
71
+ else:
72
+ arr = cupy.fft.fftfreq(ax_size)
73
+ arr = arr.astype(output.real.dtype, copy=False)
74
+
75
+ # compute the Gaussian weights
76
+ arr *= arr
77
+ scale = sigmak * sigmak / -2
78
+ arr *= (4 * numpy.pi * numpy.pi) * scale
79
+ cupy.exp(arr, out=arr)
80
+
81
+ # reshape for broadcasting
82
+ arr = _reshape_nd(arr, ndim=ndim, axis=ax)
83
+ output *= arr
84
+
85
+ return output
86
+
87
+
88
+ def fourier_uniform(input, size, n=-1, axis=-1, output=None):
89
+ """Multidimensional uniform shift filter.
90
+
91
+ The array is multiplied with the Fourier transform of a box of given size.
92
+
93
+ Args:
94
+ input (cupy.ndarray): The input array.
95
+ size (float or sequence of float): The sigma of the box used for
96
+ filtering. If a float, `size` is the same for all axes. If a
97
+ sequence, `size` has to contain one value for each axis.
98
+ n (int, optional): If `n` is negative (default), then the input is
99
+ assumed to be the result of a complex fft. If `n` is larger than or
100
+ equal to zero, the input is assumed to be the result of a real fft,
101
+ and `n` gives the length of the array before transformation along
102
+ the real transform direction.
103
+ axis (int, optional): The axis of the real transform (only used when
104
+ ``n > -1``).
105
+ output (cupy.ndarray, optional):
106
+ If given, the result of shifting the input is placed in this array.
107
+
108
+ Returns:
109
+ output (cupy.ndarray): The filtered output.
110
+ """
111
+ ndim = input.ndim
112
+ output = _get_output_fourier(output, input)
113
+ axis = internal._normalize_axis_index(axis, ndim)
114
+ sizes = _util._fix_sequence_arg(size, ndim, 'size')
115
+
116
+ _core.elementwise_copy(input, output)
117
+ for ax, (size, ax_size) in enumerate(zip(sizes, output.shape)):
118
+
119
+ # compute the frequency grid in Hz
120
+ if ax == axis and n > 0:
121
+ arr = cupy.arange(ax_size, dtype=output.real.dtype)
122
+ arr /= n
123
+ else:
124
+ arr = cupy.fft.fftfreq(ax_size)
125
+ arr = arr.astype(output.real.dtype, copy=False)
126
+
127
+ # compute the uniform filter weights
128
+ arr *= size
129
+ cupy.sinc(arr, out=arr)
130
+
131
+ # reshape for broadcasting
132
+ arr = _reshape_nd(arr, ndim=ndim, axis=ax)
133
+ output *= arr
134
+
135
+ return output
136
+
137
+
138
+ def fourier_shift(input, shift, n=-1, axis=-1, output=None):
139
+ """Multidimensional Fourier shift filter.
140
+
141
+ The array is multiplied with the Fourier transform of a shift operation.
142
+
143
+ Args:
144
+ input (cupy.ndarray): The input array. This should be in the Fourier
145
+ domain.
146
+ shift (float or sequence of float): The size of shift. If a float,
147
+ `shift` is the same for all axes. If a sequence, `shift` has to
148
+ contain one value for each axis.
149
+ n (int, optional): If `n` is negative (default), then the input is
150
+ assumed to be the result of a complex fft. If `n` is larger than or
151
+ equal to zero, the input is assumed to be the result of a real fft,
152
+ and `n` gives the length of the array before transformation along
153
+ the real transform direction.
154
+ axis (int, optional): The axis of the real transform (only used when
155
+ ``n > -1``).
156
+ output (cupy.ndarray, optional):
157
+ If given, the result of shifting the input is placed in this array.
158
+
159
+ Returns:
160
+ output (cupy.ndarray): The shifted output (in the Fourier domain).
161
+ """
162
+ ndim = input.ndim
163
+ output = _get_output_fourier(output, input, complex_only=True)
164
+ axis = internal._normalize_axis_index(axis, ndim)
165
+ shifts = _util._fix_sequence_arg(shift, ndim, 'shift')
166
+
167
+ _core.elementwise_copy(input, output)
168
+ for ax, (shiftk, ax_size) in enumerate(zip(shifts, output.shape)):
169
+ if shiftk == 0:
170
+ continue
171
+ if ax == axis and n > 0:
172
+ # cp.fft.rfftfreq(ax_size) * (-2j * numpy.pi * shiftk * ax_size/n)
173
+ arr = cupy.arange(ax_size, dtype=output.dtype)
174
+ arr *= -2j * numpy.pi * shiftk / n
175
+ else:
176
+ arr = cupy.fft.fftfreq(ax_size)
177
+ arr = arr * (-2j * numpy.pi * shiftk)
178
+ cupy.exp(arr, out=arr)
179
+
180
+ # reshape for broadcasting
181
+ arr = _reshape_nd(arr, ndim=ndim, axis=ax)
182
+ output *= arr
183
+
184
+ return output
185
+
186
+
187
+ def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
188
+ """Multidimensional ellipsoid Fourier filter.
189
+
190
+ The array is multiplied with the fourier transform of a ellipsoid of
191
+ given sizes.
192
+
193
+ Args:
194
+ input (cupy.ndarray): The input array.
195
+ size (float or sequence of float): The size of the box used for
196
+ filtering. If a float, `size` is the same for all axes. If a
197
+ sequence, `size` has to contain one value for each axis.
198
+ n (int, optional): If `n` is negative (default), then the input is
199
+ assumed to be the result of a complex fft. If `n` is larger than or
200
+ equal to zero, the input is assumed to be the result of a real fft,
201
+ and `n` gives the length of the array before transformation along
202
+ the real transform direction.
203
+ axis (int, optional): The axis of the real transform (only used when
204
+ ``n > -1``).
205
+ output (cupy.ndarray, optional):
206
+ If given, the result of shifting the input is placed in this array.
207
+
208
+ Returns:
209
+ output (cupy.ndarray): The filtered output.
210
+ """
211
+ ndim = input.ndim
212
+ if ndim == 1:
213
+ return fourier_uniform(input, size, n, axis, output)
214
+
215
+ if ndim > 3:
216
+ # Note: SciPy currently does not do any filtering on >=4d inputs, but
217
+ # does not warn about this!
218
+ raise NotImplementedError('Only 1d, 2d and 3d inputs are supported')
219
+ output = _get_output_fourier(output, input)
220
+ axis = internal._normalize_axis_index(axis, ndim)
221
+ sizes = _util._fix_sequence_arg(size, ndim, 'size')
222
+
223
+ _core.elementwise_copy(input, output)
224
+
225
+ # compute the distance from the origin for all samples in Fourier space
226
+ distance = 0
227
+ for ax, (size, ax_size) in enumerate(zip(sizes, output.shape)):
228
+ # compute the frequency grid in Hz
229
+ if ax == axis and n > 0:
230
+ arr = cupy.arange(ax_size, dtype=output.real.dtype)
231
+ arr *= numpy.pi * size / n
232
+ else:
233
+ arr = cupy.fft.fftfreq(ax_size)
234
+ arr *= numpy.pi * size
235
+ arr = arr.astype(output.real.dtype, copy=False)
236
+ arr *= arr
237
+ arr = _reshape_nd(arr, ndim=ndim, axis=ax)
238
+ distance = distance + arr
239
+ cupy.sqrt(distance, out=distance)
240
+
241
+ if ndim == 2:
242
+ special.j1(distance, out=output)
243
+ output *= 2
244
+ output /= distance
245
+ elif ndim == 3:
246
+ cupy.sin(distance, out=output)
247
+ output -= distance * cupy.cos(distance)
248
+ output *= 3
249
+ output /= distance ** 3
250
+ output[(0,) * ndim] = 1.0 # avoid NaN in corner at frequency=0 location
251
+ output *= input
252
+
253
+ return output
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interp_kernels.py ADDED
@@ -0,0 +1,598 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy
2
+
3
+ import cupy
4
+ import cupy._core.internal
5
+
6
+ from cupyx.scipy.ndimage import _spline_prefilter_core
7
+ from cupyx.scipy.ndimage import _spline_kernel_weights
8
+ from cupyx.scipy.ndimage import _util
9
+
10
+ math_constants_preamble = r'''
11
+ // workaround for HIP: line begins with #include
12
+ #include <cupy/math_constants.h>
13
+ '''
14
+
15
+ spline_weights_inline = _spline_kernel_weights.spline_weights_inline
16
+
17
+
18
+ def _get_coord_map(ndim, nprepad=0):
19
+ """Extract target coordinate from coords array (for map_coordinates).
20
+
21
+ Notes
22
+ -----
23
+ Assumes the following variables have been initialized on the device::
24
+
25
+ coords (ndarray): array of shape (ncoords, ndim) containing the target
26
+ coordinates.
27
+ c_j: variables to hold the target coordinates
28
+
29
+ computes::
30
+
31
+ c_j = coords[i + j * ncoords];
32
+
33
+ ncoords is determined by the size of the output array, y.
34
+ y will be indexed by the CIndexer, _ind.
35
+ Thus ncoords = _ind.size();
36
+
37
+ """
38
+ ops = []
39
+ ops.append('ptrdiff_t ncoords = _ind.size();')
40
+ pre = f" + (W){nprepad}" if nprepad > 0 else ''
41
+ for j in range(ndim):
42
+ ops.append(f'''
43
+ W c_{j} = coords[i + {j} * ncoords]{pre};''')
44
+ return ops
45
+
46
+
47
+ def _get_coord_zoom_and_shift(ndim, nprepad=0):
48
+ """Compute target coordinate based on a shift followed by a zoom.
49
+
50
+ This version zooms from the center of the edge pixels.
51
+
52
+ Notes
53
+ -----
54
+ Assumes the following variables have been initialized on the device::
55
+
56
+ in_coord[ndim]: array containing the source coordinate
57
+ zoom[ndim]: array containing the zoom for each axis
58
+ shift[ndim]: array containing the zoom for each axis
59
+
60
+ computes::
61
+
62
+ c_j = zoom[j] * (in_coord[j] - shift[j])
63
+
64
+ """
65
+ ops = []
66
+ pre = f" + (W){nprepad}" if nprepad > 0 else ''
67
+ for j in range(ndim):
68
+ ops.append(f'''
69
+ W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[{j}]){pre};''')
70
+ return ops
71
+
72
+
73
+ def _get_coord_zoom_and_shift_grid(ndim, nprepad=0):
74
+ """Compute target coordinate based on a shift followed by a zoom.
75
+
76
+ This version zooms from the outer edges of the grid pixels.
77
+
78
+ Notes
79
+ -----
80
+ Assumes the following variables have been initialized on the device::
81
+
82
+ in_coord[ndim]: array containing the source coordinate
83
+ zoom[ndim]: array containing the zoom for each axis
84
+ shift[ndim]: array containing the zoom for each axis
85
+
86
+ computes::
87
+
88
+ c_j = zoom[j] * (in_coord[j] - shift[j] + 0.5) - 0.5
89
+
90
+ """
91
+ ops = []
92
+ pre = f" + (W){nprepad}" if nprepad > 0 else ''
93
+ for j in range(ndim):
94
+ ops.append(f'''
95
+ W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[j] + 0.5) - 0.5{pre};''')
96
+ return ops
97
+
98
+
99
+ def _get_coord_zoom(ndim, nprepad=0):
100
+ """Compute target coordinate based on a zoom.
101
+
102
+ This version zooms from the center of the edge pixels.
103
+
104
+ Notes
105
+ -----
106
+ Assumes the following variables have been initialized on the device::
107
+
108
+ in_coord[ndim]: array containing the source coordinate
109
+ zoom[ndim]: array containing the zoom for each axis
110
+
111
+ computes::
112
+
113
+ c_j = zoom[j] * in_coord[j]
114
+
115
+ """
116
+ ops = []
117
+ pre = f" + (W){nprepad}" if nprepad > 0 else ''
118
+ for j in range(ndim):
119
+ ops.append(f'''
120
+ W c_{j} = zoom[{j}] * (W)in_coord[{j}]{pre};''')
121
+ return ops
122
+
123
+
124
+ def _get_coord_zoom_grid(ndim, nprepad=0):
125
+ """Compute target coordinate based on a zoom (grid_mode=True version).
126
+
127
+ This version zooms from the outer edges of the grid pixels.
128
+
129
+ Notes
130
+ -----
131
+ Assumes the following variables have been initialized on the device::
132
+
133
+ in_coord[ndim]: array containing the source coordinate
134
+ zoom[ndim]: array containing the zoom for each axis
135
+
136
+ computes::
137
+
138
+ c_j = zoom[j] * (in_coord[j] + 0.5) - 0.5
139
+
140
+ """
141
+ ops = []
142
+ pre = f" + (W){nprepad}" if nprepad > 0 else ''
143
+ for j in range(ndim):
144
+ ops.append(f'''
145
+ W c_{j} = zoom[{j}] * ((W)in_coord[{j}] + 0.5) - 0.5{pre};''')
146
+ return ops
147
+
148
+
149
+ def _get_coord_shift(ndim, nprepad=0):
150
+ """Compute target coordinate based on a shift.
151
+
152
+ Notes
153
+ -----
154
+ Assumes the following variables have been initialized on the device::
155
+
156
+ in_coord[ndim]: array containing the source coordinate
157
+ shift[ndim]: array containing the zoom for each axis
158
+
159
+ computes::
160
+
161
+ c_j = in_coord[j] - shift[j]
162
+
163
+ """
164
+ ops = []
165
+ pre = f" + (W){nprepad}" if nprepad > 0 else ''
166
+ for j in range(ndim):
167
+ ops.append(f'''
168
+ W c_{j} = (W)in_coord[{j}] - shift[{j}]{pre};''')
169
+ return ops
170
+
171
+
172
+ def _get_coord_affine(ndim, nprepad=0):
173
+ """Compute target coordinate based on a homogeneous transformation matrix.
174
+
175
+ The homogeneous matrix has shape (ndim, ndim + 1). It corresponds to
176
+ affine matrix where the last row of the affine is assumed to be:
177
+ ``[0] * ndim + [1]``.
178
+
179
+ Notes
180
+ -----
181
+ Assumes the following variables have been initialized on the device::
182
+
183
+ mat(array): array containing the (ndim, ndim + 1) transform matrix.
184
+ in_coords(array): coordinates of the input
185
+
186
+ For example, in 2D:
187
+
188
+ c_0 = mat[0] * in_coords[0] + mat[1] * in_coords[1] + aff[2];
189
+ c_1 = mat[3] * in_coords[0] + mat[4] * in_coords[1] + aff[5];
190
+
191
+ """
192
+ ops = []
193
+ pre = f" + (W){nprepad}" if nprepad > 0 else ''
194
+ ncol = ndim + 1
195
+ for j in range(ndim):
196
+ ops.append(f'''
197
+ W c_{j} = (W)0.0;''')
198
+ for k in range(ndim):
199
+ ops.append(f'''
200
+ c_{j} += mat[{ncol * j + k}] * (W)in_coord[{k}];''')
201
+ ops.append(f'''
202
+ c_{j} += mat[{ncol * j + ndim}]{pre};''')
203
+ return ops
204
+
205
+
206
+ def _unravel_loop_index(shape, uint_t='unsigned int'):
207
+ """
208
+ declare a multi-index array in_coord and unravel the 1D index, i into it.
209
+ This code assumes that the array is a C-ordered array.
210
+ """
211
+ ndim = len(shape)
212
+ code = [f'''
213
+ {uint_t} in_coord[{ndim}];
214
+ {uint_t} s, t, idx = i;''']
215
+ for j in range(ndim - 1, 0, -1):
216
+ code.append(f'''
217
+ s = {shape[j]};
218
+ t = idx / s;
219
+ in_coord[{j}] = idx - t * s;
220
+ idx = t;''')
221
+ code.append('''
222
+ in_coord[0] = idx;''')
223
+ return '\n'.join(code)
224
+
225
+
226
+ def _generate_interp_custom(coord_func, ndim, large_int, yshape, mode, cval,
227
+ order, name='', integer_output=False, nprepad=0,
228
+ omit_in_coord=False):
229
+ """
230
+ Args:
231
+ coord_func (function): generates code to do the coordinate
232
+ transformation. See for example, `_get_coord_shift`.
233
+ ndim (int): The number of dimensions.
234
+ large_int (bool): If true use Py_ssize_t instead of int for indexing.
235
+ yshape (tuple): Shape of the output array.
236
+ mode (str): Signal extension mode to use at the array boundaries
237
+ cval (float): constant value used when `mode == 'constant'`.
238
+ name (str): base name for the interpolation kernel
239
+ integer_output (bool): boolean indicating whether the output has an
240
+ integer type.
241
+ nprepad (int): integer indicating the amount of prepadding at the
242
+ boundaries.
243
+
244
+ Returns:
245
+ operation (str): code body for the ElementwiseKernel
246
+ name (str): name for the ElementwiseKernel
247
+ """
248
+
249
+ ops = []
250
+ internal_dtype = 'double' if integer_output else 'Y'
251
+ ops.append(f'{internal_dtype} out = 0.0;')
252
+
253
+ if large_int:
254
+ uint_t = 'size_t'
255
+ int_t = 'ptrdiff_t'
256
+ else:
257
+ uint_t = 'unsigned int'
258
+ int_t = 'int'
259
+
260
+ # determine strides for x along each axis
261
+ for j in range(ndim):
262
+ ops.append(f'const {int_t} xsize_{j} = x.shape()[{j}];')
263
+ ops.append(f'const {uint_t} sx_{ndim - 1} = 1;')
264
+ for j in range(ndim - 1, 0, -1):
265
+ ops.append(f'const {uint_t} sx_{j - 1} = sx_{j} * xsize_{j};')
266
+
267
+ if not omit_in_coord:
268
+ # create in_coords array to store the unraveled indices
269
+ ops.append(_unravel_loop_index(yshape, uint_t))
270
+
271
+ # compute the transformed (target) coordinates, c_j
272
+ ops = ops + coord_func(ndim, nprepad)
273
+
274
+ if cval is numpy.nan:
275
+ cval = '(Y)CUDART_NAN'
276
+ elif cval == numpy.inf:
277
+ cval = '(Y)CUDART_INF'
278
+ elif cval == -numpy.inf:
279
+ cval = '(Y)(-CUDART_INF)'
280
+ else:
281
+ cval = f'({internal_dtype}){cval}'
282
+
283
+ if mode == 'constant':
284
+ # use cval if coordinate is outside the bounds of x
285
+ _cond = ' || '.join(
286
+ [f'(c_{j} < 0) || (c_{j} > xsize_{j} - 1)' for j in range(ndim)])
287
+ ops.append(f'''
288
+ if ({_cond})
289
+ {{
290
+ out = {cval};
291
+ }}
292
+ else
293
+ {{''')
294
+
295
+ if order == 0:
296
+ if mode == 'wrap':
297
+ ops.append('double dcoord;') # mode 'wrap' requires this to work
298
+ for j in range(ndim):
299
+ # determine nearest neighbor
300
+ if mode == 'wrap':
301
+ ops.append(f'''
302
+ dcoord = c_{j};''')
303
+ else:
304
+ ops.append(f'''
305
+ {int_t} cf_{j} = ({int_t})floor((double)c_{j} + 0.5);''')
306
+
307
+ # handle boundary
308
+ if mode != 'constant':
309
+ if mode == 'wrap':
310
+ ixvar = 'dcoord'
311
+ float_ix = True
312
+ else:
313
+ ixvar = f'cf_{j}'
314
+ float_ix = False
315
+ ops.append(
316
+ _util._generate_boundary_condition_ops(
317
+ mode, ixvar, f'xsize_{j}', int_t, float_ix))
318
+ if mode == 'wrap':
319
+ ops.append(f'''
320
+ {int_t} cf_{j} = ({int_t})floor(dcoord + 0.5);''')
321
+
322
+ # sum over ic_j will give the raveled coordinate in the input
323
+ ops.append(f'''
324
+ {int_t} ic_{j} = cf_{j} * sx_{j};''')
325
+ _coord_idx = ' + '.join([f'ic_{j}' for j in range(ndim)])
326
+ if mode == 'grid-constant':
327
+ _cond = ' || '.join([f'(ic_{j} < 0)' for j in range(ndim)])
328
+ ops.append(f'''
329
+ if ({_cond}) {{
330
+ out = {cval};
331
+ }} else {{
332
+ out = ({internal_dtype})x[{_coord_idx}];
333
+ }}''')
334
+ else:
335
+ ops.append(f'''
336
+ out = ({internal_dtype})x[{_coord_idx}];''')
337
+
338
+ elif order == 1:
339
+ for j in range(ndim):
340
+ # get coordinates for linear interpolation along axis j
341
+ ops.append(f'''
342
+ {int_t} cf_{j} = ({int_t})floor((double)c_{j});
343
+ {int_t} cc_{j} = cf_{j} + 1;
344
+ {int_t} n_{j} = (c_{j} == cf_{j}) ? 1 : 2; // points needed
345
+ ''')
346
+
347
+ if mode == 'wrap':
348
+ ops.append(f'''
349
+ double dcoordf = c_{j};
350
+ double dcoordc = c_{j} + 1;''')
351
+ else:
352
+ # handle boundaries for extension modes.
353
+ ops.append(f'''
354
+ {int_t} cf_bounded_{j} = cf_{j};
355
+ {int_t} cc_bounded_{j} = cc_{j};''')
356
+
357
+ if mode != 'constant':
358
+ if mode == 'wrap':
359
+ ixvar = 'dcoordf'
360
+ float_ix = True
361
+ else:
362
+ ixvar = f'cf_bounded_{j}'
363
+ float_ix = False
364
+ ops.append(
365
+ _util._generate_boundary_condition_ops(
366
+ mode, ixvar, f'xsize_{j}', int_t, float_ix))
367
+
368
+ ixvar = 'dcoordc' if mode == 'wrap' else f'cc_bounded_{j}'
369
+ ops.append(
370
+ _util._generate_boundary_condition_ops(
371
+ mode, ixvar, f'xsize_{j}', int_t, float_ix))
372
+ if mode == 'wrap':
373
+ ops.append(
374
+ f'''
375
+ {int_t} cf_bounded_{j} = ({int_t})floor(dcoordf);;
376
+ {int_t} cc_bounded_{j} = ({int_t})floor(dcoordf + 1);;
377
+ '''
378
+ )
379
+
380
+ ops.append(f'''
381
+ for (int s_{j} = 0; s_{j} < n_{j}; s_{j}++)
382
+ {{
383
+ W w_{j};
384
+ {int_t} ic_{j};
385
+ if (s_{j} == 0)
386
+ {{
387
+ w_{j} = (W)cc_{j} - c_{j};
388
+ ic_{j} = cf_bounded_{j} * sx_{j};
389
+ }} else
390
+ {{
391
+ w_{j} = c_{j} - (W)cf_{j};
392
+ ic_{j} = cc_bounded_{j} * sx_{j};
393
+ }}''')
394
+ elif order > 1:
395
+ if mode == 'grid-constant':
396
+ spline_mode = 'constant'
397
+ elif mode == 'nearest':
398
+ spline_mode = 'nearest'
399
+ else:
400
+ spline_mode = _spline_prefilter_core._get_spline_mode(mode)
401
+
402
+ # wx, wy are temporary variables used during spline weight computation
403
+ ops.append(f'''
404
+ W wx, wy;
405
+ {int_t} start;''')
406
+ for j in range(ndim):
407
+ # determine weights along the current axis
408
+ ops.append(f'''
409
+ W weights_{j}[{order + 1}];''')
410
+ ops.append(spline_weights_inline[order].format(j=j, order=order))
411
+
412
+ # get starting coordinate for spline interpolation along axis j
413
+ if mode in ['wrap']:
414
+ ops.append(f'double dcoord = c_{j};')
415
+ coord_var = 'dcoord'
416
+ ops.append(
417
+ _util._generate_boundary_condition_ops(
418
+ mode, coord_var, f'xsize_{j}', int_t, True))
419
+ else:
420
+ coord_var = f'(double)c_{j}'
421
+
422
+ if order & 1:
423
+ op_str = '''
424
+ start = ({int_t})floor({coord_var}) - {order_2};'''
425
+ else:
426
+ op_str = '''
427
+ start = ({int_t})floor({coord_var} + 0.5) - {order_2};'''
428
+ ops.append(
429
+ op_str.format(
430
+ int_t=int_t, coord_var=coord_var, order_2=order // 2
431
+ ))
432
+
433
+ # set of coordinate values within spline footprint along axis j
434
+ ops.append(f'''{int_t} ci_{j}[{order + 1}];''')
435
+ for k in range(order + 1):
436
+ ixvar = f'ci_{j}[{k}]'
437
+ ops.append(f'''
438
+ {ixvar} = start + {k};''')
439
+ ops.append(
440
+ _util._generate_boundary_condition_ops(
441
+ spline_mode, ixvar, f'xsize_{j}', int_t))
442
+
443
+ # loop over the order + 1 values in the spline filter
444
+ ops.append(f'''
445
+ W w_{j};
446
+ {int_t} ic_{j};
447
+ for (int k_{j} = 0; k_{j} <= {order}; k_{j}++)
448
+ {{
449
+ w_{j} = weights_{j}[k_{j}];
450
+ ic_{j} = ci_{j}[k_{j}] * sx_{j};
451
+ ''')
452
+
453
+ if order > 0:
454
+
455
+ _weight = ' * '.join([f'w_{j}' for j in range(ndim)])
456
+ _coord_idx = ' + '.join([f'ic_{j}' for j in range(ndim)])
457
+ if mode == 'grid-constant' or (order > 1 and mode == 'constant'):
458
+ _cond = ' || '.join([f'(ic_{j} < 0)' for j in range(ndim)])
459
+ ops.append(f'''
460
+ if ({_cond}) {{
461
+ out += {cval} * ({internal_dtype})({_weight});
462
+ }} else {{
463
+ {internal_dtype} val = ({internal_dtype})x[{_coord_idx}];
464
+ out += val * ({internal_dtype})({_weight});
465
+ }}''')
466
+ else:
467
+ ops.append(f'''
468
+ {internal_dtype} val = ({internal_dtype})x[{_coord_idx}];
469
+ out += val * ({internal_dtype})({_weight});''')
470
+
471
+ ops.append('}' * ndim)
472
+
473
+ if mode == 'constant':
474
+ ops.append('}')
475
+
476
+ if integer_output:
477
+ ops.append('y = (Y)rint((double)out);')
478
+ else:
479
+ ops.append('y = (Y)out;')
480
+ operation = '\n'.join(ops)
481
+
482
+ mode_str = mode.replace('-', '_') # avoid hyphen in kernel name
483
+ name = 'cupyx_scipy_ndimage_interpolate_{}_order{}_{}_{}d_y{}'.format(
484
+ name, order, mode_str, ndim, '_'.join([f'{j}' for j in yshape]),
485
+ )
486
+ if uint_t == 'size_t':
487
+ name += '_i64'
488
+ return operation, name
489
+
490
+
491
@cupy._util.memoize(for_each_device=True)
def _get_map_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1,
                    integer_output=False, nprepad=0):
    """Return the ElementwiseKernel used by ``map_coordinates``.

    The coordinates are supplied explicitly via the ``coords`` argument, so
    the generated code does not need to unravel the output index
    (``omit_in_coord=True``).
    """
    operation, name = _generate_interp_custom(
        coord_func=_get_coord_map, ndim=ndim, large_int=large_int,
        yshape=yshape, mode=mode, cval=cval, order=order, name='map',
        integer_output=integer_output, nprepad=nprepad,
        omit_in_coord=True)  # input image coordinates are not needed
    return cupy.ElementwiseKernel(
        'raw X x, raw W coords', 'Y y', operation, name,
        preamble=math_constants_preamble)
511
+
512
+
513
@cupy._util.memoize(for_each_device=True)
def _get_shift_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1,
                      integer_output=False, nprepad=0):
    """Return the ElementwiseKernel applying a constant per-axis shift."""
    operation, name = _generate_interp_custom(
        coord_func=_get_coord_shift, ndim=ndim, large_int=large_int,
        yshape=yshape, mode=mode, cval=cval, order=order, name='shift',
        integer_output=integer_output, nprepad=nprepad)
    return cupy.ElementwiseKernel(
        'raw X x, raw W shift', 'Y y', operation, name,
        preamble=math_constants_preamble)
532
+
533
+
534
@cupy._util.memoize(for_each_device=True)
def _get_zoom_shift_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1,
                           integer_output=False, grid_mode=False, nprepad=0):
    """Return the ElementwiseKernel combining a shift with a per-axis zoom."""
    # grid_mode selects the "pixel grid" coordinate convention
    coord_fn = (_get_coord_zoom_and_shift_grid if grid_mode
                else _get_coord_zoom_and_shift)
    kernel_tag = "zoom_shift_grid" if grid_mode else "zoom_shift"
    operation, name = _generate_interp_custom(
        coord_func=coord_fn, ndim=ndim, large_int=large_int, yshape=yshape,
        mode=mode, cval=cval, order=order, name=kernel_tag,
        integer_output=integer_output, nprepad=nprepad)
    return cupy.ElementwiseKernel(
        'raw X x, raw W shift, raw W zoom', 'Y y', operation, name,
        preamble=math_constants_preamble)
557
+
558
+
559
@cupy._util.memoize(for_each_device=True)
def _get_zoom_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1,
                     integer_output=False, grid_mode=False, nprepad=0):
    """Return the ElementwiseKernel scaling coordinates by a zoom factor."""
    # grid_mode selects the "pixel grid" coordinate convention
    if grid_mode:
        coord_fn, kernel_tag = _get_coord_zoom_grid, "zoom_grid"
    else:
        coord_fn, kernel_tag = _get_coord_zoom, "zoom"
    operation, name = _generate_interp_custom(
        coord_func=coord_fn, ndim=ndim, large_int=large_int, yshape=yshape,
        mode=mode, cval=cval, order=order, name=kernel_tag,
        integer_output=integer_output, nprepad=nprepad)
    return cupy.ElementwiseKernel(
        'raw X x, raw W zoom', 'Y y', operation, name,
        preamble=math_constants_preamble)
578
+
579
+
580
@cupy._util.memoize(for_each_device=True)
def _get_affine_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1,
                       integer_output=False, nprepad=0):
    """Return the ElementwiseKernel applying an affine coordinate map."""
    operation, name = _generate_interp_custom(
        coord_func=_get_coord_affine, ndim=ndim, large_int=large_int,
        yshape=yshape, mode=mode, cval=cval, order=order, name='affine',
        integer_output=integer_output, nprepad=nprepad)
    return cupy.ElementwiseKernel(
        'raw X x, raw W mat', 'Y y', operation, name,
        preamble=math_constants_preamble)
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interpolation.py ADDED
@@ -0,0 +1,780 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import warnings
3
+
4
+ import cupy
5
+ import numpy
6
+
7
+ from cupy import _core
8
+ from cupy._core import internal
9
+ from cupy.cuda import runtime
10
+ from cupyx import _texture
11
+ from cupyx.scipy.ndimage import _util
12
+ from cupyx.scipy.ndimage import _interp_kernels
13
+ from cupyx.scipy.ndimage import _spline_prefilter_core
14
+
15
+ _prod = cupy._core.internal.prod
16
+
17
+
18
+ def _check_parameter(func_name, order, mode):
19
+ if order is None:
20
+ warnings.warn(f'Currently the default order of {func_name} is 1. In a '
21
+ 'future release this may change to 3 to match '
22
+ 'scipy.ndimage ')
23
+ elif order < 0 or 5 < order:
24
+ raise ValueError('spline order is not supported')
25
+
26
+ if mode not in ('constant', 'grid-constant', 'nearest', 'mirror',
27
+ 'reflect', 'grid-mirror', 'wrap', 'grid-wrap', 'opencv',
28
+ '_opencv_edge'):
29
+ raise ValueError('boundary mode ({}) is not supported'.format(mode))
30
+
31
+
32
def _get_spline_output(input, output):
    """Create workspace array, temp, and the final dtype for the output.

    Differs from SciPy by not always forcing the internal floating point dtype
    to be double precision.

    Returns a tuple ``(temp, float_dtype, output_dtype)`` where ``temp`` is a
    C-contiguous floating-point working array (possibly ``output`` itself when
    it can safely be filtered in place).
    """
    complex_data = input.dtype.kind == 'c'
    # narrowest floating dtype the filter may run in (complex inputs need a
    # complex workspace)
    if complex_data:
        min_float_dtype = cupy.complex64
    else:
        min_float_dtype = cupy.float32
    if isinstance(output, cupy.ndarray):
        if complex_data and output.dtype.kind != 'c':
            raise ValueError(
                'output must have complex dtype for complex inputs'
            )
        float_dtype = cupy.promote_types(output.dtype, min_float_dtype)
        output_dtype = output.dtype
    else:
        # ``output`` is a dtype, or None meaning "match the input dtype"
        if output is None:
            output = output_dtype = input.dtype
        else:
            output_dtype = cupy.dtype(output)
        float_dtype = cupy.promote_types(output, min_float_dtype)

    # reuse the user-supplied output array as the workspace only when it
    # already has the required dtype and a C-contiguous layout
    if (isinstance(output, cupy.ndarray)
            and output.dtype == float_dtype == output_dtype
            and output.flags.c_contiguous):
        if output is not input:
            _core.elementwise_copy(input, output)
        temp = output
    else:
        temp = input.astype(float_dtype, copy=False)
        temp = cupy.ascontiguousarray(temp)
        # the filter runs in place, so the workspace must not alias the input
        if cupy.shares_memory(temp, input, 'MAY_SHARE_BOUNDS'):
            temp = temp.copy()
    return temp, float_dtype, output_dtype
69
+
70
+
71
def spline_filter1d(input, order=3, axis=-1, output=cupy.float64,
                    mode='mirror'):
    """
    Calculate a 1-D spline filter along the given axis.

    The lines of the array along the given axis are filtered by a
    spline filter. The order of the spline must be >= 2 and <= 5.

    Args:
        input (cupy.ndarray): The input array.
        order (int): The order of the spline interpolation, default is 3. Must
            be in the range 0-5.
        axis (int): The axis along which the spline filter is applied. Default
            is the last axis.
        output (cupy.ndarray or dtype, optional): The array in which to place
            the output, or the dtype of the returned array. Default is
            ``numpy.float64``.
        mode (str): Points outside the boundaries of the input are filled
            according to the given mode (``'constant'``, ``'nearest'``,
            ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
            ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).

    Returns:
        cupy.ndarray: The result of prefiltering the input.

    .. seealso:: :func:`scipy.ndimage.spline_filter1d`
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    x = input
    ndim = x.ndim
    axis = internal._normalize_axis_index(axis, ndim)

    # order 0, 1 don't require reshaping as no CUDA kernel will be called
    # scalar or size 1 arrays also don't need to be filtered
    run_kernel = not (order < 2 or x.ndim == 0 or x.shape[axis] == 1)
    if not run_kernel:
        output = _util._get_output(output, input)
        _core.elementwise_copy(x, output)
        return output

    temp, data_dtype, output_dtype = _get_spline_output(x, output)
    data_type = cupy._core._scalar.get_typename(temp.dtype)
    pole_type = cupy._core._scalar.get_typename(temp.real.dtype)

    index_type = _util._get_inttype(input)
    index_dtype = cupy.int32 if index_type == 'int' else cupy.int64

    # each "signal" is one 1-D line of samples along ``axis``
    n_samples = x.shape[axis]
    n_signals = x.size // n_samples
    info = cupy.array((n_signals, n_samples) + x.shape, dtype=index_dtype)

    # empirical choice of block size that seemed to work well
    block_size = max(2 ** math.ceil(numpy.log2(n_samples / 32)), 8)
    kern = _spline_prefilter_core.get_raw_spline1d_kernel(
        axis,
        ndim,
        mode,
        order=order,
        index_type=index_type,
        data_type=data_type,
        pole_type=pole_type,
        block_size=block_size,
    )

    # Due to recursive nature, a given line of data must be processed by a
    # single thread. n_signals lines will be processed in total.
    block = (block_size,)
    grid = ((n_signals + block[0] - 1) // block[0],)

    # apply prefilter gain
    poles = _spline_prefilter_core.get_poles(order=order)
    temp *= _spline_prefilter_core.get_gain(poles)

    # apply causal + anti-causal IIR spline filters (in place on ``temp``)
    kern(grid, block, (temp, info))

    if isinstance(output, cupy.ndarray) and temp is not output:
        # copy kernel output into the user-provided output array
        _core.elementwise_copy(temp, output)
        return output
    return temp.astype(output_dtype, copy=False)
153
+
154
+
155
def spline_filter(input, order=3, output=cupy.float64, mode='mirror'):
    """Multidimensional spline filter.

    Args:
        input (cupy.ndarray): The input array.
        order (int): The order of the spline interpolation, default is 3. Must
            be in the range 0-5.
        output (cupy.ndarray or dtype, optional): The array in which to place
            the output, or the dtype of the returned array. Default is
            ``numpy.float64``.
        mode (str): Points outside the boundaries of the input are filled
            according to the given mode (``'constant'``, ``'nearest'``,
            ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
            ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).

    Returns:
        cupy.ndarray: The result of prefiltering the input.

    .. seealso:: :func:`scipy.ndimage.spline_filter`
    """
    if order < 2 or order > 5:
        raise RuntimeError('spline order not supported')

    x = input
    temp, data_dtype, output_dtype = _get_spline_output(x, output)
    # NOTE: the ``order not in [0, 1]`` part is always True here because
    # order < 2 was rejected above; kept for parity with SciPy's structure.
    if order not in [0, 1] and input.ndim > 0:
        # filter separably, one axis at a time, accumulating into ``temp``
        for axis in range(x.ndim):
            spline_filter1d(x, order, axis, output=temp, mode=mode)
            x = temp
    if isinstance(output, cupy.ndarray):
        _core.elementwise_copy(temp, output)
    else:
        output = temp
    if output.dtype != output_dtype:
        output = output.astype(output_dtype)
    return output
191
+
192
+
193
def _check_coordinates(coordinates, order, allow_float32=True):
    """Coerce ``coordinates`` to a C-contiguous floating-point array.

    Integer coordinates are only promoted when a spline kernel (order > 1)
    requires floating-point input; non-numeric dtypes raise ``ValueError``.
    """
    min_float = cupy.float32 if allow_float32 else cupy.float64
    kind = coordinates.dtype.kind
    if kind == 'f':
        target = cupy.promote_types(coordinates.dtype, min_float)
        coordinates = coordinates.astype(target, copy=False)
    elif kind in 'iu':
        if order > 1:
            # order > 1 (spline) kernels require floating-point coordinates
            target = cupy.promote_types(coordinates.dtype, min_float)
            coordinates = coordinates.astype(target)
    else:
        raise ValueError('coordinates should have floating point dtype')
    if not coordinates.flags.c_contiguous:
        coordinates = cupy.ascontiguousarray(coordinates)
    return coordinates
217
+
218
+
219
def _prepad_for_spline_filter(input, mode, cval):
    """Pad ``input`` for boundary modes lacking an analytical prefilter
    boundary condition.

    Returns the (possibly padded) array and the pad width applied on every
    edge (0 when no padding was needed).
    """
    if mode not in ('nearest', 'grid-constant'):
        return input, 0
    # these modes need padding to get accurate boundary values
    npad = 12  # empirical factor chosen by SciPy
    if mode == 'grid-constant':
        padded = cupy.pad(input, npad, mode='constant', constant_values=cval)
    else:
        padded = cupy.pad(input, npad, mode='edge')
    return padded, npad
232
+
233
+
234
def _filter_input(image, prefilter, mode, cval, order):
    """Spline-prefilter ``image`` when needed.

    Spline orders > 1 need a prefiltering stage to preserve resolution. For
    boundary modes without analytical spline boundary conditions, the input is
    first prepadded (via ``cupy.pad``) to maintain accuracy. Returns the
    contiguous (filtered) image together with the per-edge padding amount.
    """
    if order < 2 or not prefilter:
        # no prefilter pass required; just guarantee contiguity
        return (cupy.ascontiguousarray(image), 0)
    padded, npad = _prepad_for_spline_filter(image, mode, cval)
    filtered = spline_filter(
        padded, order,
        output=cupy.promote_types(image.dtype, cupy.float32), mode=mode)
    return cupy.ascontiguousarray(filtered), npad
250
+
251
+
252
def map_coordinates(input, coordinates, output=None, order=3,
                    mode='constant', cval=0.0, prefilter=True):
    """Map the input array to new coordinates by interpolation.

    The array of coordinates is used to find, for each point in the output, the
    corresponding coordinates in the input. The value of the input at those
    coordinates is determined by spline interpolation of the requested order.

    The shape of the output is derived from that of the coordinate array by
    dropping the first axis. The values of the array along the first axis are
    the coordinates in the input array at which the output value is found.

    Args:
        input (cupy.ndarray): The input array.
        coordinates (array_like): The coordinates at which ``input`` is
            evaluated.
        output (cupy.ndarray or ~cupy.dtype): The array in which to place the
            output, or the dtype of the returned array.
        order (int): The order of the spline interpolation, default is 3. Must
            be in the range 0-5.
        mode (str): Points outside the boundaries of the input are filled
            according to the given mode (``'constant'``, ``'nearest'``,
            ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
            ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
        cval (scalar): Value used for points outside the boundaries of
            the input if ``mode='constant'`` or ``mode='opencv'``. Default is
            0.0
        prefilter (bool): Determines if the input array is prefiltered with
            ``spline_filter`` before interpolation. The default is True, which
            will create a temporary ``float64`` array of filtered values if
            ``order > 1``. If setting this to False, the output will be
            slightly blurred if ``order > 1``, unless the input is prefiltered,
            i.e. it is the result of calling ``spline_filter`` on the original
            input.

    Returns:
        cupy.ndarray:
            The result of transforming the input. The shape of the output is
            derived from that of ``coordinates`` by dropping the first axis.

    .. seealso:: :func:`scipy.ndimage.map_coordinates`
    """

    _check_parameter('map_coordinates', order, mode)

    if mode == 'opencv' or mode == '_opencv_edge':
        # emulate OpenCV by padding with one border pixel of ``cval`` and
        # shifting the coordinates accordingly, then using 'constant'
        input = cupy.pad(input, [(1, 1)] * input.ndim, 'constant',
                         constant_values=cval)
        coordinates = cupy.add(coordinates, 1)
        mode = 'constant'

    # output shape drops the leading (per-axis) coordinate dimension
    ret = _util._get_output(output, input, coordinates.shape[1:])
    integer_output = ret.dtype.kind in 'iu'
    _util._check_cval(mode, cval, integer_output)

    if input.dtype.kind in 'iu':
        # integer inputs are interpolated in float32
        input = input.astype(cupy.float32)
    coordinates = _check_coordinates(coordinates, order)
    filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
    # 64-bit indexing is only generated when sizes can exceed 2**31
    large_int = max(_prod(input.shape), coordinates.shape[0]) > 1 << 31
    kern = _interp_kernels._get_map_kernel(
        input.ndim, large_int, yshape=coordinates.shape, mode=mode, cval=cval,
        order=order, integer_output=integer_output, nprepad=nprepad)
    kern(filtered, coordinates, ret)
    return ret
317
+
318
+
319
def affine_transform(input, matrix, offset=0.0, output_shape=None, output=None,
                     order=3, mode='constant', cval=0.0, prefilter=True, *,
                     texture_memory=False):
    """Apply an affine transformation.

    Given an output image pixel index vector ``o``, the pixel value is
    determined from the input image at position
    ``cupy.dot(matrix, o) + offset``.

    Args:
        input (cupy.ndarray): The input array.
        matrix (cupy.ndarray): The inverse coordinate transformation matrix,
            mapping output coordinates to input coordinates. If ``ndim`` is the
            number of dimensions of ``input``, the given matrix must have one
            of the following shapes:

                - ``(ndim, ndim)``: the linear transformation matrix for each
                  output coordinate.
                - ``(ndim,)``: assume that the 2D transformation matrix is
                  diagonal, with the diagonal specified by the given value.
                - ``(ndim + 1, ndim + 1)``: assume that the transformation is
                  specified using homogeneous coordinates. In this case, any
                  value passed to ``offset`` is ignored.
                - ``(ndim, ndim + 1)``: as above, but the bottom row of a
                  homogeneous transformation matrix is always
                  ``[0, 0, ..., 1]``, and may be omitted.

        offset (float or sequence): The offset into the array where the
            transform is applied. If a float, ``offset`` is the same for each
            axis. If a sequence, ``offset`` should contain one value for each
            axis.
        output_shape (tuple of ints): Shape tuple.
        output (cupy.ndarray or ~cupy.dtype): The array in which to place the
            output, or the dtype of the returned array.
        order (int): The order of the spline interpolation, default is 3. Must
            be in the range 0-5.
        mode (str): Points outside the boundaries of the input are filled
            according to the given mode (``'constant'``, ``'nearest'``,
            ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
            ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
        cval (scalar): Value used for points outside the boundaries of
            the input if ``mode='constant'`` or ``mode='opencv'``. Default is
            0.0
        prefilter (bool): Determines if the input array is prefiltered with
            ``spline_filter`` before interpolation. The default is True, which
            will create a temporary ``float64`` array of filtered values if
            ``order > 1``. If setting this to False, the output will be
            slightly blurred if ``order > 1``, unless the input is prefiltered,
            i.e. it is the result of calling ``spline_filter`` on the original
            input.
        texture_memory (bool): If True, uses GPU texture memory. Supports only:

            - 2D and 3D float32 arrays as input
            - ``(ndim + 1, ndim + 1)`` homogeneous float32 transformation
              matrix
            - ``mode='constant'`` and ``mode='nearest'``
            - ``order=0`` (nearest neighbor) and ``order=1`` (linear
              interpolation)
            - NVIDIA CUDA GPUs

    Returns:
        cupy.ndarray or None:
            The transformed input. If ``output`` is given as a parameter,
            ``None`` is returned.

    .. seealso:: :func:`scipy.ndimage.affine_transform`
    """

    if texture_memory:
        if runtime.is_hip:
            raise RuntimeError(
                'HIP currently does not support texture acceleration')
        tm_interp = 'linear' if order > 0 else 'nearest'
        return _texture.affine_transformation(data=input,
                                              transformation_matrix=matrix,
                                              output_shape=output_shape,
                                              output=output,
                                              interpolation=tm_interp,
                                              mode=mode,
                                              border_value=cval)

    _check_parameter('affine_transform', order, mode)

    offset = _util._fix_sequence_arg(offset, input.ndim, 'offset', float)

    if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
        raise RuntimeError('no proper affine matrix provided')
    if matrix.ndim == 2:
        # normalize the homogeneous forms to an (ndim, ndim) matrix + offset
        if matrix.shape[0] == matrix.shape[1] - 1:
            offset = matrix[:, -1]
            matrix = matrix[:, :-1]
        elif matrix.shape[0] == input.ndim + 1:
            offset = matrix[:-1, -1]
            matrix = matrix[:-1, :-1]
        if matrix.shape != (input.ndim, input.ndim):
            raise RuntimeError('improper affine shape')

    if mode == 'opencv':
        # build the homogeneous matrix, invert it, and swap the first two
        # axes (NOTE(review): presumably converting between OpenCV's (x, y)
        # and the array (row, col) conventions — confirm against OpenCV docs)
        m = cupy.zeros((input.ndim + 1, input.ndim + 1))
        m[:-1, :-1] = matrix
        m[:-1, -1] = offset
        m[-1, -1] = 1
        m = cupy.linalg.inv(m)
        m[:2] = cupy.roll(m[:2], 1, axis=0)
        m[:2, :2] = cupy.roll(m[:2, :2], 1, axis=1)
        matrix = m[:-1, :-1]
        offset = m[:-1, -1]

    if output_shape is None:
        output_shape = input.shape

    if mode == 'opencv' or mode == '_opencv_edge':
        # delegate to map_coordinates on explicitly materialized coordinates
        if matrix.ndim == 1:
            matrix = cupy.diag(matrix)
        coordinates = cupy.indices(output_shape, dtype=cupy.float64)
        coordinates = cupy.dot(matrix, coordinates.reshape((input.ndim, -1)))
        coordinates += cupy.expand_dims(cupy.asarray(offset), -1)
        ret = _util._get_output(output, input, shape=output_shape)
        ret[:] = map_coordinates(input, coordinates, ret.dtype, order, mode,
                                 cval, prefilter).reshape(output_shape)
        return ret

    matrix = matrix.astype(cupy.float64, copy=False)
    ndim = input.ndim
    output = _util._get_output(output, input, shape=output_shape)
    if input.dtype.kind in 'iu':
        # integer inputs are interpolated in float32
        input = input.astype(cupy.float32)
    filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)

    integer_output = output.dtype.kind in 'iu'
    _util._check_cval(mode, cval, integer_output)
    # 64-bit indexing is only generated when sizes can exceed 2**31
    large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
    if matrix.ndim == 1:
        # diagonal matrix: reuse the (cheaper) zoom+shift kernel
        offset = cupy.asarray(offset, dtype=cupy.float64)
        offset = -offset / matrix
        kern = _interp_kernels._get_zoom_shift_kernel(
            ndim, large_int, output_shape, mode, cval=cval, order=order,
            integer_output=integer_output, nprepad=nprepad)
        kern(filtered, offset, matrix, output)
    else:
        kern = _interp_kernels._get_affine_kernel(
            ndim, large_int, output_shape, mode, cval=cval, order=order,
            integer_output=integer_output, nprepad=nprepad)
        # pack matrix and offset into one (ndim, ndim + 1) array for the kernel
        m = cupy.zeros((ndim, ndim + 1), dtype=cupy.float64)
        m[:, :-1] = matrix
        m[:, -1] = cupy.asarray(offset, dtype=cupy.float64)
        kern(filtered, m, output)
    return output
467
+
468
+
469
+ def _minmax(coor, minc, maxc):
470
+ if coor[0] < minc[0]:
471
+ minc[0] = coor[0]
472
+ if coor[0] > maxc[0]:
473
+ maxc[0] = coor[0]
474
+ if coor[1] < minc[1]:
475
+ minc[1] = coor[1]
476
+ if coor[1] > maxc[1]:
477
+ maxc[1] = coor[1]
478
+ return minc, maxc
479
+
480
+
481
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
           mode='constant', cval=0.0, prefilter=True):
    """Rotate an array.

    The array is rotated in the plane defined by the two axes given by the
    ``axes`` parameter using spline interpolation of the requested order.

    Args:
        input (cupy.ndarray): The input array.
        angle (float): The rotation angle in degrees.
        axes (tuple of 2 ints): The two axes that define the plane of rotation.
            Default is the first two axes.
        reshape (bool): If ``reshape`` is True, the output shape is adapted so
            that the input array is contained completely in the output. Default
            is True.
        output (cupy.ndarray or ~cupy.dtype): The array in which to place the
            output, or the dtype of the returned array.
        order (int): The order of the spline interpolation, default is 3. Must
            be in the range 0-5.
        mode (str): Points outside the boundaries of the input are filled
            according to the given mode (``'constant'``, ``'nearest'``,
            ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
            ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
        cval (scalar): Value used for points outside the boundaries of
            the input if ``mode='constant'`` or ``mode='opencv'``. Default is
            0.0
        prefilter (bool): Determines if the input array is prefiltered with
            ``spline_filter`` before interpolation. The default is True, which
            will create a temporary ``float64`` array of filtered values if
            ``order > 1``. If setting this to False, the output will be
            slightly blurred if ``order > 1``, unless the input is prefiltered,
            i.e. it is the result of calling ``spline_filter`` on the original
            input.

    Returns:
        cupy.ndarray or None:
            The rotated input.

    .. seealso:: :func:`scipy.ndimage.rotate`
    """

    _check_parameter('rotate', order, mode)

    if mode == 'opencv':
        mode = '_opencv_edge'

    input_arr = input
    # normalize negative axes and order the pair ascending
    axes = list(axes)
    if axes[0] < 0:
        axes[0] += input_arr.ndim
    if axes[1] < 0:
        axes[1] += input_arr.ndim
    if axes[0] > axes[1]:
        axes = [axes[1], axes[0]]
    if axes[0] < 0 or input_arr.ndim <= axes[1]:
        raise ValueError('invalid rotation plane specified')

    ndim = input_arr.ndim
    rad = numpy.deg2rad(angle)
    sin = math.sin(rad)
    cos = math.cos(rad)

    # determine offsets and output shape as in scipy.ndimage.rotate
    rot_matrix = numpy.array([[cos, sin],
                              [-sin, cos]])

    img_shape = numpy.asarray(input_arr.shape)
    in_plane_shape = img_shape[axes]
    if reshape:
        # Compute transformed input bounds
        iy, ix = in_plane_shape
        out_bounds = rot_matrix @ [[0, 0, iy, iy],
                                   [0, ix, 0, ix]]
        # Compute the shape of the transformed input plane
        out_plane_shape = (numpy.ptp(out_bounds, axis=1) +
                           0.5).astype(cupy.int64)
    else:
        out_plane_shape = img_shape[axes]

    # offset so the plane's center maps onto the output plane's center
    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
    in_center = (in_plane_shape - 1) / 2

    output_shape = img_shape
    output_shape[axes] = out_plane_shape
    output_shape = tuple(output_shape)

    # embed the 2-D rotation into an identity matrix over all ndim axes
    matrix = numpy.identity(ndim)
    matrix[axes[0], axes[0]] = cos
    matrix[axes[0], axes[1]] = sin
    matrix[axes[1], axes[0]] = -sin
    matrix[axes[1], axes[1]] = cos

    offset = numpy.zeros(ndim, dtype=cupy.float64)
    offset[axes] = in_center - out_center

    matrix = cupy.asarray(matrix)
    offset = cupy.asarray(offset)

    return affine_transform(input, matrix, offset, output_shape, output, order,
                            mode, cval, prefilter)
581
+
582
+
583
+ def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
584
+ prefilter=True):
585
+ """Shift an array.
586
+
587
+ The array is shifted using spline interpolation of the requested order.
588
+ Points outside the boundaries of the input are filled according to the
589
+ given mode.
590
+
591
+ Args:
592
+ input (cupy.ndarray): The input array.
593
+ shift (float or sequence): The shift along the axes. If a float,
594
+ ``shift`` is the same for each axis. If a sequence, ``shift``
595
+ should contain one value for each axis.
596
+ output (cupy.ndarray or ~cupy.dtype): The array in which to place the
597
+ output, or the dtype of the returned array.
598
+ order (int): The order of the spline interpolation, default is 3. Must
599
+ be in the range 0-5.
600
+ mode (str): Points outside the boundaries of the input are filled
601
+ according to the given mode (``'constant'``, ``'nearest'``,
602
+ ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
603
+ ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
604
+ cval (scalar): Value used for points outside the boundaries of
605
+ the input if ``mode='constant'`` or ``mode='opencv'``. Default is
606
+ 0.0
607
+ prefilter (bool): Determines if the input array is prefiltered with
608
+ ``spline_filter`` before interpolation. The default is True, which
609
+ will create a temporary ``float64`` array of filtered values if
610
+ ``order > 1``. If setting this to False, the output will be
611
+ slightly blurred if ``order > 1``, unless the input is prefiltered,
612
+ i.e. it is the result of calling ``spline_filter`` on the original
613
+ input.
614
+
615
+ Returns:
616
+ cupy.ndarray or None:
617
+ The shifted input.
618
+
619
+ .. seealso:: :func:`scipy.ndimage.shift`
620
+ """
621
+
622
+ _check_parameter('shift', order, mode)
623
+
624
+ shift = _util._fix_sequence_arg(shift, input.ndim, 'shift', float)
625
+
626
+ if mode == 'opencv':
627
+ mode = '_opencv_edge'
628
+
629
+ output = affine_transform(
630
+ input,
631
+ cupy.ones(input.ndim, input.dtype),
632
+ cupy.negative(cupy.asarray(shift)),
633
+ None,
634
+ output,
635
+ order,
636
+ mode,
637
+ cval,
638
+ prefilter,
639
+ )
640
+ else:
641
+ output = _util._get_output(output, input)
642
+ if input.dtype.kind in 'iu':
643
+ input = input.astype(cupy.float32)
644
+ filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
645
+ integer_output = output.dtype.kind in 'iu'
646
+ _util._check_cval(mode, cval, integer_output)
647
+ large_int = _prod(input.shape) > 1 << 31
648
+ kern = _interp_kernels._get_shift_kernel(
649
+ input.ndim, large_int, input.shape, mode, cval=cval, order=order,
650
+ integer_output=integer_output, nprepad=nprepad)
651
+ shift = cupy.asarray(shift, dtype=cupy.float64, order='C')
652
+ if shift.ndim != 1:
653
+ raise ValueError('shift must be 1d')
654
+ if shift.size != filtered.ndim:
655
+ raise ValueError('len(shift) must equal input.ndim')
656
+ kern(filtered, shift, output)
657
+ return output
658
+
659
+
660
+ def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
661
+ prefilter=True, *, grid_mode=False):
662
+ """Zoom an array.
663
+
664
+ The array is zoomed using spline interpolation of the requested order.
665
+
666
+ Args:
667
+ input (cupy.ndarray): The input array.
668
+ zoom (float or sequence): The zoom factor along the axes. If a float,
669
+ ``zoom`` is the same for each axis. If a sequence, ``zoom`` should
670
+ contain one value for each axis.
671
+ output (cupy.ndarray or ~cupy.dtype): The array in which to place the
672
+ output, or the dtype of the returned array.
673
+ order (int): The order of the spline interpolation, default is 3. Must
674
+ be in the range 0-5.
675
+ mode (str): Points outside the boundaries of the input are filled
676
+ according to the given mode (``'constant'``, ``'nearest'``,
677
+ ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
678
+ ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
679
+ cval (scalar): Value used for points outside the boundaries of
680
+ the input if ``mode='constant'`` or ``mode='opencv'``. Default is
681
+ 0.0
682
+ prefilter (bool): Determines if the input array is prefiltered with
683
+ ``spline_filter`` before interpolation. The default is True, which
684
+ will create a temporary ``float64`` array of filtered values if
685
+ ``order > 1``. If setting this to False, the output will be
686
+ slightly blurred if ``order > 1``, unless the input is prefiltered,
687
+ i.e. it is the result of calling ``spline_filter`` on the original
688
+ input.
689
+ grid_mode (bool, optional): If False, the distance from the pixel
690
+ centers is zoomed. Otherwise, the distance including the full pixel
691
+ extent is used. For example, a 1d signal of length 5 is considered
692
+ to have length 4 when ``grid_mode`` is False, but length 5 when
693
+ ``grid_mode`` is True. See the following visual illustration:
694
+
695
+ .. code-block:: text
696
+
697
+ | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
698
+ |<-------------------------------------->|
699
+ vs.
700
+ |<----------------------------------------------->|
701
+
702
+ The starting point of the arrow in the diagram above corresponds to
703
+ coordinate location 0 in each mode.
704
+
705
+ Returns:
706
+ cupy.ndarray or None:
707
+ The zoomed input.
708
+
709
+ .. seealso:: :func:`scipy.ndimage.zoom`
710
+ """
711
+
712
+ _check_parameter('zoom', order, mode)
713
+
714
+ zoom = _util._fix_sequence_arg(zoom, input.ndim, 'zoom', float)
715
+
716
+ output_shape = []
717
+ for s, z in zip(input.shape, zoom):
718
+ output_shape.append(int(round(s * z)))
719
+ output_shape = tuple(output_shape)
720
+
721
+ if mode == 'opencv':
722
+ zoom = []
723
+ offset = []
724
+ for in_size, out_size in zip(input.shape, output_shape):
725
+ if out_size > 0:
726
+ zoom.append(float(in_size) / out_size)
727
+ offset.append((zoom[-1] - 1) / 2.0)
728
+ else:
729
+ zoom.append(0)
730
+ offset.append(0)
731
+ mode = 'nearest'
732
+
733
+ output = affine_transform(
734
+ input,
735
+ cupy.asarray(zoom),
736
+ offset,
737
+ output_shape,
738
+ output,
739
+ order,
740
+ mode,
741
+ cval,
742
+ prefilter,
743
+ )
744
+ else:
745
+ if grid_mode:
746
+
747
+ # warn about modes that may have surprising behavior
748
+ suggest_mode = None
749
+ if mode == 'constant':
750
+ suggest_mode = 'grid-constant'
751
+ elif mode == 'wrap':
752
+ suggest_mode = 'grid-wrap'
753
+ if suggest_mode is not None:
754
+ warnings.warn(
755
+ f'It is recommended to use mode = {suggest_mode} instead '
756
+ f'of {mode} when grid_mode is True.')
757
+
758
+ zoom = []
759
+ for in_size, out_size in zip(input.shape, output_shape):
760
+ if grid_mode and out_size > 0:
761
+ zoom.append(in_size / out_size)
762
+ elif out_size > 1:
763
+ zoom.append((in_size - 1) / (out_size - 1))
764
+ else:
765
+ zoom.append(0)
766
+
767
+ output = _util._get_output(output, input, shape=output_shape)
768
+ if input.dtype.kind in 'iu':
769
+ input = input.astype(cupy.float32)
770
+ filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
771
+ integer_output = output.dtype.kind in 'iu'
772
+ _util._check_cval(mode, cval, integer_output)
773
+ large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
774
+ kern = _interp_kernels._get_zoom_kernel(
775
+ input.ndim, large_int, output_shape, mode, order=order,
776
+ integer_output=integer_output, grid_mode=grid_mode,
777
+ nprepad=nprepad)
778
+ zoom = cupy.asarray(zoom, dtype=cupy.float64)
779
+ kern(filtered, zoom, output)
780
+ return output
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_measurements.py ADDED
@@ -0,0 +1,1380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ import numpy
4
+
5
+ import cupy
6
+ from cupy import _core
7
+ from cupy import _util
8
+
9
+
10
+ def label(input, structure=None, output=None):
11
+ """Labels features in an array.
12
+
13
+ Args:
14
+ input (cupy.ndarray): The input array.
15
+ structure (array_like or None): A structuring element that defines
16
+ feature connections. ```structure``` must be centersymmetric. If
17
+ None, structure is automatically generated with a squared
18
+ connectivity equal to one.
19
+ output (cupy.ndarray, dtype or None): The array in which to place the
20
+ output.
21
+ Returns:
22
+ label (cupy.ndarray): An integer array where each unique feature in
23
+ ```input``` has a unique label in the array.
24
+
25
+ num_features (int): Number of features found.
26
+
27
+ .. warning::
28
+
29
+ This function may synchronize the device.
30
+
31
+ .. seealso:: :func:`scipy.ndimage.label`
32
+ """
33
+ if not isinstance(input, cupy.ndarray):
34
+ raise TypeError('input must be cupy.ndarray')
35
+ if input.dtype.char in 'FD':
36
+ raise TypeError('Complex type not supported')
37
+ if structure is None:
38
+ structure = _generate_binary_structure(input.ndim, 1)
39
+ elif isinstance(structure, cupy.ndarray):
40
+ structure = cupy.asnumpy(structure)
41
+ structure = numpy.array(structure, dtype=bool)
42
+ if structure.ndim != input.ndim:
43
+ raise RuntimeError('structure and input must have equal rank')
44
+ for i in structure.shape:
45
+ if i != 3:
46
+ raise ValueError('structure dimensions must be equal to 3')
47
+
48
+ if isinstance(output, cupy.ndarray):
49
+ if output.shape != input.shape:
50
+ raise ValueError("output shape not correct")
51
+ caller_provided_output = True
52
+ else:
53
+ caller_provided_output = False
54
+ if output is None:
55
+ output = cupy.empty(input.shape, numpy.int32)
56
+ else:
57
+ output = cupy.empty(input.shape, output)
58
+
59
+ if input.size == 0:
60
+ # empty
61
+ maxlabel = 0
62
+ elif input.ndim == 0:
63
+ # 0-dim array
64
+ maxlabel = 0 if input.item() == 0 else 1
65
+ output.fill(maxlabel)
66
+ else:
67
+ if output.dtype != numpy.int32:
68
+ y = cupy.empty(input.shape, numpy.int32)
69
+ else:
70
+ y = output
71
+ maxlabel = _label(input, structure, y)
72
+ if output.dtype != numpy.int32:
73
+ _core.elementwise_copy(y, output)
74
+
75
+ if caller_provided_output:
76
+ return maxlabel
77
+ else:
78
+ return output, maxlabel
79
+
80
+
81
+ def _generate_binary_structure(rank, connectivity):
82
+ if connectivity < 1:
83
+ connectivity = 1
84
+ if rank < 1:
85
+ return numpy.array(True, dtype=bool)
86
+ output = numpy.fabs(numpy.indices([3] * rank) - 1)
87
+ output = numpy.add.reduce(output, 0)
88
+ return output <= connectivity
89
+
90
+
91
+ def _label(x, structure, y):
92
+ elems = numpy.where(structure != 0)
93
+ vecs = [elems[dm] - 1 for dm in range(x.ndim)]
94
+ offset = vecs[0]
95
+ for dm in range(1, x.ndim):
96
+ offset = offset * 3 + vecs[dm]
97
+ indxs = numpy.where(offset < 0)[0]
98
+ dirs = [[vecs[dm][dr] for dm in range(x.ndim)] for dr in indxs]
99
+ dirs = cupy.array(dirs, dtype=numpy.int32)
100
+ ndirs = indxs.shape[0]
101
+ y_shape = cupy.array(y.shape, dtype=numpy.int32)
102
+ count = cupy.zeros(2, dtype=numpy.int32)
103
+ _kernel_init()(x, y)
104
+ _kernel_connect()(y_shape, dirs, ndirs, x.ndim, y, size=y.size)
105
+ _kernel_count()(y, count, size=y.size)
106
+ maxlabel = int(count[0])
107
+ labels = cupy.empty(maxlabel, dtype=numpy.int32)
108
+ _kernel_labels()(y, count, labels, size=y.size)
109
+ _kernel_finalize()(maxlabel, cupy.sort(labels), y, size=y.size)
110
+ return maxlabel
111
+
112
+
113
+ def _kernel_init():
114
+ return _core.ElementwiseKernel(
115
+ 'X x', 'Y y', 'if (x == 0) { y = -1; } else { y = i; }',
116
+ 'cupyx_scipy_ndimage_label_init')
117
+
118
+
119
+ def _kernel_connect():
120
+ return _core.ElementwiseKernel(
121
+ 'raw int32 shape, raw int32 dirs, int32 ndirs, int32 ndim',
122
+ 'raw Y y',
123
+ '''
124
+ if (y[i] < 0) continue;
125
+ for (int dr = 0; dr < ndirs; dr++) {
126
+ int j = i;
127
+ int rest = j;
128
+ int stride = 1;
129
+ int k = 0;
130
+ for (int dm = ndim-1; dm >= 0; dm--) {
131
+ int pos = rest % shape[dm] + dirs[dm + dr * ndim];
132
+ if (pos < 0 || pos >= shape[dm]) {
133
+ k = -1;
134
+ break;
135
+ }
136
+ k += pos * stride;
137
+ rest /= shape[dm];
138
+ stride *= shape[dm];
139
+ }
140
+ if (k < 0) continue;
141
+ if (y[k] < 0) continue;
142
+ while (1) {
143
+ while (j != y[j]) { j = y[j]; }
144
+ while (k != y[k]) { k = y[k]; }
145
+ if (j == k) break;
146
+ if (j < k) {
147
+ int old = atomicCAS( &y[k], k, j );
148
+ if (old == k) break;
149
+ k = old;
150
+ }
151
+ else {
152
+ int old = atomicCAS( &y[j], j, k );
153
+ if (old == j) break;
154
+ j = old;
155
+ }
156
+ }
157
+ }
158
+ ''',
159
+ 'cupyx_scipy_ndimage_label_connect')
160
+
161
+
162
+ def _kernel_count():
163
+ return _core.ElementwiseKernel(
164
+ '', 'raw Y y, raw int32 count',
165
+ '''
166
+ if (y[i] < 0) continue;
167
+ int j = i;
168
+ while (j != y[j]) { j = y[j]; }
169
+ if (j != i) y[i] = j;
170
+ else atomicAdd(&count[0], 1);
171
+ ''',
172
+ 'cupyx_scipy_ndimage_label_count')
173
+
174
+
175
+ def _kernel_labels():
176
+ return _core.ElementwiseKernel(
177
+ '', 'raw Y y, raw int32 count, raw int32 labels',
178
+ '''
179
+ if (y[i] != i) continue;
180
+ int j = atomicAdd(&count[1], 1);
181
+ labels[j] = i;
182
+ ''',
183
+ 'cupyx_scipy_ndimage_label_labels')
184
+
185
+
186
+ def _kernel_finalize():
187
+ return _core.ElementwiseKernel(
188
+ 'int32 maxlabel', 'raw int32 labels, raw Y y',
189
+ '''
190
+ if (y[i] < 0) {
191
+ y[i] = 0;
192
+ continue;
193
+ }
194
+ int yi = y[i];
195
+ int j_min = 0;
196
+ int j_max = maxlabel - 1;
197
+ int j = (j_min + j_max) / 2;
198
+ while (j_min < j_max) {
199
+ if (yi == labels[j]) break;
200
+ if (yi < labels[j]) j_max = j - 1;
201
+ else j_min = j + 1;
202
+ j = (j_min + j_max) / 2;
203
+ }
204
+ y[i] = j + 1;
205
+ ''',
206
+ 'cupyx_scipy_ndimage_label_finalize')
207
+
208
+
209
+ _ndimage_variance_kernel = _core.ElementwiseKernel(
210
+ 'T input, R labels, raw X index, uint64 size, raw float64 mean',
211
+ 'raw float64 out',
212
+ """
213
+ for (ptrdiff_t j = 0; j < size; j++) {
214
+ if (labels == index[j]) {
215
+ atomicAdd(&out[j], (input - mean[j]) * (input - mean[j]));
216
+ break;
217
+ }
218
+ }
219
+ """,
220
+ 'cupyx_scipy_ndimage_variance')
221
+
222
+
223
+ _ndimage_sum_kernel = _core.ElementwiseKernel(
224
+ 'T input, R labels, raw X index, uint64 size',
225
+ 'raw float64 out',
226
+ """
227
+ for (ptrdiff_t j = 0; j < size; j++) {
228
+ if (labels == index[j]) {
229
+ atomicAdd(&out[j], input);
230
+ break;
231
+ }
232
+ }
233
+ """,
234
+ 'cupyx_scipy_ndimage_sum')
235
+
236
+
237
+ def _ndimage_sum_kernel_2(input, labels, index, sum_val, batch_size=4):
238
+ for i in range(0, index.size, batch_size):
239
+ matched = labels == index[i:i + batch_size].reshape(
240
+ (-1,) + (1,) * input.ndim)
241
+ sum_axes = tuple(range(1, 1 + input.ndim))
242
+ sum_val[i:i + batch_size] = cupy.where(matched, input, 0).sum(
243
+ axis=sum_axes)
244
+ return sum_val
245
+
246
+
247
+ _ndimage_mean_kernel = _core.ElementwiseKernel(
248
+ 'T input, R labels, raw X index, uint64 size',
249
+ 'raw float64 out, raw uint64 count',
250
+ """
251
+ for (ptrdiff_t j = 0; j < size; j++) {
252
+ if (labels == index[j]) {
253
+ atomicAdd(&out[j], input);
254
+ atomicAdd(&count[j], 1);
255
+ break;
256
+ }
257
+ }
258
+ """,
259
+ 'cupyx_scipy_ndimage_mean')
260
+
261
+
262
+ def _ndimage_mean_kernel_2(input, labels, index, batch_size=4,
263
+ return_count=False):
264
+ sum_val = cupy.empty_like(index, dtype=cupy.float64)
265
+ count = cupy.empty_like(index, dtype=cupy.uint64)
266
+ for i in range(0, index.size, batch_size):
267
+ matched = labels == index[i:i + batch_size].reshape(
268
+ (-1,) + (1,) * input.ndim)
269
+ mean_axes = tuple(range(1, 1 + input.ndim))
270
+ count[i:i + batch_size] = matched.sum(axis=mean_axes)
271
+ sum_val[i:i + batch_size] = cupy.where(matched, input, 0).sum(
272
+ axis=mean_axes)
273
+ if return_count:
274
+ return sum_val / count, count
275
+ return sum_val / count
276
+
277
+
278
+ def _mean_driver(input, labels, index, return_count=False, use_kern=False):
279
+ if use_kern:
280
+ return _ndimage_mean_kernel_2(input, labels, index,
281
+ return_count=return_count)
282
+
283
+ out = cupy.zeros_like(index, cupy.float64)
284
+ count = cupy.zeros_like(index, dtype=cupy.uint64)
285
+ sum, count = _ndimage_mean_kernel(input,
286
+ labels, index, index.size, out, count)
287
+ if return_count:
288
+ return sum / count, count
289
+ return sum / count
290
+
291
+
292
+ def variance(input, labels=None, index=None):
293
+ """Calculates the variance of the values of an n-D image array, optionally
294
+ at specified sub-regions.
295
+
296
+ Args:
297
+ input (cupy.ndarray): Nd-image data to process.
298
+ labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
299
+ If not None, must be same shape as `input`.
300
+ index (cupy.ndarray or None): `labels` to include in output. If None
301
+ (default), all values where `labels` is non-zero are used.
302
+
303
+ Returns:
304
+ cupy.ndarray: Values of variance, for each sub-region if
305
+ `labels` and `index` are specified.
306
+
307
+ .. seealso:: :func:`scipy.ndimage.variance`
308
+ """
309
+ if not isinstance(input, cupy.ndarray):
310
+ raise TypeError('input must be cupy.ndarray')
311
+
312
+ if input.dtype in (cupy.complex64, cupy.complex128):
313
+ raise TypeError("cupyx.scipy.ndimage.variance doesn't support %{}"
314
+ "".format(input.dtype.type))
315
+
316
+ use_kern = False
317
+ # There are constraints on types because of atomicAdd() in CUDA.
318
+ if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
319
+ cupy.float64, cupy.uint32, cupy.uint64,
320
+ cupy.ulonglong]:
321
+ warnings.warn(
322
+ 'Using the slower implementation because the provided '
323
+ f'type {input.dtype} is not supported by cupyx.scipy.ndimage.sum. '
324
+ 'Consider using an array of type int32, float16, '
325
+ 'float32, float64, uint32, uint64 as data types '
326
+ 'for the fast implementation', _util.PerformanceWarning)
327
+ use_kern = True
328
+
329
+ def calc_var_with_intermediate_float(input):
330
+ vals_c = input - input.mean()
331
+ count = vals_c.size
332
+ # Does not use `ndarray.mean()` here to return the same results as
333
+ # SciPy does, especially in case `input`'s dtype is float16.
334
+ return cupy.square(vals_c).sum() / cupy.asanyarray(count).astype(float)
335
+
336
+ if labels is None:
337
+ return calc_var_with_intermediate_float(input)
338
+
339
+ if not isinstance(labels, cupy.ndarray):
340
+ raise TypeError('label must be cupy.ndarray')
341
+
342
+ input, labels = cupy.broadcast_arrays(input, labels)
343
+
344
+ if index is None:
345
+ return calc_var_with_intermediate_float(input[labels > 0])
346
+
347
+ if cupy.isscalar(index):
348
+ return calc_var_with_intermediate_float(input[labels == index])
349
+
350
+ if not isinstance(index, cupy.ndarray):
351
+ if not isinstance(index, int):
352
+ raise TypeError('index must be cupy.ndarray or a scalar int')
353
+ else:
354
+ return (input[labels == index]).var().astype(cupy.float64,
355
+ copy=False)
356
+
357
+ mean_val, count = _mean_driver(input, labels, index, True, use_kern)
358
+ if use_kern:
359
+ new_axis = (..., *(cupy.newaxis for _ in range(input.ndim)))
360
+ return cupy.where(labels[None, ...] == index[new_axis],
361
+ cupy.square(input - mean_val[new_axis]),
362
+ 0).sum(tuple(range(1, input.ndim + 1))) / count
363
+ out = cupy.zeros_like(index, dtype=cupy.float64)
364
+ return _ndimage_variance_kernel(input, labels, index, index.size, mean_val,
365
+ out) / count
366
+
367
+
368
+ def sum_labels(input, labels=None, index=None):
369
+ """Calculates the sum of the values of an n-D image array, optionally
370
+ at specified sub-regions.
371
+
372
+ Args:
373
+ input (cupy.ndarray): Nd-image data to process.
374
+ labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
375
+ If not None, must be same shape as `input`.
376
+ index (cupy.ndarray or None): `labels` to include in output. If None
377
+ (default), all values where `labels` is non-zero are used.
378
+
379
+ Returns:
380
+ sum (cupy.ndarray): sum of values, for each sub-region if
381
+ `labels` and `index` are specified.
382
+
383
+ .. seealso:: :func:`scipy.ndimage.sum_labels`
384
+ """
385
+ if not isinstance(input, cupy.ndarray):
386
+ raise TypeError('input must be cupy.ndarray')
387
+
388
+ if input.dtype in (cupy.complex64, cupy.complex128):
389
+ raise TypeError("cupyx.scipy.ndimage.sum does not support %{}".format(
390
+ input.dtype.type))
391
+
392
+ use_kern = False
393
+ # There is constraints on types because of atomicAdd() in CUDA.
394
+ if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
395
+ cupy.float64, cupy.uint32, cupy.uint64,
396
+ cupy.ulonglong]:
397
+ warnings.warn(
398
+ 'Using the slower implementation as '
399
+ 'cupyx.scipy.ndimage.sum supports int32, float16, '
400
+ 'float32, float64, uint32, uint64 as data types'
401
+ 'for the fast implmentation', _util.PerformanceWarning)
402
+ use_kern = True
403
+
404
+ if labels is None:
405
+ return input.sum()
406
+
407
+ if not isinstance(labels, cupy.ndarray):
408
+ raise TypeError('label must be cupy.ndarray')
409
+
410
+ input, labels = cupy.broadcast_arrays(input, labels)
411
+
412
+ if index is None:
413
+ return input[labels != 0].sum()
414
+
415
+ if not isinstance(index, cupy.ndarray):
416
+ if not isinstance(index, int):
417
+ raise TypeError('index must be cupy.ndarray or a scalar int')
418
+ else:
419
+ return (input[labels == index]).sum()
420
+
421
+ if index.size == 0:
422
+ return cupy.array([], dtype=cupy.int64)
423
+
424
+ out = cupy.zeros_like(index, dtype=cupy.float64)
425
+
426
+ # The following parameters for sum where determined using a Tesla P100.
427
+ if (input.size >= 262144 and index.size <= 4) or use_kern:
428
+ return _ndimage_sum_kernel_2(input, labels, index, out)
429
+ return _ndimage_sum_kernel(input, labels, index, index.size, out)
430
+
431
+
432
+ def sum(input, labels=None, index=None):
433
+ """Calculates the sum of the values of an n-D image array, optionally
434
+ at specified sub-regions.
435
+
436
+ Args:
437
+ input (cupy.ndarray): Nd-image data to process.
438
+ labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
439
+ If not None, must be same shape as `input`.
440
+ index (cupy.ndarray or None): `labels` to include in output. If None
441
+ (default), all values where `labels` is non-zero are used.
442
+
443
+ Returns:
444
+ sum (cupy.ndarray): sum of values, for each sub-region if
445
+ `labels` and `index` are specified.
446
+
447
+ Notes:
448
+ This is an alias for `cupyx.scipy.ndimage.sum_labels` kept for
449
+ backwards compatibility reasons. For new code please prefer
450
+ `sum_labels`.
451
+
452
+ .. seealso:: :func:`scipy.ndimage.sum`
453
+ """
454
+ return sum_labels(input, labels, index)
455
+
456
+
457
+ def mean(input, labels=None, index=None):
458
+ """Calculates the mean of the values of an n-D image array, optionally
459
+ at specified sub-regions.
460
+
461
+ Args:
462
+ input (cupy.ndarray): Nd-image data to process.
463
+ labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
464
+ If not None, must be same shape as `input`.
465
+ index (cupy.ndarray or None): `labels` to include in output. If None
466
+ (default), all values where `labels` is non-zero are used.
467
+
468
+ Returns:
469
+ mean (cupy.ndarray): mean of values, for each sub-region if
470
+ `labels` and `index` are specified.
471
+
472
+
473
+ .. seealso:: :func:`scipy.ndimage.mean`
474
+ """
475
+ if not isinstance(input, cupy.ndarray):
476
+ raise TypeError('input must be cupy.ndarray')
477
+
478
+ if input.dtype in (cupy.complex64, cupy.complex128):
479
+ raise TypeError("cupyx.scipy.ndimage.mean does not support %{}".format(
480
+ input.dtype.type))
481
+
482
+ use_kern = False
483
+ # There is constraints on types because of atomicAdd() in CUDA.
484
+ if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
485
+ cupy.float64, cupy.uint32, cupy.uint64,
486
+ cupy.ulonglong]:
487
+ warnings.warn(
488
+ 'Using the slower implementation as '
489
+ 'cupyx.scipy.ndimage.mean supports int32, float16, '
490
+ 'float32, float64, uint32, uint64 as data types '
491
+ 'for the fast implmentation', _util.PerformanceWarning)
492
+ use_kern = True
493
+
494
+ def calc_mean_with_intermediate_float(input):
495
+ sum = input.sum()
496
+ count = input.size
497
+ # Does not use `ndarray.mean()` here to return the same results as
498
+ # SciPy does, especially in case `input`'s dtype is float16.
499
+ return sum / cupy.asanyarray(count).astype(float)
500
+
501
+ if labels is None:
502
+ return calc_mean_with_intermediate_float(input)
503
+
504
+ if not isinstance(labels, cupy.ndarray):
505
+ raise TypeError('label must be cupy.ndarray')
506
+
507
+ input, labels = cupy.broadcast_arrays(input, labels)
508
+
509
+ if index is None:
510
+ return calc_mean_with_intermediate_float(input[labels > 0])
511
+
512
+ if cupy.isscalar(index):
513
+ return calc_mean_with_intermediate_float(input[labels == index])
514
+
515
+ if not isinstance(index, cupy.ndarray):
516
+ if not isinstance(index, int):
517
+ raise TypeError('index must be cupy.ndarray or a scalar int')
518
+ else:
519
+ return (input[labels == index]).mean(dtype=cupy.float64)
520
+
521
+ return _mean_driver(input, labels, index, use_kern=use_kern)
522
+
523
+
524
+ def standard_deviation(input, labels=None, index=None):
525
+ """Calculates the standard deviation of the values of an n-D image array,
526
+ optionally at specified sub-regions.
527
+
528
+ Args:
529
+ input (cupy.ndarray): Nd-image data to process.
530
+ labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
531
+ If not None, must be same shape as `input`.
532
+ index (cupy.ndarray or None): `labels` to include in output. If None
533
+ (default), all values where `labels` is non-zero are used.
534
+
535
+ Returns:
536
+ standard_deviation (cupy.ndarray): standard deviation of values, for
537
+ each sub-region if `labels` and `index` are specified.
538
+
539
+ .. seealso:: :func:`scipy.ndimage.standard_deviation`
540
+ """
541
+ return cupy.sqrt(variance(input, labels, index))
542
+
543
+
544
+ def _safely_castable_to_int(dt):
545
+ """Test whether the NumPy data type `dt` can be safely cast to an int."""
546
+ int_size = cupy.dtype(int).itemsize
547
+ safe = (
548
+ cupy.issubdtype(dt, cupy.signedinteger) and dt.itemsize <= int_size
549
+ ) or (cupy.issubdtype(dt, cupy.unsignedinteger) and dt.itemsize < int_size)
550
+ return safe
551
+
552
+
553
+ def _get_values(arrays, func):
554
+ """Concatenated result of applying func to a list of arrays.
555
+
556
+ func should be cupy.min, cupy.max or cupy.median
557
+ """
558
+ dtype = arrays[0].dtype
559
+ return cupy.concatenate(
560
+ [
561
+ func(a, keepdims=True)
562
+ if a.size != 0 else cupy.asarray([0], dtype=dtype)
563
+ for a in arrays
564
+ ]
565
+ )
566
+
567
+
568
+ def _get_positions(arrays, position_arrays, arg_func):
569
+ """Concatenated positions from applying arg_func to arrays.
570
+
571
+ arg_func should be cupy.argmin or cupy.argmax
572
+ """
573
+ return cupy.concatenate(
574
+ [
575
+ pos[arg_func(a, keepdims=True)]
576
+ if a.size != 0 else cupy.asarray([0], dtype=int)
577
+ for pos, a in zip(position_arrays, arrays)
578
+ ]
579
+ )
580
+
581
+
582
def _select_via_looping(input, labels, idxs, positions, find_min,
                        find_min_positions, find_max, find_max_positions,
                        find_median):
    """Internal helper routine for _select.

    With relatively few labels it is faster to call this function rather than
    using the implementation based on cupy.lexsort.
    """
    # positions are only needed when at least one position output is requested
    find_positions = find_min_positions or find_max_positions

    # extract labeled regions into separate arrays
    # (one boolean-mask pass over `labels` per requested label)
    arrays = []
    position_arrays = []
    for i in idxs:
        label_idx = labels == i
        arrays.append(input[label_idx])
        if find_positions:
            position_arrays.append(positions[label_idx])

    result = []
    # the order below matches the order expected by cupy.ndimage.extrema
    if find_min:
        result += [_get_values(arrays, cupy.min)]
    if find_min_positions:
        result += [_get_positions(arrays, position_arrays, cupy.argmin)]
    if find_max:
        result += [_get_values(arrays, cupy.max)]
    if find_max_positions:
        result += [_get_positions(arrays, position_arrays, cupy.argmax)]
    if find_median:
        result += [_get_values(arrays, cupy.median)]
    return result
614
+
615
+
616
def _select(input, labels=None, index=None, find_min=False, find_max=False,
            find_min_positions=False, find_max_positions=False,
            find_median=False):
    """Return one or more of: min, max, min position, max position, median.

    If neither `labels` or `index` is provided, these are the global values
    in `input`. If `index` is None, but `labels` is provided, a global value
    across all non-zero labels is given. When both `labels` and `index` are
    provided, lists of values are provided for each labeled region specified
    in `index`. See further details in :func:`cupyx.scipy.ndimage.minimum`,
    etc.

    Used by minimum, maximum, minimum_position, maximum_position, extrema.

    Returns a list whose entries follow the fixed order:
    min, min position, max, max position, median (only the requested ones).
    """
    find_positions = find_min_positions or find_max_positions
    positions = None
    if find_positions:
        # raveled (flat) index of every element; unraveled later by callers
        positions = cupy.arange(input.size).reshape(input.shape)

    def single_group(vals, positions):
        # Handles the cases where the output is a single (global) group.
        result = []
        if find_min:
            result += [vals.min()]
        if find_min_positions:
            result += [positions[vals == vals.min()][0]]
        if find_max:
            result += [vals.max()]
        if find_max_positions:
            result += [positions[vals == vals.max()][0]]
        if find_median:
            result += [cupy.median(vals)]
        return result

    if labels is None:
        return single_group(input, positions)

    # ensure input and labels match sizes
    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        # single group: all elements with a non-zero label
        mask = labels > 0
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    if cupy.isscalar(index):
        # single group: the one requested label
        mask = labels == index
        masked_positions = None
        if find_positions:
            masked_positions = positions[mask]
        return single_group(input[mask], masked_positions)

    index = cupy.asarray(index)

    safe_int = _safely_castable_to_int(labels.dtype)
    min_label = labels.min()
    max_label = labels.max()

    # Remap labels to unique integers if necessary, or if the largest label is
    # larger than the number of values.
    if (not safe_int or min_label < 0 or max_label > labels.size):
        # Remap labels, and indexes
        unique_labels, labels = cupy.unique(labels, return_inverse=True)
        idxs = cupy.searchsorted(unique_labels, index)

        # Make all of idxs valid
        idxs[idxs >= unique_labels.size] = 0
        found = unique_labels[idxs] == index
    else:
        # Labels are an integer type, and there aren't too many
        idxs = cupy.asanyarray(index, int).copy()
        found = (idxs >= 0) & (idxs <= max_label)

    # requested labels that do not occur are routed to an out-of-range slot
    idxs[~found] = max_label + 1

    input = input.ravel()
    labels = labels.ravel()
    if find_positions:
        positions = positions.ravel()

    using_cub = _core._accelerator.ACCELERATOR_CUB in \
        cupy._core.get_routine_accelerators()

    if using_cub:
        # Cutoff values below were determined empirically for relatively large
        # input arrays.
        if find_positions or find_median:
            n_label_cutoff = 15
        else:
            n_label_cutoff = 30
    else:
        n_label_cutoff = 0

    if n_label_cutoff and len(idxs) <= n_label_cutoff:
        # few labels: per-label masking beats the lexsort-based path below
        return _select_via_looping(
            input, labels, idxs, positions, find_min, find_min_positions,
            find_max, find_max_positions, find_median
        )

    # sort primarily by label, secondarily by value, so each label's values
    # form a contiguous ascending run
    order = cupy.lexsort(cupy.stack((input.ravel(), labels.ravel())))
    input = input[order]
    labels = labels[order]
    if find_positions:
        positions = positions[order]

    # Determine indices corresponding to the min or max value for each label
    label_change_index = cupy.searchsorted(labels,
                                           cupy.arange(1, max_label + 2))
    if find_min or find_min_positions or find_median:
        # index corresponding to the minimum value at each label
        min_index = label_change_index[:-1]
    if find_max or find_max_positions or find_median:
        # index corresponding to the maximum value at each label
        max_index = label_change_index[1:] - 1

    result = []
    # the order below matches the order expected by cupy.ndimage.extrema
    if find_min:
        mins = cupy.zeros(int(labels.max()) + 2, input.dtype)
        mins[labels[min_index]] = input[min_index]
        result += [mins[idxs]]
    if find_min_positions:
        minpos = cupy.zeros(labels.max().item() + 2, int)
        minpos[labels[min_index]] = positions[min_index]
        result += [minpos[idxs]]
    if find_max:
        maxs = cupy.zeros(int(labels.max()) + 2, input.dtype)
        maxs[labels[max_index]] = input[max_index]
        result += [maxs[idxs]]
    if find_max_positions:
        maxpos = cupy.zeros(labels.max().item() + 2, int)
        maxpos[labels[max_index]] = positions[max_index]
        result += [maxpos[idxs]]
    if find_median:
        locs = cupy.arange(len(labels))
        lo = cupy.zeros(int(labels.max()) + 2, int)
        lo[labels[min_index]] = locs[min_index]
        hi = cupy.zeros(int(labels.max()) + 2, int)
        hi[labels[max_index]] = locs[max_index]
        lo = lo[idxs]
        hi = hi[idxs]
        # lo is an index to the lowest value in input for each label,
        # hi is an index to the largest value.
        # move them to be either the same ((hi - lo) % 2 == 0) or next
        # to each other ((hi - lo) % 2 == 1), then average.
        step = (hi - lo) // 2
        lo += step
        hi -= step
        if input.dtype.kind in 'iub':
            # fix for https://github.com/scipy/scipy/issues/12836
            result += [(input[lo].astype(float) + input[hi].astype(float)) /
                       2.0]
        else:
            result += [(input[lo] + input[hi]) / 2.0]

    return result
773
+
774
+
775
def minimum(input, labels=None, index=None):
    """Calculate the minimum of the values of an array over labeled regions.

    Args:
        input (cupy.ndarray):
            Array of values. For each region specified by `labels`, the
            minimal values of `input` over the region is computed.
        labels (cupy.ndarray, optional): An array of integers marking different
            regions over which the minimum value of `input` is to be computed.
            `labels` must have the same shape as `input`. If `labels` is not
            specified, the minimum over the whole array is returned.
        index (array_like, optional): A list of region labels that are taken
            into account for computing the minima. If `index` is None, the
            minimum over all elements where `labels` is non-zero is returned.

    Returns:
        cupy.ndarray: Array of minima of `input` over the regions
        determined by `labels` and whose index is in `index`. If `index` or
        `labels` are not specified, a 0-dimensional cupy.ndarray is
        returned: the minimal value of `input` if `labels` is None,
        and the minimal value of elements where `labels` is greater than
        zero if `index` is None.

    .. seealso:: :func:`scipy.ndimage.minimum`
    """
    # _select returns a list of the requested statistics; only the minimum
    # was requested here, so it is the sole element.
    selected = _select(input, labels, index, find_min=True)
    return selected[0]
801
+
802
+
803
def maximum(input, labels=None, index=None):
    """Calculate the maximum of the values of an array over labeled regions.

    Args:
        input (cupy.ndarray):
            Array of values. For each region specified by `labels`, the
            maximal values of `input` over the region is computed.
        labels (cupy.ndarray, optional): An array of integers marking different
            regions over which the maximum value of `input` is to be computed.
            `labels` must have the same shape as `input`. If `labels` is not
            specified, the maximum over the whole array is returned.
        index (array_like, optional): A list of region labels that are taken
            into account for computing the maxima. If `index` is None, the
            maximum over all elements where `labels` is non-zero is returned.

    Returns:
        cupy.ndarray: Array of maxima of `input` over the regions
        determined by `labels` and whose index is in `index`. If `index` or
        `labels` are not specified, a 0-dimensional cupy.ndarray is
        returned: the maximal value of `input` if `labels` is None,
        and the maximal value of elements where `labels` is greater than
        zero if `index` is None.

    .. seealso:: :func:`scipy.ndimage.maximum`
    """
    # All of the shared min/max/median machinery lives in _select.
    return _select(input, labels, index, find_max=True)[0]
829
+
830
+
831
def median(input, labels=None, index=None):
    """Calculate the median of the values of an array over labeled regions.

    Args:
        input (cupy.ndarray):
            Array of values. For each region specified by `labels`, the
            median values of `input` over the region is computed.
        labels (cupy.ndarray, optional): An array of integers marking different
            regions over which the median value of `input` is to be computed.
            `labels` must have the same shape as `input`. If `labels` is not
            specified, the median over the whole array is returned.
        index (array_like, optional): A list of region labels that are taken
            into account for computing the medians. If `index` is None, the
            median over all elements where `labels` is non-zero is returned.

    Returns:
        cupy.ndarray: Array of medians of `input` over the regions
        determined by `labels` and whose index is in `index`. If `index` or
        `labels` are not specified, a 0-dimensional cupy.ndarray is
        returned: the median value of `input` if `labels` is None,
        and the median value of elements where `labels` is greater than
        zero if `index` is None.

    .. seealso:: :func:`scipy.ndimage.median`
    """
    # _select returns a list of the requested statistics; only the median
    # was requested here, so it is the sole element.
    selected = _select(input, labels, index, find_median=True)
    return selected[0]
857
+
858
+
859
def minimum_position(input, labels=None, index=None):
    """Find the positions of the minimums of the values of an array at labels.

    For each region specified by `labels`, the position of the minimum
    value of `input` within the region is returned.

    Args:
        input (cupy.ndarray):
            Array of values. For each region specified by `labels`, the
            minimal values of `input` over the region is computed.
        labels (cupy.ndarray, optional): An array of integers marking different
            regions over which the position of the minimum value of `input` is
            to be computed. `labels` must have the same shape as `input`. If
            `labels` is not specified, the location of the first minimum over
            the whole array is returned.

            The `labels` argument only works when `index` is specified.
        index (array_like, optional): A list of region labels that are taken
            into account for finding the location of the minima. If `index` is
            None, the ``first`` minimum over all elements where `labels` is
            non-zero is returned.

            The `index` argument only works when `labels` is specified.

    Returns:
        Tuple of ints or list of tuples of ints that specify the location of
        minima of `input` over the regions determined by `labels` and whose
        index is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is returned
        specifying the location of the first minimal value of `input`.

    .. note::
        When `input` has multiple identical minima within a labeled region,
        the coordinates returned are not guaranteed to match those returned by
        SciPy.

    .. seealso:: :func:`scipy.ndimage.minimum_position`
    """
    dims = numpy.asarray(input.shape)
    # see numpy.unravel_index to understand this line.
    # dim_prod[k] is the number of elements spanned by one step along axis k,
    # so (flat_index // dim_prod) % dims recovers N-D coordinates.
    dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]

    # _select returns raveled (flat) indices of the per-region minima
    result = _select(input, labels, index, find_min_positions=True)[0]

    # have to transfer result back to the CPU to return index tuples
    if result.ndim == 0:
        result = int(result)  # synchronize
    else:
        result = cupy.asnumpy(result)  # synchronize

    if cupy.isscalar(result):
        # single region: return one coordinate tuple
        return tuple((result // dim_prod) % dims)

    # multiple regions: one coordinate tuple per requested label
    return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
914
+
915
+
916
def maximum_position(input, labels=None, index=None):
    """Find the positions of the maximums of the values of an array at labels.

    For each region specified by `labels`, the position of the maximum
    value of `input` within the region is returned.

    Args:
        input (cupy.ndarray):
            Array of values. For each region specified by `labels`, the
            maximal values of `input` over the region is computed.
        labels (cupy.ndarray, optional): An array of integers marking different
            regions over which the position of the maximum value of `input` is
            to be computed. `labels` must have the same shape as `input`. If
            `labels` is not specified, the location of the first maximum over
            the whole array is returned.

            The `labels` argument only works when `index` is specified.
        index (array_like, optional): A list of region labels that are taken
            into account for finding the location of the maxima. If `index` is
            None, the ``first`` maximum over all elements where `labels` is
            non-zero is returned.

            The `index` argument only works when `labels` is specified.

    Returns:
        Tuple of ints or list of tuples of ints that specify the location of
        maxima of `input` over the regions determined by `labels` and whose
        index is in `index`.

        If `index` or `labels` are not specified, a tuple of ints is returned
        specifying the location of the first maximal value of `input`.

    .. note::
        When `input` has multiple identical maxima within a labeled region,
        the coordinates returned are not guaranteed to match those returned by
        SciPy.

    .. seealso:: :func:`scipy.ndimage.maximum_position`
    """
    dims = numpy.asarray(input.shape)
    # see numpy.unravel_index to understand this line.
    # dim_prod[k] is the number of elements spanned by one step along axis k,
    # so (flat_index // dim_prod) % dims recovers N-D coordinates.
    dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]

    # _select returns raveled (flat) indices of the per-region maxima
    result = _select(input, labels, index, find_max_positions=True)[0]

    # have to transfer result back to the CPU to return index tuples
    if result.ndim == 0:
        result = int(result)
    else:
        result = cupy.asnumpy(result)

    if cupy.isscalar(result):
        # single region: return one coordinate tuple
        return tuple((result // dim_prod) % dims)

    # multiple regions: one coordinate tuple per requested label
    return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
971
+
972
+
973
def extrema(input, labels=None, index=None):
    """Calculate the minimums and maximums of the values of an array at labels,
    along with their positions.

    Args:
        input (cupy.ndarray): N-D image data to process.
        labels (cupy.ndarray, optional): Labels of features in input. If not
            None, must be same shape as `input`.
        index (int or sequence of ints, optional): Labels to include in output.
            If None (default), all values where non-zero `labels` are used.

    Returns:
        A tuple that contains the following values.

        **minimums (cupy.ndarray)**: Values of minimums in each feature.

        **maximums (cupy.ndarray)**: Values of maximums in each feature.

        **min_positions (tuple or list of tuples)**: Each tuple gives the N-D
        coordinates of the corresponding minimum.

        **max_positions (tuple or list of tuples)**: Each tuple gives the N-D
        coordinates of the corresponding maximum.

    .. seealso:: :func:`scipy.ndimage.extrema`
    """
    dims = numpy.array(input.shape)
    # see numpy.unravel_index to understand this line.
    # dim_prod[k] is the number of elements spanned by one step along axis k.
    dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]

    # _select emits results in the fixed order: min, min position, max,
    # max position -- the unpacking below must match that order.
    minimums, min_positions, maximums, max_positions = _select(
        input,
        labels,
        index,
        find_min=True,
        find_max=True,
        find_min_positions=True,
        find_max_positions=True,
    )

    if min_positions.ndim == 0:
        # scalar output case
        min_positions = min_positions.item()
        max_positions = max_positions.item()
        return (
            minimums,
            maximums,
            tuple((min_positions // dim_prod) % dims),
            tuple((max_positions // dim_prod) % dims),
        )

    # convert indexes to tuples on the host
    min_positions = cupy.asnumpy(min_positions)
    max_positions = cupy.asnumpy(max_positions)
    min_positions = [
        tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims
    ]
    max_positions = [
        tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims
    ]

    return minimums, maximums, min_positions, max_positions
1035
+
1036
+
1037
def center_of_mass(input, labels=None, index=None):
    """
    Calculate the center of mass of the values of an array at labels.

    Args:
        input (cupy.ndarray): Data from which to calculate center-of-mass. The
            masses can either be positive or negative.
        labels (cupy.ndarray, optional): Labels for objects in `input`, as
            generated by `ndimage.label`. Only used with `index`. Dimensions
            must be the same as `input`.
        index (int or sequence of ints, optional): Labels for which to
            calculate centers-of-mass. If not specified, all labels greater
            than zero are used. Only used with `labels`.

    Returns:
        tuple or list of tuples: Coordinates of centers-of-mass.

    .. seealso:: :func:`scipy.ndimage.center_of_mass`
    """
    # NOTE: the three-argument `sum` here is the labeled-sum function defined
    # earlier in this module (it shadows the builtin), as in scipy.ndimage.
    normalizer = sum(input, labels, index)
    grids = cupy.ogrid[[slice(0, i) for i in input.shape]]

    # weighted mean of each coordinate axis: sum(value * coord) / sum(value)
    results = [
        sum(input * grids[dir].astype(float), labels, index) / normalizer
        for dir in range(input.ndim)
    ]

    # have to transfer 0-dim array back to CPU?
    # may want to modify to avoid this
    is_0dim_array = (
        isinstance(results[0], cupy.ndarray) and results[0].ndim == 0
    )
    if is_0dim_array:
        # tuple of 0-dimensional cupy arrays
        return tuple(res for res in results)
    # list of cupy coordinate arrays
    return [v for v in cupy.stack(results, axis=-1)]
1074
+
1075
+
1076
def labeled_comprehension(
    input, labels, index, func, out_dtype, default, pass_positions=False
):
    """Array resulting from applying ``func`` to each labeled region.

    Roughly equivalent to [func(input[labels == i]) for i in index].

    Sequentially applies an arbitrary function (that works on array_like input)
    to subsets of an N-D image array specified by `labels` and `index`.
    The option exists to provide the function with positional parameters as the
    second argument.

    Args:
        input (cupy.ndarray): Data from which to select `labels` to process.
        labels (cupy.ndarray or None): Labels to objects in `input`. If not
            None, array must be same shape as `input`. If None, `func` is
            applied to raveled `input`.
        index (int, sequence of ints or None): Subset of `labels` to which to
            apply `func`. If a scalar, a single value is returned. If None,
            `func` is applied to all non-zero values of `labels`.
        func (callable): Python function to apply to `labels` from `input`.
        out_dtype (dtype): Dtype to use for `result`.
        default (int, float or None): Default return value when a element of
            `index` does not exist in `labels`.
        pass_positions (bool, optional): If True, pass linear indices to `func`
            as a second argument.

    Returns:
        cupy.ndarray: Result of applying `func` to each of `labels` to `input`
        in `index`.

    .. seealso:: :func:`scipy.ndimage.labeled_comprehension`
    """
    as_scalar = cupy.isscalar(index)
    input = cupy.asarray(input)

    if pass_positions:
        # raveled (flat) index of every element of `input`
        positions = cupy.arange(input.size).reshape(input.shape)

    if labels is None:
        if index is not None:
            raise ValueError('index without defined labels')
        if not pass_positions:
            return func(input.ravel())
        else:
            return func(input.ravel(), positions.ravel())

    try:
        input, labels = cupy.broadcast_arrays(input, labels)
    except ValueError:
        raise ValueError(
            'input and labels must have the same shape '
            '(excepting dimensions with width 1)'
        )

    if index is None:
        # single group: all elements with a non-zero label
        if not pass_positions:
            return func(input[labels > 0])
        else:
            return func(input[labels > 0], positions[labels > 0])

    index = cupy.atleast_1d(index)
    # round-trip cast detects index values that cannot be represented in
    # labels.dtype
    if cupy.any(index.astype(labels.dtype).astype(index.dtype) != index):
        raise ValueError(
            'Cannot convert index values from <%s> to <%s> '
            '(labels.dtype) without loss of precision'
            % (index.dtype, labels.dtype)
        )

    index = index.astype(labels.dtype)

    # optimization: find min/max in index, and select those parts of labels,
    # input, and positions
    lo = index.min()
    hi = index.max()
    mask = (labels >= lo) & (labels <= hi)

    # this also ravels the arrays
    labels = labels[mask]
    input = input[mask]
    if pass_positions:
        positions = positions[mask]

    # sort everything by labels
    label_order = labels.argsort()
    labels = labels[label_order]
    input = input[label_order]
    if pass_positions:
        positions = positions[label_order]

    # index_order is retained so the output can be un-sorted at the end
    index_order = index.argsort()
    sorted_index = index[index_order]

    def do_map(inputs, output):
        """labels must be sorted"""
        nidx = sorted_index.size

        # Find boundaries for each stretch of constant labels
        # This could be faster, but we already paid N log N to sort labels.
        lo = cupy.searchsorted(labels, sorted_index, side='left')
        hi = cupy.searchsorted(labels, sorted_index, side='right')

        for i, low, high in zip(range(nidx), lo, hi):
            if low == high:
                # label absent from `labels`; entry keeps its default value
                continue
            output[i] = func(*[inp[low:high] for inp in inputs])

    if out_dtype == object:  # noqa: E721
        # object dtype arrays are unsupported on the GPU; use a dict instead
        temp = {i: default for i in range(index.size)}
    else:
        temp = cupy.empty(index.shape, out_dtype)
        if default is None and temp.dtype.kind in 'fc':
            default = numpy.nan  # match NumPy floating-point None behavior
        temp[:] = default

    if not pass_positions:
        do_map([input], temp)
    else:
        do_map([input, positions], temp)

    if out_dtype == object:  # noqa: E721
        # use a list of arrays since object arrays are not supported
        index_order = cupy.asnumpy(index_order)
        output = [temp[i] for i in index_order.argsort()]
    else:
        output = cupy.zeros(index.shape, out_dtype)
        output[cupy.asnumpy(index_order)] = temp
    if as_scalar:
        output = output[0]
    return output
1206
+
1207
+
1208
def histogram(input, min, max, bins, labels=None, index=None):
    """Calculate the histogram of the values of an array, optionally at labels.

    Histogram calculates the frequency of values in an array within bins
    determined by `min`, `max`, and `bins`. The `labels` and `index`
    keywords can limit the scope of the histogram to specified sub-regions
    within the array.

    Args:
        input (cupy.ndarray): Data for which to calculate histogram.
        min (int): Minimum values of range of histogram bins.
        max (int): Maximum values of range of histogram bins.
        bins (int): Number of bins.
        labels (cupy.ndarray, optional): Labels for objects in `input`. If not
            None, must be same shape as `input`.
        index (int or sequence of ints, optional): Label or labels for which to
            calculate histogram. If None, all values where label is greater
            than zero are used.

    Returns:
        cupy.ndarray: Histogram counts.

    .. seealso:: :func:`scipy.ndimage.histogram`
    """
    # Shared bin edges are computed once; the closure below reuses them for
    # every labeled region it is applied to.
    bin_edges = cupy.linspace(min, max, bins + 1)

    def _count(vals):
        counts, _ = cupy.histogram(vals, bin_edges)
        return counts

    # object out_dtype: each region yields its own histogram array
    return labeled_comprehension(
        input, labels, index, _count, object, None, pass_positions=False
    )
1240
+
1241
+
1242
def value_indices(arr, *, ignore_value=None, adaptive_index_dtype=False):
    """
    Find indices of each distinct value in given array.

    Parameters
    ----------
    arr : ndarray of ints
        Array containing integer values.
    ignore_value : int, optional
        This value will be ignored in searching the `arr` array. If not
        given, all values found will be included in output. Default
        is None.
    adaptive_index_dtype : bool, optional
        If ``True``, instead of returning the default CuPy signed integer
        dtype, the smallest signed integer dtype capable of representing the
        image coordinate range will be used. This can substantially reduce
        memory usage and slightly reduce runtime. Note that this optional
        parameter is not available in the SciPy API.

    Returns
    -------
    indices : dictionary
        A Python dictionary of array indices for each distinct value. The
        dictionary is keyed by the distinct values, the entries are array
        index tuples covering all occurrences of the value within the
        array.

        This dictionary can occupy significant memory, often several times
        the size of the input array. To help reduce memory overhead, the
        argument `adaptive_index_dtype` can be set to ``True``.

    Notes
    -----
    For a small array with few distinct values, one might use
    `numpy.unique()` to find all possible values, and ``(arr == val)`` to
    locate each value within that array. However, for large arrays,
    with many distinct values, this can become extremely inefficient,
    as locating each value would require a new search through the entire
    array. Using this function, there is essentially one search, with
    the indices saved for all distinct values.

    This is useful when matching a categorical image (e.g. a segmentation
    or classification) to an associated image of other data, allowing
    any per-class statistic(s) to then be calculated. Provides a
    more flexible alternative to functions like ``scipy.ndimage.mean()``
    and ``scipy.ndimage.variance()``.

    Some other closely related functionality, with different strengths and
    weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and
    the `scikit-image <https://scikit-image.org/>`_ function
    ``skimage.measure.regionprops()``.

    Note for IDL users: this provides functionality equivalent to IDL's
    REVERSE_INDICES option (as per the IDL documentation for the
    `HISTOGRAM <https://www.l3harrisgeospatial.com/docs/histogram.html>`_
    function).

    .. versionadded:: 1.10.0

    See Also
    --------
    label, maximum, median, minimum_position, extrema, sum, mean, variance,
    standard_deviation, cupy.where, cupy.unique

    Examples
    --------
    >>> import cupy
    >>> from cupyx.scipy import ndimage
    >>> a = cupy.zeros((6, 6), dtype=int)
    >>> a[2:4, 2:4] = 1
    >>> a[4, 4] = 1
    >>> a[:2, :3] = 2
    >>> a[0, 5] = 3
    >>> a
    array([[2, 2, 2, 0, 0, 3],
           [2, 2, 2, 0, 0, 0],
           [0, 0, 1, 1, 0, 0],
           [0, 0, 1, 1, 0, 0],
           [0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 0]])
    >>> val_indices = ndimage.value_indices(a)

    The dictionary `val_indices` will have an entry for each distinct
    value in the input array.

    >>> val_indices.keys()
    dict_keys([0, 1, 2, 3])

    The entry for each value is an index tuple, locating the elements
    with that value.

    >>> ndx1 = val_indices[1]
    >>> ndx1
    (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4]))

    This can be used to index into the original array, or any other
    array with the same shape.

    >>> a[ndx1]
    array([1, 1, 1, 1, 1])

    If the zeros were to be ignored, then the resulting dictionary
    would no longer have an entry for zero.

    >>> val_indices = ndimage.value_indices(a, ignore_value=0)
    >>> val_indices.keys()
    dict_keys([1, 2, 3])

    """
    if arr.dtype.kind not in 'iu':
        raise ValueError('Parameter \'arr\' must be an integer array')
    if adaptive_index_dtype:
        # determine the minimum signed integer type needed to store the
        # index range
        raveled_int_type = cupy.min_scalar_type(-(int(arr.size) + 1))
        coord_int_type = cupy.min_scalar_type(-(max(arr.shape) + 1))
    arr1d = arr.reshape(-1)
    # counts[v] is the number of occurrences of value v in the array
    counts = cupy.bincount(arr1d)

    # sorting groups all occurrences of each value into a contiguous run
    isort = cupy.argsort(arr1d, axis=None)
    if adaptive_index_dtype:
        isort = isort.astype(raveled_int_type, copy=False)

    coords = cupy.unravel_index(isort, arr.shape)
    if adaptive_index_dtype:
        coords = tuple(c.astype(coord_int_type, copy=False) for c in coords)

    # walk the sorted coordinates, slicing out each value's contiguous run
    offset = 0
    out = {}
    counts = cupy.asnumpy(counts)  # need the counts on the host
    for value, count in enumerate(counts):
        if count == 0:
            continue
        elif value == ignore_value:
            offset += count
            continue
        out[value] = tuple(c[offset:offset + count] for c in coords)
        offset += count
    return out
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_morphology.py ADDED
@@ -0,0 +1,1017 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+ import warnings
3
+
4
+ import numpy
5
+
6
+ import cupy
7
+ from cupy import _core
8
+
9
+ from cupyx.scipy.ndimage import _filters_core
10
+ from cupyx.scipy.ndimage import _util
11
+ from cupyx.scipy.ndimage import _filters
12
+
13
+
14
@cupy.memoize(for_each_device=True)
def _get_binary_erosion_kernel(
    w_shape, int_type, offsets, center_is_true, border_value, invert, masked,
    all_weights_nonzero
):
    """Build (and memoize per device) the elementwise binary-erosion kernel.

    Generates the CUDA source snippets (``pre`` and ``found``) that are
    spliced into the generic n-d filter kernel by
    ``_filters_core._generate_nd_kernel``.  With ``invert`` set, the
    true/false output values and the border value are flipped, which turns
    the erosion kernel into a dilation kernel (dilation == inverted erosion
    of the inverted input).

    Args:
        w_shape: Shape of the structuring element.
        int_type: Integer type string used for index arithmetic.
        offsets: Per-axis offsets derived from the filter origin.
        center_is_true (bool): Whether the center of the structure is set;
            enables an early-out when the center pixel already decides the
            result.
        border_value: Value assumed outside the array border (cast to 0/1).
        invert (bool): Swap the roles of true/false output values.
        masked (bool): Whether a mask array restricts which pixels may change.
        all_weights_nonzero (bool): Whether the structure is fully dense.

    Returns:
        The compiled elementwise kernel.
    """
    if invert:
        # Inverted mode: border and output polarities are flipped so the same
        # kernel body implements dilation.
        border_value = int(not border_value)
        true_val = 0
        false_val = 1
    else:
        true_val = 1
        false_val = 0

    if masked:
        # Masked variant: pixels outside the mask are copied through
        # unchanged before any neighborhood test runs.
        pre = """
            bool mv = (bool)mask[i];
            bool _in = (bool)x[i];
            if (!mv) {{
                y = cast<Y>(_in);
                return;
            }} else if ({center_is_true} && _in == {false_val}) {{
                y = cast<Y>(_in);
                return;
            }}""".format(center_is_true=int(center_is_true),
                         false_val=false_val)
    else:
        pre = """
            bool _in = (bool)x[i];
            if ({center_is_true} && _in == {false_val}) {{
                y = cast<Y>(_in);
                return;
            }}""".format(center_is_true=int(center_is_true),
                         false_val=false_val)
    # Default result if no neighborhood test fires below.
    pre = pre + """
            y = cast<Y>({true_val});""".format(true_val=true_val)

    # {{{{ required because format is called again within _generate_nd_kernel
    found = """
        if ({{cond}}) {{{{
            if (!{border_value}) {{{{
                y = cast<Y>({false_val});
                return;
            }}}}
        }}}} else {{{{
            bool nn = {{value}} ? {true_val} : {false_val};
            if (!nn) {{{{
                y = cast<Y>({false_val});
                return;
            }}}}
        }}}}""".format(true_val=int(true_val),
                       false_val=int(false_val),
                       border_value=int(border_value),)

    # Distinct kernel names keep the erosion and dilation variants from
    # colliding in the per-device memoization cache.
    name = 'binary_erosion'
    if false_val:
        name += '_invert'
    return _filters_core._generate_nd_kernel(
        name,
        pre,
        found,
        '',
        'constant', w_shape, int_type, offsets, 0, ctype='Y', has_weights=True,
        has_structure=False, has_mask=masked, binary_morphology=True,
        all_weights_nonzero=all_weights_nonzero)
78
+
79
+
80
+ def _center_is_true(structure, origin):
81
+ coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, origin)])
82
+ return bool(structure[coor]) # device synchronization
83
+
84
+
85
def iterate_structure(structure, iterations, origin=None):
    """Iterate a structure by dilating it with itself.

    Args:
        structure(array_like): Structuring element (an array of bools,
            for example), to be dilated with itself.
        iterations(int): The number of dilations performed on the structure
            with itself.
        origin(int or tuple of int, optional): If origin is None, only the
            iterated structure is returned. If not, a tuple of the iterated
            structure and the modified origin is returned.

    Returns:
        cupy.ndarray: A new structuring element obtained by dilating
        ``structure`` (``iterations`` - 1) times with itself.

    .. seealso:: :func:`scipy.ndimage.iterate_structure`
    """
    if iterations < 2:
        return structure.copy()
    n_dilations = iterations - 1
    # Each dilation grows the element by (extent - 1) along every axis.
    grown_shape = [s + n_dilations * (s - 1) for s in structure.shape]
    start = [n_dilations * (s // 2) for s in structure.shape]
    embed = tuple(slice(lo, lo + s)
                  for lo, s in zip(start, structure.shape))
    out = cupy.zeros(grown_shape, bool)
    out[embed] = structure != 0
    out = binary_dilation(out, structure, iterations=n_dilations)
    if origin is None:
        return out
    origin = _util._fix_sequence_arg(origin, structure.ndim, 'origin', int)
    origin = [iterations * off for off in origin]
    return out, origin
121
+
122
+
123
def generate_binary_structure(rank, connectivity):
    """Generate a binary structure for binary morphological operations.

    Args:
        rank(int): Number of dimensions of the array to which the structuring
            element will be applied, as returned by ``np.ndim``.
        connectivity(int): ``connectivity`` determines which elements of the
            output array belong to the structure, i.e., are considered as
            neighbors of the central element. Elements up to a squared distance
            of ``connectivity`` from the center are considered neighbors.
            ``connectivity`` may range from 1 (no diagonal elements are
            neighbors) to ``rank`` (all elements are neighbors).

    Returns:
        cupy.ndarray: Structuring element which may be used for binary
        morphological operations, with ``rank`` dimensions and all
        dimensions equal to 3.

    .. seealso:: :func:`scipy.ndimage.generate_binary_structure`
    """
    connectivity = max(connectivity, 1)
    if rank < 1:
        # 0-d case: a scalar True element.
        return cupy.asarray(True, dtype=bool)
    # Manhattan distance of every cell of a 3**rank cube from its center;
    # computed on the host since the element is tiny.
    distance = numpy.add.reduce(numpy.fabs(numpy.indices([3] * rank) - 1), 0)
    return cupy.asarray(distance <= connectivity)
151
+
152
+
153
def _binary_erosion(input, structure, iterations, mask, output, border_value,
                    origin, invert, brute_force=True):
    """Shared implementation behind binary erosion and dilation.

    With ``invert=0`` this performs erosion; with ``invert=1`` the kernel's
    output polarity and border value are flipped, yielding dilation (the
    public ``binary_dilation`` additionally mirrors the structure and
    negates ``origin`` before calling here).

    ``iterations`` semantics: exactly ``iterations`` passes when >= 1;
    when < 1, iterate until the result stops changing.

    May synchronize the device (structure density check, per-iteration
    convergence checks).
    """
    try:
        iterations = operator.index(iterations)
    except TypeError:
        raise TypeError('iterations parameter should be an integer')

    if input.dtype.kind == 'c':
        raise TypeError('Complex type not supported')
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)
        # The default cross-shaped structure is fully dense only in 1-d.
        all_weights_nonzero = input.ndim == 1
        center_is_true = True
        default_structure = True
    else:
        structure = structure.astype(dtype=bool, copy=False)
        # transfer to CPU for use in determining if it is fully dense
        # structure_cpu = cupy.asnumpy(structure)
        default_structure = False
    if structure.ndim != input.ndim:
        raise RuntimeError('structure and input must have same dimensionality')
    if not structure.flags.c_contiguous:
        structure = cupy.ascontiguousarray(structure)
    if structure.size < 1:
        raise RuntimeError('structure must not be empty')

    if mask is not None:
        if mask.shape != input.shape:
            raise RuntimeError('mask and input must have equal sizes')
        if not mask.flags.c_contiguous:
            mask = cupy.ascontiguousarray(mask)
        masked = True
    else:
        masked = False
    origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int)

    if isinstance(output, cupy.ndarray):
        if output.dtype.kind == 'c':
            raise TypeError('Complex output type not supported')
    else:
        output = bool
    output = _util._get_output(output, input)
    temp_needed = cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS')
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _util._get_output(output.dtype, input)
    if structure.ndim == 0:
        # kernel doesn't handle ndim=0, so special case it here
        if float(structure):
            output[...] = cupy.asarray(input, dtype=bool)
        else:
            output[...] = ~cupy.asarray(input, dtype=bool)
        return output
    origin = tuple(origin)
    int_type = _util._get_inttype(input)
    offsets = _filters_core._origins_to_offsets(origin, structure.shape)
    if not default_structure:
        # synchronize required to determine if all weights are non-zero
        nnz = int(cupy.count_nonzero(structure))
        all_weights_nonzero = nnz == structure.size
        if all_weights_nonzero:
            center_is_true = True
        else:
            center_is_true = _center_is_true(structure, origin)

    erode_kernel = _get_binary_erosion_kernel(
        structure.shape, int_type, offsets, center_is_true, border_value,
        invert, masked, all_weights_nonzero,
    )

    if iterations == 1:
        if masked:
            output = erode_kernel(input, structure, mask, output)
        else:
            output = erode_kernel(input, structure, output)
    elif center_is_true and not brute_force:
        raise NotImplementedError(
            'only brute_force iteration has been implemented'
        )
    else:
        # Multi-iteration path: ping-pong between two buffers so each pass
        # reads the previous pass's result.
        if cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS'):
            raise ValueError('output and input may not overlap in memory')
        tmp_in = cupy.empty_like(input, dtype=output.dtype)
        tmp_out = output
        # Pre-swap for an even iteration count so the final result lands in
        # ``output`` after the last swap.
        if iterations >= 1 and not iterations & 1:
            tmp_in, tmp_out = tmp_out, tmp_in
        if masked:
            tmp_out = erode_kernel(input, structure, mask, tmp_out)
        else:
            tmp_out = erode_kernel(input, structure, tmp_out)
        # TODO: kernel doesn't return the changed status, so determine it here
        changed = not (input == tmp_out).all()  # synchronize!
        ii = 1
        while ii < iterations or ((iterations < 1) and changed):
            tmp_in, tmp_out = tmp_out, tmp_in
            if masked:
                tmp_out = erode_kernel(tmp_in, structure, mask, tmp_out)
            else:
                tmp_out = erode_kernel(tmp_in, structure, tmp_out)
            changed = not (tmp_in == tmp_out).all()
            ii += 1
            if not changed and (not ii & 1):  # synchronize!
                # can exit early if nothing changed
                # (only do this after even number of tmp_in/out swaps)
                break
        output = tmp_out
    if temp_needed:
        # Caller's output overlapped the input; copy the result back now.
        _core.elementwise_copy(output, temp)
        output = temp
    return output
264
+
265
+
266
def binary_erosion(input, structure=None, iterations=1, mask=None, output=None,
                   border_value=0, origin=0, brute_force=False):
    """Multidimensional binary erosion with a given structuring element.

    Binary erosion is a mathematical morphology operation used for image
    processing.

    Args:
        input(cupy.ndarray): The input binary array_like to be eroded.
            Non-zero (True) elements form the subset to be eroded.
        structure(cupy.ndarray, optional): The structuring element used for
            the erosion. Non-zero elements are considered True. When omitted,
            an element with a square connectivity of one is generated.
            (Default value = None)
        iterations(int, optional): Number of times the erosion is applied
            (default one). A value below 1 repeats the erosion until the
            result stops changing. Must be an integer.
        mask(cupy.ndarray or None, optional): When given, only elements whose
            corresponding mask entry is True may be modified at each
            iteration. (Default value = None)
        output(cupy.ndarray, optional): Array of the same shape as input, into
            which the output is placed. By default, a new array is created.
        border_value(int (cast to 0 or 1), optional): Value assumed at the
            border of the output array. (Default value = 0)
        origin(int or tuple of ints, optional): Placement of the filter, by
            default 0.
        brute_force(boolean, optional): Memory condition: if False, only the
            pixels whose value was changed in the last iteration are tracked
            as candidates to be updated (eroded) in the current iteration; if
            True all pixels are considered as candidates for erosion,
            regardless of what happened in the previous iteration.

    Returns:
        cupy.ndarray: The result of binary erosion.

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_erosion`
    """
    # invert=0 selects plain (non-inverted) erosion in the shared helper.
    return _binary_erosion(input, structure, iterations, mask, output,
                           border_value, origin, invert=0,
                           brute_force=brute_force)
310
+
311
+
312
def binary_dilation(input, structure=None, iterations=1, mask=None,
                    output=None, border_value=0, origin=0, brute_force=False):
    """Multidimensional binary dilation with the given structuring element.

    Args:
        input(cupy.ndarray): The input binary array_like to be dilated.
            Non-zero (True) elements form the subset to be dilated.
        structure(cupy.ndarray, optional): The structuring element used for
            the dilation. Non-zero elements are considered True. When omitted,
            an element with a square connectivity of one is generated.
            (Default value = None)
        iterations(int, optional): Number of times the dilation is applied
            (default one). A value below 1 repeats the dilation until the
            result stops changing. Must be an integer.
        mask(cupy.ndarray or None, optional): When given, only elements whose
            corresponding mask entry is True may be modified at each
            iteration. (Default value = None)
        output(cupy.ndarray, optional): Array of the same shape as input, into
            which the output is placed. By default, a new array is created.
        border_value(int (cast to 0 or 1), optional): Value assumed at the
            border of the output array. (Default value = 0)
        origin(int or tuple of ints, optional): Placement of the filter, by
            default 0.
        brute_force(boolean, optional): Memory condition: if False, only the
            pixels whose value was changed in the last iteration are tracked
            as candidates to be updated (dilated) in the current iteration; if
            True all pixels are considered as candidates for dilation,
            regardless of what happened in the previous iteration.

    Returns:
        cupy.ndarray: The result of binary dilation.

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_dilation`
    """
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)
    origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int)
    # Dilation is implemented as an inverted erosion with the structure
    # mirrored along every axis and the origin negated (shifted by one more
    # on axes of even extent).
    structure = structure[(slice(None, None, -1),) * structure.ndim]
    for axis in range(len(origin)):
        origin[axis] = -origin[axis]
        if structure.shape[axis] % 2 == 0:
            origin[axis] -= 1
    return _binary_erosion(input, structure, iterations, mask, output,
                           border_value, origin, 1, brute_force)
361
+
362
+
363
def binary_opening(input, structure=None, iterations=1, output=None, origin=0,
                   mask=None, border_value=0, brute_force=False):
    """
    Multidimensional binary opening with the given structuring element.

    The *opening* of an input image by a structuring element is the
    *dilation* of the *erosion* of the image by the structuring element.

    Args:
        input(cupy.ndarray): The input binary array to be opened.
            Non-zero (True) elements form the subset to be opened.
        structure(cupy.ndarray, optional): The structuring element used for
            the opening. Non-zero elements are considered True. When omitted,
            an element with a square connectivity of one is generated.
            (Default value = None)
        iterations(int, optional): Number of times the opening is applied
            (default one). A value below 1 repeats the operation until the
            result stops changing. Must be an integer.
        output(cupy.ndarray, optional): Array of the same shape as input, into
            which the output is placed. By default, a new array is created.
        origin(int or tuple of ints, optional): Placement of the filter, by
            default 0.
        mask(cupy.ndarray or None, optional): When given, only elements whose
            corresponding mask entry is True may be modified at each
            iteration. (Default value = None)
        border_value(int (cast to 0 or 1), optional): Value assumed at the
            border of the output array. (Default value = 0)
        brute_force(boolean, optional): Memory condition: if False, only the
            pixels whose value was changed in the last iteration are tracked
            as candidates to be updated (dilated) in the current iteration; if
            True all pixels are considered as candidates for opening,
            regardless of what happened in the previous iteration.

    Returns:
        cupy.ndarray: The result of binary opening.

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_opening`
    """
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)
    eroded = binary_erosion(input, structure, iterations, mask, None,
                            border_value, origin, brute_force)
    return binary_dilation(eroded, structure, iterations, mask, output,
                           border_value, origin, brute_force)
413
+
414
+
415
def binary_closing(input, structure=None, iterations=1, output=None, origin=0,
                   mask=None, border_value=0, brute_force=False):
    """
    Multidimensional binary closing with the given structuring element.

    The *closing* of an input image by a structuring element is the
    *erosion* of the *dilation* of the image by the structuring element.

    Args:
        input(cupy.ndarray): The input binary array to be closed.
            Non-zero (True) elements form the subset to be closed.
        structure(cupy.ndarray, optional): The structuring element used for
            the closing. Non-zero elements are considered True. When omitted,
            an element with a square connectivity of one is generated.
            (Default value = None)
        iterations(int, optional): Number of times the closing is applied
            (default one). A value below 1 repeats the operation until the
            result stops changing. Must be an integer.
        output(cupy.ndarray, optional): Array of the same shape as input, into
            which the output is placed. By default, a new array is created.
        origin(int or tuple of ints, optional): Placement of the filter, by
            default 0.
        mask(cupy.ndarray or None, optional): When given, only elements whose
            corresponding mask entry is True may be modified at each
            iteration. (Default value = None)
        border_value(int (cast to 0 or 1), optional): Value assumed at the
            border of the output array. (Default value = 0)
        brute_force(boolean, optional): Memory condition: if False, only the
            pixels whose value was changed in the last iteration are tracked
            as candidates to be updated (dilated) in the current iteration; if
            True all pixels are considered as candidates for closing,
            regardless of what happened in the previous iteration.

    Returns:
        cupy.ndarray: The result of binary closing.

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_closing`
    """
    if structure is None:
        structure = generate_binary_structure(input.ndim, 1)
    dilated = binary_dilation(input, structure, iterations, mask, None,
                              border_value, origin, brute_force)
    return binary_erosion(dilated, structure, iterations, mask, output,
                          border_value, origin, brute_force)
465
+
466
+
467
def binary_hit_or_miss(input, structure1=None, structure2=None, output=None,
                       origin1=0, origin2=None):
    """
    Multidimensional binary hit-or-miss transform.

    The hit-or-miss transform finds the locations of a given pattern
    inside the input image.

    Args:
        input (cupy.ndarray): Binary image where a pattern is to be detected.
        structure1 (cupy.ndarray, optional): Part of the structuring element
            to be fitted to the foreground (non-zero elements) of ``input``.
            When omitted, a structure of square connectivity 1 is chosen.
        structure2 (cupy.ndarray, optional): Second part of the structuring
            element that has to miss completely the foreground. When omitted,
            the complement of ``structure1`` is used.
        output (cupy.ndarray, dtype or None, optional): Array of the same
            shape as input, into which the output is placed. By default, a
            new array is created.
        origin1 (int or tuple of ints, optional): Placement of the first part
            of the structuring element ``structure1``, by default 0 for a
            centered structure.
        origin2 (int or tuple of ints or None, optional): Placement of the
            second part of the structuring element ``structure2``, by default
            0 for a centered structure. When ``origin1`` is given but
            ``origin2`` is not, ``origin2`` is set to ``origin1``.

    Returns:
        cupy.ndarray: Hit-or-miss transform of ``input`` with the given
        structuring element (``structure1``, ``structure2``).

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_hit_or_miss`
    """
    if structure1 is None:
        structure1 = generate_binary_structure(input.ndim, 1)
    if structure2 is None:
        structure2 = cupy.logical_not(structure1)
    origin1 = _util._fix_sequence_arg(origin1, input.ndim, 'origin1', int)
    if origin2 is None:
        origin2 = origin1
    else:
        origin2 = _util._fix_sequence_arg(origin2, input.ndim, 'origin2', int)

    # "Hit": erosion of the foreground with structure1.
    hit = _binary_erosion(input, structure1, 1, None, None, 0, origin1, 0,
                          False)
    inplace = isinstance(output, cupy.ndarray)
    # "Miss": inverted erosion (i.e. dilation test) with structure2, written
    # into the caller's output when one was supplied.
    miss = _binary_erosion(input, structure2, 1, None, output, 0, origin2, 1,
                           False)
    if inplace:
        # Combine in place; the in-place path intentionally returns None.
        cupy.logical_not(output, output)
        cupy.logical_and(hit, output, output)
    else:
        cupy.logical_not(miss, miss)
        return cupy.logical_and(hit, miss)
525
+
526
+
527
def binary_propagation(input, structure=None, mask=None, output=None,
                       border_value=0, origin=0):
    """
    Multidimensional binary propagation with the given structuring element.

    Implemented as a binary dilation repeated until convergence
    (``iterations=-1``), restricted by ``mask``.

    Args:
        input (cupy.ndarray): Binary image to be propagated inside ``mask``.
        structure (cupy.ndarray, optional): Structuring element used in the
            successive dilations. The output may depend on the structuring
            element, especially if ``mask`` has several connex components.
            When omitted, an element with a squared connectivity of one is
            generated.
        mask (cupy.ndarray, optional): Binary mask defining the region into
            which ``input`` is allowed to propagate.
        output (cupy.ndarray, optional): Array of the same shape as input,
            into which the output is placed. By default, a new array is
            created.
        border_value (int, optional): Value at the border in the output array.
            The value is cast to 0 or 1.
        origin (int or tuple of ints, optional): Placement of the filter.

    Returns:
        cupy.ndarray : Binary propagation of ``input`` inside ``mask``.

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_propagation`
    """
    return binary_dilation(input, structure, iterations=-1, mask=mask,
                           output=output, border_value=border_value,
                           origin=origin, brute_force=True)
558
+
559
+
560
def binary_fill_holes(input, structure=None, output=None, origin=0):
    """Fill the holes in binary objects.

    A seed of zeros is dilated until convergence inside the complement of
    ``input`` (with border value 1); inverting that result marks every pixel
    not reachable from the border, i.e. the objects with their holes filled.

    Args:
        input (cupy.ndarray): N-D binary array with holes to be filled.
        structure (cupy.ndarray, optional): Structuring element used in the
            computation; large-size elements make computations faster but may
            miss holes separated from the background by thin regions. The
            default element (with a square connectivity equal to one) yields
            the intuitive result where all holes in the input have been
            filled.
        output (cupy.ndarray, dtype or None, optional): Array of the same
            shape as input, into which the output is placed. By default, a
            new array is created.
        origin (int, tuple of ints, optional): Position of the structuring
            element.

    Returns:
        cupy.ndarray: Transformation of the initial image ``input`` where
        holes have been filled.

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_fill_holes`
    """
    background = cupy.logical_not(input)
    seed = cupy.zeros(background.shape, bool)
    inplace = isinstance(output, cupy.ndarray)
    # TODO (grlee77): set brute_force=False below once implemented
    if inplace:
        binary_dilation(seed, structure, -1, background, output, 1, origin,
                        brute_force=True)
    else:
        output = binary_dilation(seed, structure, -1, background, None, 1,
                                 origin, brute_force=True)
    cupy.logical_not(output, output)
    return output
599
+
600
+
601
def grey_erosion(input, size=None, footprint=None, structure=None, output=None,
                 mode='reflect', cval=0.0, origin=0):
    """Calculates a greyscale erosion.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the greyscale erosion. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for greyscale erosion. Non-zero
            values give the set of neighbors of the center over which minimum
            is chosen.
        structure (array of ints): Structuring element used for the greyscale
            erosion. ``structure`` may be a non-flat structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of greyscale erosion.

    .. seealso:: :func:`scipy.ndimage.grey_erosion`
    """
    if size is None and footprint is None and structure is None:
        raise ValueError('size, footprint or structure must be specified')
    # Greyscale erosion is a minimum filter over the structuring element.
    return _filters._min_or_max_filter(input, size, footprint, structure,
                                       output, mode, cval, origin, 'min')
639
+
640
+
641
def grey_dilation(input, size=None, footprint=None, structure=None,
                  output=None, mode='reflect', cval=0.0, origin=0):
    """Calculates a greyscale dilation.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the greyscale dilation. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for greyscale dilation. Non-zero
            values give the set of neighbors of the center over which maximum
            is chosen.
        structure (array of ints): Structuring element used for the greyscale
            dilation. ``structure`` may be a non-flat structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of greyscale dilation.

    .. seealso:: :func:`scipy.ndimage.grey_dilation`
    """
    if size is None and footprint is None and structure is None:
        raise ValueError('size, footprint or structure must be specified')
    # Dilation uses the mirrored structuring element with a negated origin
    # (shifted by one more on axes of even extent), applied as a max filter.
    if structure is not None:
        structure = cupy.array(structure)
        structure = structure[(slice(None, None, -1),) * structure.ndim]
    if footprint is not None:
        footprint = cupy.array(footprint)
        footprint = footprint[(slice(None, None, -1),) * footprint.ndim]

    origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int)
    for axis in range(len(origin)):
        origin[axis] = -origin[axis]
        # Extent along this axis comes from whichever element was supplied,
        # in the same precedence order used by the filter itself.
        if footprint is not None:
            extent = footprint.shape[axis]
        elif structure is not None:
            extent = structure.shape[axis]
        elif numpy.isscalar(size):
            extent = size
        else:
            extent = size[axis]
        if extent % 2 == 0:
            origin[axis] -= 1

    return _filters._min_or_max_filter(input, size, footprint, structure,
                                       output, mode, cval, origin, 'max')
699
+
700
+
701
def grey_closing(input, size=None, footprint=None, structure=None,
                 output=None, mode='reflect', cval=0.0, origin=0):
    """Calculates a multi-dimensional greyscale closing.

    A greyscale closing is a greyscale dilation followed by a greyscale
    erosion with the same structuring element.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the greyscale closing. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for greyscale closing. Non-zero
            values give the set of neighbors of the center over which closing
            is chosen.
        structure (array of ints): Structuring element used for the greyscale
            closing. ``structure`` may be a non-flat structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of greyscale closing.

    .. seealso:: :func:`scipy.ndimage.grey_closing`
    """
    if (size is not None) and (footprint is not None):
        warnings.warn('ignoring size because footprint is set', UserWarning,
                      stacklevel=2)
    dilated = grey_dilation(input, size, footprint, structure, None, mode,
                            cval, origin)
    return grey_erosion(dilated, size, footprint, structure, output, mode,
                        cval, origin)
740
+
741
+
742
def grey_opening(input, size=None, footprint=None, structure=None,
                 output=None, mode='reflect', cval=0.0, origin=0):
    """Calculates a multi-dimensional greyscale opening.

    A greyscale opening is a greyscale erosion followed by a greyscale
    dilation with the same structuring element.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the greyscale opening. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for greyscale opening. Non-zero
            values give the set of neighbors of the center over which opening
            is chosen.
        structure (array of ints): Structuring element used for the greyscale
            opening. ``structure`` may be a non-flat structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of greyscale opening.

    .. seealso:: :func:`scipy.ndimage.grey_opening`
    """
    if (size is not None) and (footprint is not None):
        warnings.warn('ignoring size because footprint is set', UserWarning,
                      stacklevel=2)
    eroded = grey_erosion(input, size, footprint, structure, None, mode, cval,
                          origin)
    return grey_dilation(eroded, size, footprint, structure, output, mode,
                         cval, origin)
781
+
782
+
783
def morphological_gradient(
    input,
    size=None,
    footprint=None,
    structure=None,
    output=None,
    mode='reflect',
    cval=0.0,
    origin=0,
):
    """
    Multidimensional morphological gradient.

    The morphological gradient is calculated as the difference between a
    dilation and an erosion of the input with a given structuring element.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the morphological gradient. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for morphological gradient.
            Non-zero values give the set of neighbors of the center over
            which opening is chosen.
        structure (array of ints): Structuring element used for the
            morphological gradient. ``structure`` may be a non-flat
            structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The morphological gradient of the input.

    .. seealso:: :func:`scipy.ndimage.morphological_gradient`
    """
    dilated = grey_dilation(
        input, size, footprint, structure, None, mode, cval, origin
    )
    if not isinstance(output, cupy.ndarray):
        eroded = grey_erosion(
            input, size, footprint, structure, None, mode, cval, origin
        )
        return dilated - eroded
    # erode directly into the user-supplied output, then subtract in-place
    grey_erosion(
        input, size, footprint, structure, output, mode, cval, origin
    )
    return cupy.subtract(dilated, output, output)
840
+
841
+
842
def morphological_laplace(
    input,
    size=None,
    footprint=None,
    structure=None,
    output=None,
    mode='reflect',
    cval=0.0,
    origin=0,
):
    """
    Multidimensional morphological laplace.

    Computed as ``grey_dilation(input) + grey_erosion(input) - 2 * input``.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the morphological laplace. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for morphological laplace.
            Non-zero values give the set of neighbors of the center over
            which opening is chosen.
        structure (array of ints): Structuring element used for the
            morphological laplace. ``structure`` may be a non-flat
            structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The morphological laplace of the input.

    .. seealso:: :func:`scipy.ndimage.morphological_laplace`
    """
    dilated = grey_dilation(
        input, size, footprint, structure, None, mode, cval, origin
    )
    if not isinstance(output, cupy.ndarray):
        # accumulate into the erosion result: dilation + erosion - 2 * input
        acc = grey_erosion(
            input, size, footprint, structure, None, mode, cval, origin
        )
        cupy.add(dilated, acc, acc)
        cupy.subtract(acc, input, acc)
        cupy.subtract(acc, input, acc)
        return acc
    # erode directly into the user-supplied output, then accumulate in-place
    grey_erosion(
        input, size, footprint, structure, output, mode, cval, origin
    )
    cupy.add(dilated, output, output)
    cupy.subtract(output, input, output)
    return cupy.subtract(output, input, output)
902
+
903
+
904
def white_tophat(
    input,
    size=None,
    footprint=None,
    structure=None,
    output=None,
    mode='reflect',
    cval=0.0,
    origin=0,
):
    """
    Multidimensional white tophat filter.

    Computed as ``input - grey_opening(input)`` (XOR for boolean inputs).

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the white tophat. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for the white tophat. Non-zero
            values give the set of neighbors of the center over which opening
            is chosen.
        structure (array of ints): Structuring element used for the white
            tophat. ``structure`` may be a non-flat structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: Result of the filter of ``input`` with ``structure``.

    .. seealso:: :func:`scipy.ndimage.white_tophat`
    """
    if size is not None and footprint is not None:
        warnings.warn(
            'ignoring size because footprint is set', UserWarning, stacklevel=2
        )
    # greyscale opening = erosion followed by dilation
    opened = grey_erosion(
        input, size, footprint, structure, None, mode, cval, origin
    )
    opened = grey_dilation(
        opened, size, footprint, structure, output, mode, cval, origin
    )
    if input.dtype == numpy.bool_ and opened.dtype == numpy.bool_:
        # subtraction is undefined for booleans; XOR gives the same result
        cupy.bitwise_xor(input, opened, out=opened)
    else:
        cupy.subtract(input, opened, out=opened)
    return opened
960
+
961
+
962
def black_tophat(
    input,
    size=None,
    footprint=None,
    structure=None,
    output=None,
    mode='reflect',
    cval=0.0,
    origin=0,
):
    """
    Multidimensional black tophat filter.

    Computed as ``grey_closing(input) - input`` (XOR for boolean inputs).

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the black tophat. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for the black tophat. Non-zero
            values give the set of neighbors of the center over which opening
            is chosen.
        structure (array of ints): Structuring element used for the black
            tophat. ``structure`` may be a non-flat structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: Result of the filter of ``input`` with ``structure``.

    .. seealso:: :func:`scipy.ndimage.black_tophat`
    """
    if size is not None and footprint is not None:
        warnings.warn(
            'ignoring size because footprint is set', UserWarning, stacklevel=2
        )
    # greyscale closing = dilation followed by erosion
    closed = grey_dilation(
        input, size, footprint, structure, None, mode, cval, origin
    )
    closed = grey_erosion(
        closed, size, footprint, structure, output, mode, cval, origin
    )
    if input.dtype == numpy.bool_ and closed.dtype == numpy.bool_:
        # subtraction is undefined for booleans; XOR gives the same result
        cupy.bitwise_xor(closed, input, out=closed)
    else:
        cupy.subtract(closed, input, out=closed)
    return closed
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_2d.py ADDED
@@ -0,0 +1,503 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import numbers
3
+ import os
4
+
5
+ import cupy
6
+
7
+ from ._util import _get_inttype
8
+
9
if hasattr(math, 'lcm'):
    # Python >= 3.9 provides lcm directly
    lcm = math.lcm
else:
    # Fallback implementation of least common multiple (lcm)
    def _lcm(a, b):
        """Least common multiple of two integers."""
        return abs(b * (a // math.gcd(a, b)))

    def lcm(*integers):
        """Least common multiple of any number of integers (1 if none)."""
        if not all(isinstance(a, numbers.Integral) for a in integers):
            raise TypeError("all arguments must be integers")
        if not integers:
            return 1
        result = abs(int(integers[0]))
        for value in integers[1:]:
            result = _lcm(result, int(value))
        return result
29
+
30
+
31
# Preamble prepended to the raw PBA+ 2D kernels; the placeholders are filled
# in by get_pba2d_src() via str.format.
pba2d_defines_template = """

// MARKER is used to mark blank pixels in the texture.
// Any uncolored pixels will have x = MARKER.
// Input texture should have x = MARKER for all pixels other than sites
#define MARKER {marker}
#define BLOCKSIZE {block_size_2d}
#define pixel_int2_t {pixel_int2_t} // typically short2 (int2 for images with > 32k pixels per side)
#define make_pixel(x, y) {make_pixel_func}(x, y) // typically make_short2 (make_int2 images with > 32k pixels per side

"""  # noqa
42
+
43
+
44
def _init_marker(int_dtype):
    """Return the sentinel ("blank pixel") value appropriate to ``int_dtype``.

    Args:
        int_dtype: either ``cupy.int16`` or ``cupy.int32``.

    Returns:
        int: the marker value used to tag non-site pixels.

    Raises:
        ValueError: for any other dtype.
    """
    if int_dtype == cupy.int16:
        # cupy.iinfo(cupy.int16).min
        return -32768
    if int_dtype == cupy.int32:
        # half of the int32 minimum so that intermediate int arithmetic does
        # not need promotion to 64-bit integers
        return -2147483648 // 2
    raise ValueError(
        "expected int_dtype to be either cupy.int16 or cupy.int32"
    )
58
+
59
+
60
@cupy.memoize(True)
def get_pba2d_src(block_size_2d=64, marker=-32768, pixel_int2_t="short2"):
    """Return the complete CUDA source for the 2D PBA+ kernels.

    The defines preamble is rendered from ``pba2d_defines_template`` and the
    raw kernels are read from ``cuda/pba_kernels_2d.h`` next to this module.
    """
    defines = pba2d_defines_template.format(
        block_size_2d=block_size_2d,
        marker=marker,
        pixel_int2_t=pixel_int2_t,
        make_pixel_func="make_" + pixel_int2_t,
    )
    kernel_path = os.path.join(
        os.path.dirname(__file__), "cuda", "pba_kernels_2d.h"
    )
    with open(kernel_path, "rt") as f:
        kernels = "\n".join(f.readlines())
    return defines + kernels
76
+
77
+
78
+ def _get_block_size(check_warp_size=False):
79
+ if check_warp_size:
80
+ dev = cupy.cuda.runtime.getDevice()
81
+ device_properties = cupy.cuda.runtime.getDeviceProperties(dev)
82
+ return int(device_properties["warpSize"])
83
+ else:
84
+ return 32
85
+
86
+
87
@cupy.memoize(for_each_device=True)
def _get_pack_kernel(int_type, marker=-32768):
    """Pack coordinates into array of type short2 (or int2).

    This kernel works with 2D input data, `arr` (typically boolean).

    The output array, `out` will be 3D with a signed integer dtype.
    It will have size 2 on the last axis so that it can be viewed as a CUDA
    vector type such as `int2` or `float2`.
    """
    # NOTE(review): ``int_type`` is not referenced in the kernel body below;
    # it appears to only distinguish memoization cache entries — confirm.
    # Truthy (non-site) pixels get (marker, marker); other pixels store their
    # own (x, y) coordinates.
    code = f"""
    if (arr[i]) {{
        out[2*i] = {marker};
        out[2*i + 1] = {marker};
    }} else {{
        int shape_1 = arr.shape()[1];
        int _i = i;
        int ind_1 = _i % shape_1;
        _i /= shape_1;
        out[2*i] = ind_1;  // out.x
        out[2*i + 1] = _i;  // out.y
    }}
    """
    return cupy.ElementwiseKernel(
        in_params="raw B arr",
        out_params="raw I out",
        operation=code,
        options=("--std=c++11",),
    )
116
+
117
+
118
def _pack_int2(arr, marker=-32768, int_dtype=cupy.int16):
    """Pack a 2D array into an interleaved coordinate image.

    Site pixels store their own (x, y) coordinates; non-site pixels store
    ``(marker, marker)``. The result is viewed as a structured int2 dtype.

    Raises:
        ValueError: if ``arr`` is not 2-dimensional.
    """
    if arr.ndim != 2:
        raise ValueError("only 2d arr supported")
    out = cupy.zeros(arr.shape + (2,), dtype=int_dtype)
    assert out.size == 2 * arr.size
    kernel = _get_pack_kernel(
        int_type="short" if int_dtype == cupy.int16 else "int",
        marker=marker
    )
    kernel(arr, out, size=arr.size)
    int2_dtype = cupy.dtype({"names": ["x", "y"], "formats": [int_dtype] * 2})
    return cupy.squeeze(out.view(int2_dtype))
131
+
132
+
133
def _unpack_int2(img, make_copy=False, int_dtype=cupy.int16):
    """View an int2-packed image as a ``(..., 2)`` array of scalar ints."""
    unpacked = img.view(int_dtype).reshape(img.shape + (2,))
    return unpacked.copy() if make_copy else unpacked
138
+
139
+
140
+ def _determine_padding(shape, padded_size, block_size):
141
+ # all kernels assume equal size along both axes, so pad up to equal size if
142
+ # shape is not isotropic
143
+ orig_sy, orig_sx = shape
144
+ if orig_sx != padded_size or orig_sy != padded_size:
145
+ padding_width = (
146
+ (0, padded_size - orig_sy), (0, padded_size - orig_sx)
147
+ )
148
+ else:
149
+ padding_width = None
150
+ return padding_width
151
+
152
+
153
+ def _generate_shape(ndim, int_type, var_name="out", raw_var=True):
154
+ code = ""
155
+ if not raw_var:
156
+ var_name = "_raw_" + var_name
157
+ for i in range(ndim):
158
+ code += f"{int_type} shape_{i} = {var_name}.shape()[{i}];\n"
159
+ return code
160
+
161
+
162
+ def _generate_indices_ops(ndim, int_type):
163
+ code = f"{int_type} _i = i;\n"
164
+ for j in range(ndim - 1, 0, -1):
165
+ code += f"{int_type} ind_{j} = _i % shape_{j};\n_i /= shape_{j};\n"
166
+ code += f"{int_type} ind_0 = _i;"
167
+ return code
168
+
169
+
170
def _get_distance_kernel_code(int_type, dist_int_type, raw_out_var=True):
    """Generate elementwise-kernel code computing isotropic Euclidean
    distances from per-pixel nearest-site coordinates ``y``/``x``.

    ``dist_int_type`` is the integer type holding the squared distance
    (chosen by the caller to avoid overflow).
    """
    code = _generate_shape(
        ndim=2, int_type=int_type, var_name="dist", raw_var=raw_out_var
    )
    code += _generate_indices_ops(ndim=2, int_type=int_type)
    # squared distance is accumulated in integers, then sqrt'd in float
    code += f"""
    {int_type} tmp;
    {dist_int_type} sq_dist;
    tmp = y[i] - ind_0;
    sq_dist = tmp * tmp;
    tmp = x[i] - ind_1;
    sq_dist += tmp * tmp;
    dist[i] = sqrt(static_cast<F>(sq_dist));
    """
    return code
185
+
186
+
187
@cupy.memoize(for_each_device=True)
def _get_distance_kernel(int_type, dist_int_type):
    """Returns kernel computing the Euclidean distance from coordinates."""
    return cupy.ElementwiseKernel(
        in_params="raw I y, raw I x",
        out_params="raw F dist",
        operation=_get_distance_kernel_code(
            int_type, dist_int_type, raw_out_var=True
        ),
        options=("--std=c++11",),
    )
199
+
200
+
201
def _get_aniso_distance_kernel_code(int_type, raw_out_var=True):
    """Generate elementwise-kernel code computing anisotropic Euclidean
    distances, scaling each axis offset by the corresponding ``sampling``
    entry before accumulating.
    """
    code = _generate_shape(
        ndim=2, int_type=int_type, var_name="dist", raw_var=raw_out_var
    )
    code += _generate_indices_ops(ndim=2, int_type=int_type)
    # offsets are scaled to physical units, so accumulation is in float
    code += """
    F tmp;
    F sq_dist;
    tmp = static_cast<F>(y[i] - ind_0) * sampling[0];
    sq_dist = tmp * tmp;
    tmp = static_cast<F>(x[i] - ind_1) * sampling[1];
    sq_dist += tmp * tmp;
    dist[i] = sqrt(sq_dist);
    """
    return code
216
+
217
+
218
@cupy.memoize(for_each_device=True)
def _get_aniso_distance_kernel(int_type):
    """Returns kernel computing the Euclidean distance from coordinates."""
    return cupy.ElementwiseKernel(
        in_params="raw I y, raw I x, raw F sampling",
        out_params="raw F dist",
        operation=_get_aniso_distance_kernel_code(int_type, raw_out_var=True),
        options=("--std=c++11",),
    )
228
+
229
+
230
+ def _distance_tranform_arg_check(distances_out, indices_out,
231
+ return_distances, return_indices):
232
+ """Raise a RuntimeError if the arguments are invalid"""
233
+ error_msgs = []
234
+ if (not return_distances) and (not return_indices):
235
+ error_msgs.append(
236
+ "at least one of return_distances/return_indices must be True")
237
+ if distances_out and not return_distances:
238
+ error_msgs.append(
239
+ "return_distances must be True if distances is supplied"
240
+ )
241
+ if indices_out and not return_indices:
242
+ error_msgs.append("return_indices must be True if indices is supplied")
243
+ if error_msgs:
244
+ raise RuntimeError(", ".join(error_msgs))
245
+
246
+
247
+ def _check_distances(distances, shape, dtype):
248
+ if distances.shape != shape:
249
+ raise RuntimeError("distances array has wrong shape")
250
+ if distances.dtype != dtype:
251
+ raise RuntimeError(
252
+ f"distances array must have dtype: {dtype}")
253
+
254
+
255
+ def _check_indices(indices, shape, itemsize):
256
+ if indices.shape != shape:
257
+ raise RuntimeError("indices array has wrong shape")
258
+ if indices.dtype.kind not in 'iu':
259
+ raise RuntimeError(
260
+ "indices array must have an integer dtype"
261
+ )
262
+ elif indices.dtype.itemsize < itemsize:
263
+ raise RuntimeError(
264
+ f"indices dtype must have itemsize > {itemsize}"
265
+ )
266
+
267
+
268
def _pba_2d(arr, sampling=None, return_distances=True, return_indices=False,
            block_params=None, check_warp_size=False, *,
            float64_distances=False, distances=None, indices=None):
    """2D Euclidean distance transform via the Parallel Banding Algorithm.

    Args:
        arr (cupy.ndarray): 2D input; non-zero pixels are treated as
            non-sites (background), zero pixels as sites.
        sampling (sequence of float or None): per-axis pixel spacing; when
            given, anisotropic kernels are used.
        return_distances (bool): whether to compute/return distances.
        return_indices (bool): whether to compute/return nearest-site
            coordinates.
        block_params (tuple or None): (m1, m2, m3) banding parameters for the
            three PBA phases; defaults are derived from the image size.
        check_warp_size (bool): query the device warp size instead of
            assuming 32.
        float64_distances (bool): output float64 instead of float32
            distances.
        distances, indices (cupy.ndarray or None): optional preallocated
            output arrays.

    Returns:
        tuple: ``(distances,)``, ``(indices,)`` or ``(distances, indices)``
        depending on the return flags.
    """
    indices_inplace = isinstance(indices, cupy.ndarray)
    dt_inplace = isinstance(distances, cupy.ndarray)
    _distance_tranform_arg_check(
        dt_inplace, indices_inplace, return_distances, return_indices
    )

    # input_arr: a 2D image
    # For each site at (x, y), the pixel at coordinate (x, y) should contain
    # the pair (x, y). Pixels that are not sites should contain the pair
    # (MARKER, MARKER)

    # Note: could query warp size here, but for now just assume 32 to avoid
    # overhead of querying properties
    block_size = _get_block_size(check_warp_size)

    if block_params is None:
        padded_size = math.ceil(max(arr.shape) / block_size) * block_size

        # should be <= size / block_size. sy must be a multiple of m1
        m1 = padded_size // block_size
        # size must be a multiple of m2
        m2 = max(1, min(padded_size // block_size, block_size))
        # m2 must also be a power of two
        m2 = 2**math.floor(math.log2(m2))
        if padded_size % m2 != 0:
            raise RuntimeError("error in setting default m2")
        m3 = min(min(m1, m2), 2)
    else:
        if any(p < 1 for p in block_params):
            raise ValueError("(m1, m2, m3) in blockparams must be >= 1")
        m1, m2, m3 = block_params
        if math.log2(m2) % 1 > 1e-5:
            raise ValueError("m2 must be a power of 2")
        multiple = lcm(block_size, m1, m2, m3)
        padded_size = math.ceil(max(arr.shape) / multiple) * multiple

    if m1 > padded_size // block_size:
        raise ValueError(
            f"m1 too large. must be <= padded arr.shape[0] // {block_size}"
        )
    if m2 > padded_size // block_size:
        raise ValueError(
            f"m2 too large. must be <= padded arr.shape[1] // {block_size}"
        )
    if m3 > padded_size // block_size:
        raise ValueError(
            f"m3 too large. must be <= padded arr.shape[1] // {block_size}"
        )
    for m in (m1, m2, m3):
        if padded_size % m != 0:
            # fixed typo: "disivible" -> "divisible"
            raise ValueError(
                f"Largest dimension of image ({padded_size}) must be evenly "
                f"divisible by each element of block_params: {(m1, m2, m3)}."
            )

    # pick the narrowest packed-coordinate type that can hold the coordinates
    shape_max = max(arr.shape)
    if shape_max <= 32768:
        int_dtype = cupy.int16
        pixel_int2_type = "short2"
    else:
        if shape_max > (1 << 24):
            # limit to coordinate range to 2**24 due to use of __mul24 in
            # coordinate TOID macro
            raise ValueError(
                f"maximum axis size of {1 << 24} exceeded, for image with "
                f"shape {arr.shape}"
            )
        int_dtype = cupy.int32
        pixel_int2_type = "int2"

    marker = _init_marker(int_dtype)

    orig_sy, orig_sx = arr.shape
    padding_width = _determine_padding(arr.shape, padded_size, block_size)
    if padding_width is not None:
        # pad with 1 (non-site) so padding does not create spurious sites
        arr = cupy.pad(arr, padding_width, mode="constant", constant_values=1)
    size = arr.shape[0]

    input_arr = _pack_int2(arr, marker=marker, int_dtype=int_dtype)
    output = cupy.zeros_like(input_arr)

    int2_dtype = cupy.dtype({"names": ["x", "y"], "formats": [int_dtype] * 2})
    margin = cupy.empty((2 * m1 * size,), dtype=int2_dtype)

    # phase 1 of PBA. m1 must divide texture size and be <= 64
    pba2d = cupy.RawModule(
        code=get_pba2d_src(
            block_size_2d=block_size,
            marker=marker,
            pixel_int2_t=pixel_int2_type,
        )
    )
    kernelFloodDown = pba2d.get_function("kernelFloodDown")
    kernelFloodUp = pba2d.get_function("kernelFloodUp")
    kernelPropagateInterband = pba2d.get_function("kernelPropagateInterband")
    kernelUpdateVertical = pba2d.get_function("kernelUpdateVertical")
    kernelCreateForwardPointers = pba2d.get_function(
        "kernelCreateForwardPointers"
    )
    kernelDoubleToSingleList = pba2d.get_function("kernelDoubleToSingleList")

    if sampling is None:
        kernelProximatePoints = pba2d.get_function("kernelProximatePoints")
        kernelMergeBands = pba2d.get_function("kernelMergeBands")
        kernelColor = pba2d.get_function("kernelColor")
    else:
        # anisotropic variants take the per-axis spacing as extra arguments
        kernelProximatePoints = pba2d.get_function(
            "kernelProximatePointsWithSpacing"
        )
        kernelMergeBands = pba2d.get_function("kernelMergeBandsWithSpacing")
        kernelColor = pba2d.get_function("kernelColorWithSpacing")

    block = (block_size, 1, 1)
    grid = (math.ceil(size / block[0]), m1, 1)
    bandSize1 = size // m1
    # kernelFloodDown modifies input_arr in-place
    kernelFloodDown(
        grid,
        block,
        (input_arr, input_arr, size, bandSize1),
    )
    # kernelFloodUp modifies input_arr in-place
    kernelFloodUp(
        grid,
        block,
        (input_arr, input_arr, size, bandSize1),
    )
    # kernelFloodUp fills values into margin
    kernelPropagateInterband(
        grid,
        block,
        (input_arr, margin, size, bandSize1),
    )
    # kernelUpdateVertical stores output into an intermediate array of
    # transposed shape
    kernelUpdateVertical(
        grid,
        block,
        (input_arr, margin, output, size, bandSize1),
    )

    # phase 2
    block = (block_size, 1, 1)
    grid = (math.ceil(size / block[0]), m2, 1)
    bandSize2 = size // m2
    if sampling is None:
        sampling_args = ()
    else:
        # Originally the shape is (y, x) and sampling[1] corresponds to y.
        # However, kernelUpdateVertical transposed the image, so
        # we are now working with (x, y) instead. Need sampling ordered
        # accordingly.
        # NOTE(review): sampling is forwarded in its original order here —
        # confirm against the kernels' expected argument order.
        sampling = tuple(map(float, sampling))
        sampling_args = (sampling[0], sampling[1])
    kernelProximatePoints(
        grid,
        block,
        (output, input_arr, size, bandSize2) + sampling_args,
    )
    kernelCreateForwardPointers(
        grid,
        block,
        (input_arr, input_arr, size, bandSize2),
    )
    # Repeatedly merging two bands into one
    noBand = m2
    while noBand > 1:
        grid = (math.ceil(size / block[0]), noBand // 2)
        kernelMergeBands(
            grid,
            block,
            (output, input_arr, input_arr, size, size // noBand) + sampling_args,  # noqa
        )
        noBand //= 2
    # Replace the forward link with the X coordinate of the seed to remove
    # the need of looking at the other texture. We need it for coloring.
    grid = (math.ceil(size / block[0]), size)
    kernelDoubleToSingleList(
        grid,
        block,
        (output, input_arr, input_arr, size),
    )

    # Phase 3 of PBA
    block = (block_size, m3, 1)
    grid = (math.ceil(size / block[0]), 1, 1)
    kernelColor(
        grid,
        block,
        (input_arr, output, size) + sampling_args,
    )

    output = _unpack_int2(output, make_copy=False, int_dtype=int_dtype)
    # make sure to crop any padding that was added here!
    x = output[:orig_sy, :orig_sx, 0]
    y = output[:orig_sy, :orig_sx, 1]

    vals = ()
    if return_distances:
        dtype_out = cupy.float64 if float64_distances else cupy.float32
        if dt_inplace:
            _check_distances(distances, y.shape, dtype_out)
        else:
            distances = cupy.zeros(y.shape, dtype=dtype_out)

        # make sure maximum possible distance doesn't overflow
        max_possible_dist = sum((s - 1)**2 for s in y.shape)
        dist_int_type = "int" if max_possible_dist < 2**31 else "ptrdiff_t"

        if sampling is None:
            distance_kernel = _get_distance_kernel(
                int_type=_get_inttype(distances),
                dist_int_type=dist_int_type,
            )
            distance_kernel(y, x, distances, size=distances.size)
        else:
            distance_kernel = _get_aniso_distance_kernel(
                int_type=_get_inttype(distances),
            )
            sampling = cupy.asarray(sampling, dtype=dtype_out)
            distance_kernel(y, x, sampling, distances, size=distances.size)

        vals = vals + (distances,)
    if return_indices:
        if indices_inplace:
            _check_indices(indices, (arr.ndim,) + arr.shape, x.dtype.itemsize)
            indices[0, ...] = y
            indices[1, ...] = x
        else:
            indices = cupy.stack((y, x), axis=0)
        vals = vals + (indices,)
    return vals
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_3d.py ADDED
@@ -0,0 +1,491 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import os
3
+
4
+ import cupy
5
+ import numpy as np
6
+
7
+ from ._util import _get_inttype
8
+ from ._pba_2d import (_check_distances, _check_indices,
9
+ _distance_tranform_arg_check, _generate_indices_ops,
10
+ _generate_shape, _get_block_size, lcm)
11
+
12
# Preamble prepended to the raw PBA+ 3D kernels; placeholders are filled in
# by get_pba3d_src() via str.format.
pba3d_defines_template = """

#define MARKER {marker}
#define MAX_INT {max_int}
#define BLOCKSIZE {block_size_3d}

"""

# For efficiency, the original PBA+ packs three 10-bit integers and two binary
# flags into a single 32-bit integer. The defines in
# `pba3d_defines_encode_32bit` handle this format.
pba3d_defines_encode_32bit = """
// Sites     : ENCODE(x, y, z, 0, 0)
// Not sites : ENCODE(0, 0, 0, 1, 0) or MARKER
#define ENCODED_INT_TYPE int
#define ZERO 0
#define ONE 1
#define ENCODE(x, y, z, a, b)  (((x) << 20) | ((y) << 10) | (z) | ((a) << 31) | ((b) << 30))
#define DECODE(value, x, y, z) \
    x = ((value) >> 20) & 0x3ff; \
    y = ((value) >> 10) & 0x3ff; \
    z = (value) & 0x3ff

#define NOTSITE(value)  (((value) >> 31) & 1)
#define HASNEXT(value)  (((value) >> 30) & 1)

#define GET_X(value)    (((value) >> 20) & 0x3ff)
#define GET_Y(value)    (((value) >> 10) & 0x3ff)
#define GET_Z(value)    ((NOTSITE((value))) ? MAX_INT : ((value) & 0x3ff))

"""  # noqa


# 64bit version of ENCODE/DECODE to allow a 20-bit integer per coordinate axis.
pba3d_defines_encode_64bit = """
// Sites     : ENCODE(x, y, z, 0, 0)
// Not sites : ENCODE(0, 0, 0, 1, 0) or MARKER
#define ENCODED_INT_TYPE long long
#define ZERO 0L
#define ONE 1L
#define ENCODE(x, y, z, a, b)  (((x) << 40) | ((y) << 20) | (z) | ((a) << 61) | ((b) << 60))
#define DECODE(value, x, y, z) \
    x = ((value) >> 40) & 0xfffff; \
    y = ((value) >> 20) & 0xfffff; \
    z = (value) & 0xfffff

#define NOTSITE(value)  (((value) >> 61) & 1)
#define HASNEXT(value)  (((value) >> 60) & 1)

#define GET_X(value)    (((value) >> 40) & 0xfffff)
#define GET_Y(value)    (((value) >> 20) & 0xfffff)
#define GET_Z(value)    ((NOTSITE((value))) ? MAX_INT : ((value) & 0xfffff))

"""  # noqa
66
+
67
+
68
@cupy.memoize(True)
def get_pba3d_src(block_size_3d=32, marker=-2147483648, max_int=2147483647,
                  size_max=1024):
    """Return the complete CUDA source for the 3D PBA+ kernels.

    Axis sizes up to 1024 fit three 10-bit coordinates in a 32-bit packed
    integer; larger volumes use the 64-bit (20 bits per axis) encoding.
    """
    src = pba3d_defines_template.format(
        block_size_3d=block_size_3d, marker=marker, max_int=max_int
    )
    if size_max > 1024:
        src += pba3d_defines_encode_64bit
    else:
        src += pba3d_defines_encode_32bit
    kernel_path = os.path.join(
        os.path.dirname(__file__), "cuda", "pba_kernels_3d.h"
    )
    with open(kernel_path, "rt") as f:
        src += "\n".join(f.readlines())
    return src
83
+
84
+
85
+ @cupy.memoize(for_each_device=True)
86
+ def _get_encode3d_kernel(size_max, marker=-2147483648):
87
+ """Pack array coordinates into a single integer."""
88
+ if size_max > 1024:
89
+ int_type = "ptrdiff_t" # int64_t
90
+ else:
91
+ int_type = "int" # int32_t
92
+
93
+ # value must match TOID macro in the C++ code!
94
+ if size_max > 1024:
95
+ value = """(((x) << 40) | ((y) << 20) | (z))"""
96
+ else:
97
+ value = """(((x) << 20) | ((y) << 10) | (z))"""
98
+
99
+ code = f"""
100
+ if (arr[i]) {{
101
+ out[i] = {marker};
102
+ }} else {{
103
+ {int_type} shape_2 = arr.shape()[2];
104
+ {int_type} shape_1 = arr.shape()[1];
105
+ {int_type} _i = i;
106
+ {int_type} x = _i % shape_2;
107
+ _i /= shape_2;
108
+ {int_type} y = _i % shape_1;
109
+ _i /= shape_1;
110
+ {int_type} z = _i;
111
+ out[i] = {value};
112
+ }}
113
+ """
114
+ return cupy.ElementwiseKernel(
115
+ in_params="raw B arr",
116
+ out_params="raw I out",
117
+ operation=code,
118
+ options=("--std=c++11",),
119
+ )
120
+
121
+
122
+ def encode3d(arr, marker=-2147483648, bit_depth=32, size_max=1024):
123
+ if arr.ndim != 3:
124
+ raise ValueError("only 3d arr supported")
125
+ if bit_depth not in [32, 64]:
126
+ raise ValueError("only bit_depth of 32 or 64 is supported")
127
+ if size_max > 1024:
128
+ dtype = np.int64
129
+ else:
130
+ dtype = np.int32
131
+ image = cupy.zeros(arr.shape, dtype=dtype, order="C")
132
+ kern = _get_encode3d_kernel(size_max, marker=marker)
133
+ kern(arr, image, size=image.size)
134
+ return image
135
+
136
+
137
+ def _get_decode3d_code(size_max, int_type=""):
138
+ # bit shifts here must match those used in the encode3d kernel
139
+ if size_max > 1024:
140
+ code = f"""
141
+ {int_type} x = (encoded >> 40) & 0xfffff;
142
+ {int_type} y = (encoded >> 20) & 0xfffff;
143
+ {int_type} z = encoded & 0xfffff;
144
+ """
145
+ else:
146
+ code = f"""
147
+ {int_type} x = (encoded >> 20) & 0x3ff;
148
+ {int_type} y = (encoded >> 10) & 0x3ff;
149
+ {int_type} z = encoded & 0x3ff;
150
+ """
151
+ return code
152
+
153
+
154
+ @cupy.memoize(for_each_device=True)
155
+ def _get_decode3d_kernel(size_max):
156
+ """Unpack 3 coordinates encoded as a single integer."""
157
+
158
+ # int_type = "" here because x, y, z were already allocated externally
159
+ code = _get_decode3d_code(size_max, int_type="")
160
+
161
+ return cupy.ElementwiseKernel(
162
+ in_params="E encoded",
163
+ out_params="I x, I y, I z",
164
+ operation=code,
165
+ options=("--std=c++11",),
166
+ )
167
+
168
+
169
+ def decode3d(encoded, size_max=1024):
170
+ coord_dtype = cupy.int32 if size_max < 2**31 else cupy.int64
171
+ x = cupy.empty_like(encoded, dtype=coord_dtype)
172
+ y = cupy.empty_like(x)
173
+ z = cupy.empty_like(x)
174
+ kern = _get_decode3d_kernel(size_max)
175
+ kern(encoded, x, y, z)
176
+ return (x, y, z)
177
+
178
+
179
+ def _determine_padding(shape, block_size, m1, m2, m3, blockx, blocky):
180
+ # TODO: can possibly revise to consider only particular factors for LCM on
181
+ # a given axis
182
+ LCM = lcm(block_size, m1, m2, m3, blockx, blocky)
183
+ orig_sz, orig_sy, orig_sx = shape
184
+ round_up = False
185
+ if orig_sx % LCM != 0:
186
+ # round up size to a multiple of the band size
187
+ round_up = True
188
+ sx = LCM * math.ceil(orig_sx / LCM)
189
+ else:
190
+ sx = orig_sx
191
+ if orig_sy % LCM != 0:
192
+ # round up size to a multiple of the band size
193
+ round_up = True
194
+ sy = LCM * math.ceil(orig_sy / LCM)
195
+ else:
196
+ sy = orig_sy
197
+ if orig_sz % LCM != 0:
198
+ # round up size to a multiple of the band size
199
+ round_up = True
200
+ sz = LCM * math.ceil(orig_sz / LCM)
201
+ else:
202
+ sz = orig_sz
203
+
204
+ aniso = not (sx == sy == sz)
205
+ if aniso or round_up:
206
+ smax = max(sz, sy, sx)
207
+ padding_width = (
208
+ (0, smax - orig_sz), (0, smax - orig_sy), (0, smax - orig_sx)
209
+ )
210
+ else:
211
+ padding_width = None
212
+ return padding_width
213
+
214
+
215
+ def _generate_distance_computation(int_type, dist_int_type):
216
+ """
217
+ Compute euclidean distance from current coordinate (ind_0, ind_1, ind_2) to
218
+ the coordinates of the nearest point (z, y, x)."""
219
+ return f"""
220
+ {int_type} tmp = z - ind_0;
221
+ {dist_int_type} sq_dist = tmp * tmp;
222
+ tmp = y - ind_1;
223
+ sq_dist += tmp * tmp;
224
+ tmp = x - ind_2;
225
+ sq_dist += tmp * tmp;
226
+ dist[i] = sqrt(static_cast<F>(sq_dist));
227
+ """
228
+
229
+
230
+ def _get_distance_kernel_code(int_type, dist_int_type, raw_out_var=True):
231
+ code = _generate_shape(
232
+ ndim=3, int_type=int_type, var_name="dist", raw_var=raw_out_var
233
+ )
234
+ code += _generate_indices_ops(ndim=3, int_type=int_type)
235
+ code += _generate_distance_computation(int_type, dist_int_type)
236
+ return code
237
+
238
+
239
+ @cupy.memoize(for_each_device=True)
240
+ def _get_distance_kernel(int_type, large_dist=False):
241
+ """Returns kernel computing the Euclidean distance from coordinates."""
242
+ dist_int_type = "ptrdiff_t" if large_dist else "int"
243
+ operation = _get_distance_kernel_code(
244
+ int_type, dist_int_type, raw_out_var=True
245
+ )
246
+ return cupy.ElementwiseKernel(
247
+ in_params="I z, I y, I x",
248
+ out_params="raw F dist",
249
+ operation=operation,
250
+ options=("--std=c++11",),
251
+ )
252
+
253
+
254
+ def _generate_aniso_distance_computation():
255
+ """
256
+ Compute euclidean distance from current coordinate (ind_0, ind_1, ind_2) to
257
+ the coordinates of the nearest point (z, y, x)."""
258
+ return """
259
+ F tmp = static_cast<F>(z - ind_0) * sampling[0];
260
+ F sq_dist = tmp * tmp;
261
+ tmp = static_cast<F>(y - ind_1) * sampling[1];
262
+ sq_dist += tmp * tmp;
263
+ tmp = static_cast<F>(x - ind_2) * sampling[2];
264
+ sq_dist += tmp * tmp;
265
+ dist[i] = sqrt(static_cast<F>(sq_dist));
266
+ """
267
+
268
+
269
+ def _get_aniso_distance_kernel_code(int_type, raw_out_var=True):
270
+ code = _generate_shape(
271
+ ndim=3, int_type=int_type, var_name="dist", raw_var=raw_out_var
272
+ )
273
+ code += _generate_indices_ops(ndim=3, int_type=int_type)
274
+ code += _generate_aniso_distance_computation()
275
+ return code
276
+
277
+
278
+ @cupy.memoize(for_each_device=True)
279
+ def _get_aniso_distance_kernel(int_type):
280
+ """Returns kernel computing the Euclidean distance from coordinates with
281
+ axis spacing != 1."""
282
+ operation = _get_aniso_distance_kernel_code(
283
+ int_type, raw_out_var=True
284
+ )
285
+ return cupy.ElementwiseKernel(
286
+ in_params="I z, I y, I x, raw F sampling",
287
+ out_params="raw F dist",
288
+ operation=operation,
289
+ options=("--std=c++11",),
290
+ )
291
+
292
+
293
+ @cupy.memoize(for_each_device=True)
294
+ def _get_decode_as_distance_kernel(size_max, large_dist=False, sampling=None):
295
+ """Fused decode3d and distance computation.
296
+
297
+ This kernel is for use when `return_distances=True`, but
298
+ `return_indices=False`. It replaces the separate calls to
299
+ `_get_decode3d_kernel` and `_get_distance_kernel`, avoiding the overhead of
300
+ generating full arrays containing the coordinates since the coordinate
301
+ arrays are not going to be returned.
302
+ """
303
+ if sampling is None:
304
+ dist_int_type = "ptrdiff_t" if large_dist else "int"
305
+ int_type = "int"
306
+
307
+ # Step 1: decode the (z, y, x) coordinate
308
+ code = _get_decode3d_code(size_max, int_type=int_type)
309
+
310
+ # Step 2: compute the Euclidean distance based on this (z, y, x).
311
+ code += _generate_shape(
312
+ ndim=3, int_type=int_type, var_name="dist", raw_var=True
313
+ )
314
+ code += _generate_indices_ops(ndim=3, int_type=int_type)
315
+ if sampling is None:
316
+ code += _generate_distance_computation(int_type, dist_int_type)
317
+ in_params = "E encoded"
318
+ else:
319
+ code += _generate_aniso_distance_computation()
320
+ in_params = "E encoded, raw F sampling"
321
+ return cupy.ElementwiseKernel(
322
+ in_params=in_params,
323
+ out_params="raw F dist",
324
+ operation=code,
325
+ options=("--std=c++11",),
326
+ )
327
+
328
+
329
+ def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False,
330
+ block_params=None, check_warp_size=False, *,
331
+ float64_distances=False, distances=None, indices=None):
332
+
333
+ indices_inplace = isinstance(indices, cupy.ndarray)
334
+ dt_inplace = isinstance(distances, cupy.ndarray)
335
+ _distance_tranform_arg_check(
336
+ dt_inplace, indices_inplace, return_distances, return_indices
337
+ )
338
+
339
+ if arr.ndim != 3:
340
+ raise ValueError(f"expected a 3D array, got {arr.ndim}D")
341
+
342
+ if block_params is None:
343
+ m1 = 1
344
+ m2 = 1
345
+ m3 = 2
346
+ else:
347
+ m1, m2, m3 = block_params
348
+
349
+ # reduce blockx for small inputs
350
+ s_min = min(arr.shape)
351
+ if s_min <= 4:
352
+ blockx = 4
353
+ elif s_min <= 8:
354
+ blockx = 8
355
+ elif s_min <= 16:
356
+ blockx = 16
357
+ else:
358
+ blockx = 32
359
+ blocky = 4
360
+
361
+ block_size = _get_block_size(check_warp_size)
362
+
363
+ orig_sz, orig_sy, orig_sx = arr.shape
364
+ padding_width = _determine_padding(
365
+ arr.shape, block_size, m1, m2, m3, blockx, blocky
366
+ )
367
+ if padding_width is not None:
368
+ arr = cupy.pad(arr, padding_width, mode="constant", constant_values=1)
369
+ size = arr.shape[0]
370
+
371
+ # pba algorithm was implemented to use 32-bit integer to store compressed
372
+ # coordinates. input_arr will be C-contiguous, int32
373
+ size_max = max(arr.shape)
374
+ input_arr = encode3d(arr, size_max=size_max)
375
+ buffer_idx = 0
376
+ output = cupy.zeros_like(input_arr)
377
+ pba_images = [input_arr, output]
378
+
379
+ block = (blockx, blocky, 1)
380
+ grid = (size // block[0], size // block[1], 1)
381
+ pba3d = cupy.RawModule(
382
+ code=get_pba3d_src(block_size_3d=block_size, size_max=size_max)
383
+ )
384
+
385
+ kernelFloodZ = pba3d.get_function("kernelFloodZ")
386
+ if sampling is None:
387
+ kernelMaurerAxis = pba3d.get_function("kernelMaurerAxis")
388
+ kernelColorAxis = pba3d.get_function("kernelColorAxis")
389
+ sampling_args = ()
390
+ else:
391
+ kernelMaurerAxis = pba3d.get_function("kernelMaurerAxisWithSpacing")
392
+ kernelColorAxis = pba3d.get_function("kernelColorAxisWithSpacing")
393
+ sampling = tuple(map(float, sampling))
394
+ sampling_args = (sampling[2], sampling[1], sampling[0])
395
+
396
+ kernelFloodZ(
397
+ grid,
398
+ block,
399
+ (pba_images[buffer_idx], pba_images[1 - buffer_idx], size)
400
+ )
401
+ buffer_idx = 1 - buffer_idx
402
+
403
+ block = (blockx, blocky, 1)
404
+ grid = (size // block[0], size // block[1], 1)
405
+ kernelMaurerAxis(
406
+ grid,
407
+ block,
408
+ (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + sampling_args, # noqa
409
+ )
410
+
411
+ block = (block_size, m3, 1)
412
+ grid = (size // block[0], size, 1)
413
+ kernelColorAxis(
414
+ grid,
415
+ block,
416
+ (pba_images[1 - buffer_idx], pba_images[buffer_idx], size) + sampling_args, # noqa
417
+ )
418
+
419
+ if sampling is not None:
420
+ # kernelColorAxis transposes the first two axis, so have to reorder
421
+ # the sampling_args tuple correspondingly
422
+ sampling_args = (sampling[1], sampling[2], sampling[0])
423
+
424
+ block = (blockx, blocky, 1)
425
+ grid = (size // block[0], size // block[1], 1)
426
+ kernelMaurerAxis(
427
+ grid,
428
+ block,
429
+ (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + sampling_args, # noqa
430
+ )
431
+
432
+ block = (block_size, m3, 1)
433
+ grid = (size // block[0], size, 1)
434
+ kernelColorAxis(
435
+ grid,
436
+ block,
437
+ (pba_images[1 - buffer_idx], pba_images[buffer_idx], size) + sampling_args, # noqa
438
+ )
439
+ output = pba_images[buffer_idx]
440
+
441
+ if return_distances:
442
+ out_shape = (orig_sz, orig_sy, orig_sx)
443
+ dtype_out = cupy.float64 if float64_distances else cupy.float32
444
+ if dt_inplace:
445
+ _check_distances(distances, out_shape, dtype_out)
446
+ else:
447
+ distances = cupy.zeros(out_shape, dtype=dtype_out)
448
+
449
+ # make sure maximum possible distance doesn't overflow
450
+ max_possible_dist = sum((s - 1)**2 for s in out_shape)
451
+ large_dist = max_possible_dist >= 2**31
452
+
453
+ if not return_indices:
454
+ # Compute distances without forming explicit coordinate arrays.
455
+ kern = _get_decode_as_distance_kernel(
456
+ size_max=size_max,
457
+ large_dist=large_dist,
458
+ sampling=sampling
459
+ )
460
+ if sampling is None:
461
+ kern(output[:orig_sz, :orig_sy, :orig_sx], distances)
462
+ else:
463
+ sampling = cupy.asarray(sampling, dtype=distances.dtype)
464
+ kern(output[:orig_sz, :orig_sy, :orig_sx], sampling, distances)
465
+ return (distances,)
466
+
467
+ if return_indices:
468
+ x, y, z = decode3d(output[:orig_sz, :orig_sy, :orig_sx],
469
+ size_max=size_max)
470
+ vals = ()
471
+ if return_distances:
472
+ if sampling is None:
473
+ kern = _get_distance_kernel(
474
+ int_type=_get_inttype(distances), large_dist=large_dist,
475
+ )
476
+ kern(z, y, x, distances)
477
+ else:
478
+ kern = _get_aniso_distance_kernel(int_type=_get_inttype(distances))
479
+ sampling = cupy.asarray(sampling, dtype=distances.dtype)
480
+ kern(z, y, x, sampling, distances)
481
+ vals = vals + (distances,)
482
+ if return_indices:
483
+ if indices_inplace:
484
+ _check_indices(indices, (arr.ndim,) + arr.shape, x.dtype.itemsize)
485
+ indices[0, ...] = z
486
+ indices[1, ...] = y
487
+ indices[2, ...] = x
488
+ else:
489
+ indices = cupy.stack((z, y, x), axis=0)
490
+ vals = vals + (indices,)
491
+ return vals
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_kernel_weights.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Determination of spline kernel weights (adapted from SciPy)
2
+
3
+ See more verbose comments for each case there:
4
+ https://github.com/scipy/scipy/blob/eba29d69846ab1299976ff4af71c106188397ccc/scipy/ndimage/src/ni_splines.c#L7
5
+
6
+ ``spline_weights_inline`` is a dict where the key is the spline order and the
7
+ value is the spline weight initialization code.
8
+ """
9
+
10
+ spline_weights_inline = {}
11
+
12
+ # Note: This order = 1 case is currently unused (order = 1 has a different code
13
+ # path in _interp_kernels.py). I think that existing code is a bit more
14
+ # efficient.
15
+ spline_weights_inline[1] = '''
16
+ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
17
+ weights_{j}[0] = 1.0 - wx;
18
+ weights_{j}[1] = wx;
19
+ '''
20
+
21
+ spline_weights_inline[2] = '''
22
+ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
23
+ weights_{j}[1] = 0.75 - wx * wx;
24
+ wy = 0.5 - wx;
25
+ weights_{j}[0] = 0.5 * wy * wy;
26
+ weights_{j}[2] = 1.0 - weights_{j}[0] - weights_{j}[1];
27
+ '''
28
+
29
+ spline_weights_inline[3] = '''
30
+ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
31
+ wy = 1.0 - wx;
32
+ weights_{j}[1] = (wx * wx * (wx - 2.0) * 3.0 + 4.0) / 6.0;
33
+ weights_{j}[2] = (wy * wy * (wy - 2.0) * 3.0 + 4.0) / 6.0;
34
+ weights_{j}[0] = wy * wy * wy / 6.0;
35
+ weights_{j}[3] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2];
36
+ '''
37
+
38
+ spline_weights_inline[4] = '''
39
+ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
40
+ wy = wx * wx;
41
+ weights_{j}[2] = wy * (wy * 0.25 - 0.625) + 115.0 / 192.0;
42
+ wy = 1.0 + wx;
43
+ weights_{j}[1] = wy * (wy * (wy * (5.0 - wy) / 6.0 - 1.25) + 5.0 / 24.0) +
44
+ 55.0 / 96.0;
45
+ wy = 1.0 - wx;
46
+ weights_{j}[3] = wy * (wy * (wy * (5.0 - wy) / 6.0 - 1.25) + 5.0 / 24.0) +
47
+ 55.0 / 96.0;
48
+ wy = 0.5 - wx;
49
+ wy = wy * wy;
50
+ weights_{j}[0] = wy * wy / 24.0;
51
+ weights_{j}[4] = 1.0 - weights_{j}[0] - weights_{j}[1]
52
+ - weights_{j}[2] - weights_{j}[3];
53
+ '''
54
+
55
+ spline_weights_inline[5] = '''
56
+ wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
57
+ wy = wx * wx;
58
+ weights_{j}[2] = wy * (wy * (0.25 - wx / 12.0) - 0.5) + 0.55;
59
+ wy = 1.0 - wx;
60
+ wy = wy * wy;
61
+ weights_{j}[3] = wy * (wy * (0.25 - (1.0 - wx) / 12.0) - 0.5) + 0.55;
62
+ wy = wx + 1.0;
63
+ weights_{j}[1] = wy * (wy * (wy * (wy * (wy / 24.0 - 0.375) + 1.25) - 1.75)
64
+ + 0.625) + 0.425;
65
+ wy = 2.0 - wx;
66
+ weights_{j}[4] = wy * (wy * (wy * (wy * (wy / 24.0 - 0.375) + 1.25) - 1.75)
67
+ + 0.625) + 0.425;
68
+ wy = 1.0 - wx;
69
+ wy = wy * wy;
70
+ weights_{j}[0] = (1.0 - wx) * wy * wy / 120.0;
71
+ weights_{j}[5] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2]
72
+ - weights_{j}[3] - weights_{j}[4];
73
+ '''
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_prefilter_core.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Spline poles and boundary handling implemented as in SciPy
3
+
4
+ https://github.com/scipy/scipy/blob/master/scipy/ndimage/src/ni_splines.c
5
+ """
6
+ import functools
7
+ import math
8
+ import operator
9
+ import textwrap
10
+
11
+ import cupy
12
+
13
+
14
+ def get_poles(order):
15
+ if order == 2:
16
+ # sqrt(8.0) - 3.0
17
+ return (-0.171572875253809902396622551580603843,)
18
+ elif order == 3:
19
+ # sqrt(3.0) - 2.0
20
+ return (-0.267949192431122706472553658494127633,)
21
+ elif order == 4:
22
+ # sqrt(664.0 - sqrt(438976.0)) + sqrt(304.0) - 19.0
23
+ # sqrt(664.0 + sqrt(438976.0)) - sqrt(304.0) - 19.0
24
+ return (-0.361341225900220177092212841325675255,
25
+ -0.013725429297339121360331226939128204)
26
+ elif order == 5:
27
+ # sqrt(67.5 - sqrt(4436.25)) + sqrt(26.25) - 6.5
28
+ # sqrt(67.5 + sqrt(4436.25)) - sqrt(26.25) - 6.5
29
+ return (-0.430575347099973791851434783493520110,
30
+ -0.043096288203264653822712376822550182)
31
+ else:
32
+ raise ValueError('only order 2-5 supported')
33
+
34
+
35
+ def get_gain(poles):
36
+ return functools.reduce(operator.mul,
37
+ [(1.0 - z) * (1.0 - 1.0 / z) for z in poles])
38
+
39
+
40
+ def _causal_init_code(mode):
41
+ """Code for causal initialization step of IIR filtering.
42
+
43
+ c is a 1d array of length n and z is a filter pole
44
+ """
45
+ code = f'''
46
+ // causal init for mode={mode}'''
47
+ if mode == 'mirror':
48
+ code += '''
49
+ z_i = z;
50
+ z_n_1 = pow(z, (P)(n - 1));
51
+
52
+ c[0] = c[0] + z_n_1 * c[(n - 1) * element_stride];
53
+ for (i = 1; i < min(n - 1, static_cast<idx_t>({n_boundary})); ++i) {{
54
+ c[0] += z_i * (c[i * element_stride] +
55
+ z_n_1 * c[(n - 1 - i) * element_stride]);
56
+ z_i *= z;
57
+ }}
58
+ c[0] /= 1 - z_n_1 * z_n_1;'''
59
+ elif mode == 'grid-wrap':
60
+ code += '''
61
+ z_i = z;
62
+
63
+ for (i = 1; i < min(n, static_cast<idx_t>({n_boundary})); ++i) {{
64
+ c[0] += z_i * c[(n - i) * element_stride];
65
+ z_i *= z;
66
+ }}
67
+ c[0] /= 1 - z_i; /* z_i = pow(z, n) */'''
68
+ elif mode == 'reflect':
69
+ code += '''
70
+ z_i = z;
71
+ z_n = pow(z, (P)n);
72
+ c0 = c[0];
73
+
74
+ c[0] = c[0] + z_n * c[(n - 1) * element_stride];
75
+ for (i = 1; i < min(n, static_cast<idx_t>({n_boundary})); ++i) {{
76
+ c[0] += z_i * (c[i * element_stride] +
77
+ z_n * c[(n - 1 - i) * element_stride]);
78
+ z_i *= z;
79
+ }}
80
+ c[0] *= z / (1 - z_n * z_n);
81
+ c[0] += c0;'''
82
+ else:
83
+ raise ValueError('invalid mode: {}'.format(mode))
84
+ return code
85
+
86
+
87
+ def _anticausal_init_code(mode):
88
+ """Code for the anti-causal initialization step of IIR filtering.
89
+
90
+ c is a 1d array of length n and z is a filter pole
91
+ """
92
+ code = f'''
93
+ // anti-causal init for mode={mode}'''
94
+ if mode == 'mirror':
95
+ code += '''
96
+ c[(n - 1) * element_stride] = (
97
+ z * c[(n - 2) * element_stride] +
98
+ c[(n - 1) * element_stride]) * z / (z * z - 1);'''
99
+ elif mode == 'grid-wrap':
100
+ code += '''
101
+ z_i = z;
102
+
103
+ for (i = 0; i < min(n - 1, static_cast<idx_t>({n_boundary})); ++i) {{
104
+ c[(n - 1) * element_stride] += z_i * c[i * element_stride];
105
+ z_i *= z;
106
+ }}
107
+ c[(n - 1) * element_stride] *= z / (z_i - 1); /* z_i = pow(z, n) */'''
108
+ elif mode == 'reflect':
109
+ code += '''
110
+ c[(n - 1) * element_stride] *= z / (z - 1);'''
111
+ else:
112
+ raise ValueError('invalid mode: {}'.format(mode))
113
+ return code
114
+
115
+
116
+ def _get_spline_mode(mode):
117
+ """spline boundary mode for interpolation with order >= 2."""
118
+ if mode in ['mirror', 'reflect', 'grid-wrap']:
119
+ # exact analytic boundary conditions exist for these modes.
120
+ return mode
121
+ elif mode == 'grid-mirror':
122
+ # grid-mirror is a synonym for 'reflect'
123
+ return 'reflect'
124
+ # No exact analytical spline boundary condition implemented. Reflect gives
125
+ # lower error than using mirror or wrap for mode 'nearest'. Otherwise, a
126
+ # mirror spline boundary condition is used.
127
+ return 'reflect' if mode == 'nearest' else 'mirror'
128
+
129
+
130
+ def _get_spline1d_code(mode, poles, n_boundary):
131
+ """Generates the code required for IIR filtering of a single 1d signal.
132
+
133
+ Prefiltering is done by causal filtering followed by anti-causal filtering.
134
+ Multiple boundary conditions have been implemented.
135
+ """
136
+ code = ['''
137
+ __device__ void spline_prefilter1d(
138
+ T* __restrict__ c, idx_t signal_length, idx_t element_stride)
139
+ {{''']
140
+
141
+ # variables common to all boundary modes
142
+ code.append('''
143
+ idx_t i, n = signal_length;
144
+ P z, z_i;''')
145
+
146
+ # retrieve the spline boundary extension mode to use
147
+ mode = _get_spline_mode(mode)
148
+
149
+ if mode == 'mirror':
150
+ # variables specific to mirror boundary mode
151
+ code.append('''
152
+ P z_n_1;''')
153
+ elif mode == 'reflect':
154
+ # variables specific to reflect boundary mode
155
+ code.append('''
156
+ P z_n;
157
+ T c0;''')
158
+
159
+ for pole in poles:
160
+
161
+ code.append(f'''
162
+ // select the current pole
163
+ z = {pole};''')
164
+
165
+ # initialize and apply the causal filter
166
+ code.append(_causal_init_code(mode))
167
+ code.append('''
168
+ // apply the causal filter for the current pole
169
+ for (i = 1; i < n; ++i) {{
170
+ c[i * element_stride] += z * c[(i - 1) * element_stride];
171
+ }}''')
172
+ code.append('''
173
+ #ifdef __HIP_DEVICE_COMPILE__
174
+ __syncthreads();
175
+ #endif
176
+ ''')
177
+ # initialize and apply the anti-causal filter
178
+ code.append(_anticausal_init_code(mode))
179
+ code.append('''
180
+ // apply the anti-causal filter for the current pole
181
+ for (i = n - 2; i >= 0; --i) {{
182
+ c[i * element_stride] = z * (c[(i + 1) * element_stride] -
183
+ c[i * element_stride]);
184
+ }}''')
185
+
186
+ code += ['''
187
+ }}''']
188
+ return textwrap.dedent('\n'.join(code)).format(n_boundary=n_boundary)
189
+
190
+
191
+ _FILTER_GENERAL = '''
192
+ #include "cupy/carray.cuh"
193
+ #include "cupy/complex.cuh"
194
+ typedef {data_type} T;
195
+ typedef {pole_type} P;
196
+ typedef {index_type} idx_t;
197
+ template <typename T>
198
+ __device__ T* row(
199
+ T* ptr, idx_t i, idx_t axis, idx_t ndim, const idx_t* shape) {{
200
+ idx_t index = 0, stride = 1;
201
+ for (idx_t a = ndim - 1; a > 0; --a) {{
202
+ if (a != axis) {{
203
+ index += (i % shape[a]) * stride;
204
+ i /= shape[a];
205
+ }}
206
+ stride *= shape[a];
207
+ }}
208
+ return ptr + index + stride * i;
209
+ }}
210
+ '''
211
+
212
+
213
+ _batch_spline1d_strided_template = """
214
+ extern "C" __global__
215
+ __launch_bounds__({block_size})
216
+ void {kernel_name}(T* __restrict__ y, const idx_t* __restrict__ info) {{
217
+ const idx_t n_signals = info[0], n_samples = info[1],
218
+ * __restrict__ shape = info+2;
219
+ idx_t y_elem_stride = 1;
220
+ for (int a = {ndim} - 1; a > {axis}; --a) {{ y_elem_stride *= shape[a]; }}
221
+ idx_t unraveled_idx = blockDim.x * blockIdx.x + threadIdx.x;
222
+ idx_t batch_idx = unraveled_idx;
223
+ if (batch_idx < n_signals)
224
+ {{
225
+ T* __restrict__ y_i = row(y, batch_idx, {axis}, {ndim}, shape);
226
+ spline_prefilter1d(y_i, n_samples, y_elem_stride);
227
+ }}
228
+ }}
229
+ """
230
+
231
+
232
+ @cupy.memoize(for_each_device=True)
233
+ def get_raw_spline1d_kernel(axis, ndim, mode, order, index_type='int',
234
+ data_type='double', pole_type='double',
235
+ block_size=128):
236
+ """Generate a kernel for applying a spline prefilter along a given axis."""
237
+ poles = get_poles(order)
238
+
239
+ # determine number of samples for the boundary approximation
240
+ # (SciPy uses n_boundary = n_samples but this is excessive)
241
+ largest_pole = max([abs(p) for p in poles])
242
+ # tol < 1e-7 fails test cases comparing to SciPy at atol = rtol = 1e-5
243
+ tol = 1e-10 if pole_type == 'float' else 1e-18
244
+ n_boundary = math.ceil(math.log(tol, largest_pole))
245
+
246
+ # headers and general utility function for extracting rows of data
247
+ code = _FILTER_GENERAL.format(index_type=index_type,
248
+ data_type=data_type,
249
+ pole_type=pole_type)
250
+
251
+ # generate source for a 1d function for a given boundary mode and poles
252
+ code += _get_spline1d_code(mode, poles, n_boundary)
253
+
254
+ # generate code handling batch operation of the 1d filter
255
+ mode_str = mode.replace('-', '_') # cannot have '-' in kernel name
256
+ kernel_name = (f'cupyx_scipy_ndimage_spline_filter_{ndim}d_ord{order}_'
257
+ f'axis{axis}_{mode_str}')
258
+ code += _batch_spline1d_strided_template.format(ndim=ndim, axis=axis,
259
+ block_size=block_size,
260
+ kernel_name=kernel_name)
261
+ return cupy.RawKernel(code, kernel_name)
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_util.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ import cupy
4
+
5
+
6
+ def _is_integer_output(output, input):
7
+ if output is None:
8
+ return input.dtype.kind in 'iu'
9
+ elif isinstance(output, cupy.ndarray):
10
+ return output.dtype.kind in 'iu'
11
+ return cupy.dtype(output).kind in 'iu'
12
+
13
+
14
+ def _check_cval(mode, cval, integer_output):
15
+ if mode == 'constant' and integer_output and not cupy.isfinite(cval):
16
+ raise NotImplementedError("Non-finite cval is not supported for "
17
+ "outputs with integer dtype.")
18
+
19
+
20
+ def _init_weights_dtype(input):
21
+ """Initialize filter weights based on the input array.
22
+
23
+ This helper is only used during initialization of some internal filters
24
+ like prewitt and sobel to avoid costly double-precision computation.
25
+ """
26
+ if input.dtype.kind == "c":
27
+ return cupy.promote_types(input.real.dtype, cupy.complex64)
28
+ return cupy.promote_types(input.real.dtype, cupy.float32)
29
+
30
+
31
+ def _get_weights_dtype(input, weights):
32
+ if weights.dtype.kind == "c" or input.dtype.kind == "c":
33
+ return cupy.promote_types(input.real.dtype, cupy.complex64)
34
+ elif weights.dtype.kind in 'iub':
35
+ # convert integer dtype weights to double as in SciPy
36
+ return cupy.float64
37
+ return cupy.promote_types(input.real.dtype, cupy.float32)
38
+
39
+
40
+ def _get_output(output, input, shape=None, complex_output=False):
41
+ shape = input.shape if shape is None else shape
42
+ if output is None:
43
+ if complex_output:
44
+ _dtype = cupy.promote_types(input.dtype, cupy.complex64)
45
+ else:
46
+ _dtype = input.dtype
47
+ output = cupy.empty(shape, dtype=_dtype)
48
+ elif isinstance(output, (type, cupy.dtype)):
49
+ if complex_output and cupy.dtype(output).kind != 'c':
50
+ warnings.warn("promoting specified output dtype to complex")
51
+ output = cupy.promote_types(output, cupy.complex64)
52
+ output = cupy.empty(shape, dtype=output)
53
+ elif isinstance(output, str):
54
+ output = cupy.dtype(output)
55
+ if complex_output and output.kind != 'c':
56
+ raise RuntimeError("output must have complex dtype")
57
+ output = cupy.empty(shape, dtype=output)
58
+ elif output.shape != shape:
59
+ raise RuntimeError("output shape not correct")
60
+ elif complex_output and output.dtype.kind != 'c':
61
+ raise RuntimeError("output must have complex dtype")
62
+ return output
63
+
64
+
65
+ def _fix_sequence_arg(arg, ndim, name, conv=lambda x: x):
66
+ if isinstance(arg, str):
67
+ return [conv(arg)] * ndim
68
+ try:
69
+ arg = iter(arg)
70
+ except TypeError:
71
+ return [conv(arg)] * ndim
72
+ lst = [conv(x) for x in arg]
73
+ if len(lst) != ndim:
74
+ msg = "{} must have length equal to input rank".format(name)
75
+ raise RuntimeError(msg)
76
+ return lst
77
+
78
+
79
+ def _check_origin(origin, width):
80
+ origin = int(origin)
81
+ if (width // 2 + origin < 0) or (width // 2 + origin >= width):
82
+ raise ValueError('invalid origin')
83
+ return origin
84
+
85
+
86
+ def _check_mode(mode):
87
+ if mode not in ('reflect', 'constant', 'nearest', 'mirror', 'wrap',
88
+ 'grid-mirror', 'grid-wrap', 'grid-reflect'):
89
+ msg = f'boundary mode not supported (actual: {mode})'
90
+ raise RuntimeError(msg)
91
+ return mode
92
+
93
+
94
+ def _get_inttype(input):
95
+ # The integer type to use for indices in the input array
96
+ # The indices actually use byte positions and we can't just use
97
+ # input.nbytes since that won't tell us the number of bytes between the
98
+ # first and last elements when the array is non-contiguous
99
+ nbytes = sum((x-1)*abs(stride) for x, stride in
100
+ zip(input.shape, input.strides)) + input.dtype.itemsize
101
+ return 'int' if nbytes < (1 << 31) else 'ptrdiff_t'
102
+
103
+
104
+ def _generate_boundary_condition_ops(mode, ix, xsize, int_t="int",
105
+ float_ix=False):
106
+ min_func = "fmin" if float_ix else "min"
107
+ max_func = "fmax" if float_ix else "max"
108
+ if mode in ['reflect', 'grid-mirror']:
109
+ ops = '''
110
+ if ({ix} < 0) {{
111
+ {ix} = - 1 -{ix};
112
+ }}
113
+ {ix} %= {xsize} * 2;
114
+ {ix} = {min}({ix}, 2 * {xsize} - 1 - {ix});'''.format(
115
+ ix=ix, xsize=xsize, min=min_func)
116
+ elif mode == 'mirror':
117
+ ops = '''
118
+ if ({xsize} == 1) {{
119
+ {ix} = 0;
120
+ }} else {{
121
+ if ({ix} < 0) {{
122
+ {ix} = -{ix};
123
+ }}
124
+ {ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2);
125
+ {ix} = {min}({ix}, 2 * {xsize} - 2 - {ix});
126
+ }}'''.format(ix=ix, xsize=xsize, min=min_func)
127
+ elif mode == 'nearest':
128
+ ops = '''
129
+ {ix} = {min}({max}(({T}){ix}, ({T})0), ({T})({xsize} - 1));'''.format(
130
+ ix=ix, xsize=xsize, min=min_func, max=max_func,
131
+ # force using 64-bit signed integer for ptrdiff_t,
132
+ # see cupy/cupy#6048
133
+ T=('int' if int_t == 'int' else 'long long'))
134
+ elif mode == 'grid-wrap':
135
+ ops = '''
136
+ {ix} %= {xsize};
137
+ while ({ix} < 0) {{
138
+ {ix} += {xsize};
139
+ }}'''.format(ix=ix, xsize=xsize)
140
+ elif mode == 'wrap':
141
+ ops = '''
142
+ if ({ix} < 0) {{
143
+ {ix} += ({sz} - 1) * (({int_t})(-{ix} / ({sz} - 1)) + 1);
144
+ }} else if ({ix} > ({sz} - 1)) {{
145
+ {ix} -= ({sz} - 1) * ({int_t})({ix} / ({sz} - 1));
146
+ }};'''.format(ix=ix, sz=xsize, int_t=int_t)
147
+ elif mode in ['constant', 'grid-constant']:
148
+ ops = '''
149
+ if (({ix} < 0) || {ix} >= {xsize}) {{
150
+ {ix} = -1;
151
+ }}'''.format(ix=ix, xsize=xsize)
152
+ return ops
153
+
154
+
155
+ def _generate_indices_ops(ndim, int_type, offsets):
156
+ code = '{type} ind_{j} = _i % ysize_{j} - {offset}; _i /= ysize_{j};'
157
+ body = [code.format(type=int_type, j=j, offset=offsets[j])
158
+ for j in range(ndim-1, 0, -1)]
159
+ return '{type} _i = i;\n{body}\n{type} ind_0 = _i - {offset};'.format(
160
+ type=int_type, body='\n'.join(body), offset=offsets[0])
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2019 School of Computing, National University of Singapore
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_2d.h ADDED
@@ -0,0 +1,695 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Euclidean Distance Transform
2
+ //
3
+ // Kernels for the 2D version of the Parallel Banding Algorithm (PBA+).
4
+ //
5
+ // MIT license: see LICENSE in this folder
6
+ // Copyright: (c) 2019 School of Computing, National University of Singapore
7
+ //
8
+ // Modifications by Gregory Lee (2022) (NVIDIA)
9
+ // - add user-defined pixel_int2_t so the coordinate integer type can be
+ //   overridden (defaults to short2 below)
10
+ // - replace __mul24 operations with standard multiplication operator
11
+ // - Add variant kernels with support for non-isotropic pixel dimensions. These
12
+ // kernels differ from the originals in that they also take sx and sy values
13
+ // indicating the pixel size along the x and y axes. The kernels are identical
14
+ // except that the `dominate` function is replaced by `dominate_sp` and the
15
+ // physical spacings are used when computing distances.
16
+ //
17
+
18
+
19
+ // START OF DEFINITIONS OVERRIDDEN BY THE PYTHON SCRIPT
20
+
21
+ // The values included in this header file are those defined in the original
22
+ // PBA+ implementation
23
+
24
+ // However, the Python code generation can potentially generate a different
25
+ // ENCODE/DECODE that use 20 bits per coordinates instead of 10 bits per
26
+ // coordinate with ENCODED_INT_TYPE as `long long`.
27
+
28
+ #ifndef MARKER
29
+ #define MARKER -32768
30
+ #endif
31
+
32
+ #ifndef BLOCKSIZE
33
+ #define BLOCKSIZE 32
34
+ #endif
35
+
36
+ #ifndef pixel_int2_t
37
+ #define pixel_int2_t short2
38
+ #define make_pixel(x, y) make_short2(x, y)
39
+ #endif
40
+
41
+ // END OF DEFINITIONS OVERRIDDEN BY THE PYTHON SCRIPT
42
+
43
+
44
+ #define TOID(x, y, size) ((y) * (size) + (x))
45
+
46
#define LL long long
// Stack-pruning criterion of PBA: given three candidate sites (x1,y1),
// (x2,y2), (x3,y3) for pixels in column x0, return true when the middle
// site can never be the nearest one and may be popped from the candidate
// stack (see the call sites in kernelProximatePoints/kernelMergeBands).
// All arithmetic is 64-bit to keep the cross-multiplied squared-distance
// terms from overflowing.
__device__ bool dominate(LL x1, LL y1, LL x2, LL y2, LL x3, LL y3, LL x0)
{
    LL k1 = y2 - y1, k2 = y3 - y2;
    return (k1 * (y1 + y2) + (x2 - x1) * ((x1 + x2) - (x0 << 1))) * k2 > \
           (k2 * (y2 + y3) + (x3 - x2) * ((x2 + x3) - (x0 << 1))) * k1;
}
#undef LL
54
+
55
// Version of dominate() for anisotropic pixels: grid coordinates are
// scaled by the per-axis spacings (sx, sy) and the comparison is carried
// out in float arithmetic instead of 64-bit integers.
__device__ bool dominate_sp(int _x1, int _y1, int _x2, int _y2, int _x3, int _y3, int _x0, float sx, float sy)
{
    // Convert grid indices to physical coordinates.
    float x1 = static_cast<float>(_x1) * sx;
    float x2 = static_cast<float>(_x2) * sx;
    float x3 = static_cast<float>(_x3) * sx;
    float y1 = static_cast<float>(_y1) * sy;
    float y2 = static_cast<float>(_y2) * sy;
    float y3 = static_cast<float>(_y3) * sy;
    float x0_2 = static_cast<float>(_x0 << 1) * sx;  // 2 * x0, physical units
    float k1 = (y2 - y1);
    float k2 = (y3 - y2);
    return (k1 * (y1 + y2) + (x2 - x1) * ((x1 + x2) - x0_2)) * k2 > \
           (k2 * (y2 + y3) + (x3 - x2) * ((x2 + x3) - x0_2)) * k1;
}
70
+
71
+
72
+ extern "C"{
73
+
74
// Band-restricted downward flood: sweeping down each column within a band
// of bandSize rows, every pixel inherits the most recent non-MARKER entry
// seen above it.  One thread handles one column of one band.
__global__ void kernelFloodDown(pixel_int2_t *input, pixel_int2_t *output, int size, int bandSize)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int firstRow = blockIdx.y * bandSize;
    int idx = TOID(col, firstRow, size);

    // Value carried down the column; MARKER means no site seen yet.
    pixel_int2_t carried = make_pixel(MARKER, MARKER);

    for (int row = 0; row < bandSize; ++row, idx += size) {
        const pixel_int2_t candidate = input[idx];
        if (candidate.x != MARKER) {
            carried = candidate;
        }
        output[idx] = carried;
    }
}
93
+
94
// Band-restricted upward flood: sweeping up each column, keep whichever of
// the carried value and the stored value lies closer (in y) to the current
// row.  Complements kernelFloodDown.
__global__ void kernelFloodUp(pixel_int2_t *input, pixel_int2_t *output, int size, int bandSize)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = (blockIdx.y+1) * bandSize - 1;   // last row of this band
    int id = TOID(tx, ty, size);

    pixel_int2_t pixel1, pixel2;
    int dist1, dist2;

    pixel1 = make_pixel(MARKER, MARKER);

    for (int i = 0; i < bandSize; i++, id -= size) {
        // Current row is (ty - i), so |pixel.y - (ty - i)| = |pixel.y - ty + i|.
        dist1 = abs(pixel1.y - ty + i);

        pixel2 = input[id];
        dist2 = abs(pixel2.y - ty + i);

        if (dist2 < dist1)
            pixel1 = pixel2;

        output[id] = pixel1;
    }
}
117
+
118
// Propagate site information across band boundaries.  For each column and
// band, margin_out receives (a) at slot tid the best candidate for the
// band's top row, taking the first non-MARKER entry found scanning upward
// through preceding bands, and (b) at slot bid the best candidate for the
// band's bottom row, scanning downward through following bands.
__global__ void kernelPropagateInterband(pixel_int2_t *input, pixel_int2_t *margin_out, int size, int bandSize)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int inc = bandSize * size;   // linear offset spanning one whole band
    int ny, nid, nDist;
    pixel_int2_t pixel;

    // Top row, look backward
    int ty = blockIdx.y * bandSize;
    int topId = TOID(tx, ty, size);
    int bottomId = TOID(tx, ty + bandSize - 1, size);
    int tid = blockIdx.y * size + tx;             // margin slot: top row
    int bid = tid + (size * size / bandSize);     // margin slot: bottom row

    pixel = input[topId];
    int myDist = abs(pixel.y - ty);
    margin_out[tid] = pixel;

    // Scan the bottom rows of the bands above; the first non-MARKER entry
    // is the nearest candidate from that direction (flood already ran).
    for (nid = bottomId - inc; nid >= 0; nid -= inc) {
        pixel = input[nid];

        if (pixel.x != MARKER) {
            nDist = abs(pixel.y - ty);

            if (nDist < myDist)
                margin_out[tid] = pixel;

            break;
        }
    }

    // Last row, look downward
    ty = ty + bandSize - 1;
    pixel = input[bottomId];
    myDist = abs(pixel.y - ty);
    margin_out[bid] = pixel;

    // Scan the top rows of the bands below, same early-exit logic.
    for (ny = ty + 1, nid = topId + inc; ny < size; ny += bandSize, nid += inc) {
        pixel = input[nid];

        if (pixel.x != MARKER) {
            nDist = abs(pixel.y - ty);

            if (nDist < myDist)
                margin_out[bid] = pixel;

            break;
        }
    }
}
168
+
169
// Combine each band's flood result with the two inter-band margin
// candidates from kernelPropagateInterband, keeping whichever of
// {in-band value, top margin, bottom margin} is closest in y.  Results are
// staged in shared memory with swapped (x, y) components and written to a
// transposed location so the following phase can work row-wise again.
// NOTE(review): assumes bandSize is a multiple of blockDim.x — confirm
// against the launching Python code.
__global__ void kernelUpdateVertical(pixel_int2_t *color, pixel_int2_t *margin, pixel_int2_t *output, int size, int bandSize)
{
    __shared__ pixel_int2_t block[BLOCKSIZE][BLOCKSIZE];

    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * bandSize;

    // Margin candidates for this band's top and bottom boundaries.
    pixel_int2_t top = margin[blockIdx.y * size + tx];
    pixel_int2_t bottom = margin[(blockIdx.y + size / bandSize) * size + tx];
    pixel_int2_t pixel;

    int dist, myDist;

    int id = TOID(tx, ty, size);

    int n_step = bandSize / blockDim.x;
    for(int step = 0; step < n_step; ++step) {
        int y_start = blockIdx.y * bandSize + step * blockDim.x;
        int y_end = y_start + blockDim.x;

        for (ty = y_start; ty < y_end; ++ty, id += size) {
            pixel = color[id];
            myDist = abs(pixel.y - ty);

            dist = abs(top.y - ty);
            if (dist < myDist) { myDist = dist; pixel = top; }

            dist = abs(bottom.y - ty);
            if (dist < myDist) pixel = bottom;

            // temporary result is stored in block (components swapped)
            block[threadIdx.x][ty - y_start] = make_pixel(pixel.y, pixel.x);
        }

        __syncthreads();

        // block is written to a transposed location in the output

        int tid = TOID(blockIdx.y * bandSize + step * blockDim.x + threadIdx.x, \
                       blockIdx.x * blockDim.x, size);

        for(int i = 0; i < blockDim.x; ++i, tid += size) {
            output[tid] = block[i][threadIdx.x];
        }

        __syncthreads();
    }
}
217
+
218
// Per column, scan down the band and maintain a stack of candidate sites
// linked through the y component (a backward pointer).  Entries proven
// useless for this column by dominate() are popped.  If the band's last
// pixel is not itself a stack entry, it stores a tail pointer instead
// (marked by x == MARKER).
__global__ void kernelProximatePoints(pixel_int2_t *input, pixel_int2_t *stack, int size, int bandSize)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * bandSize;
    int id = TOID(tx, ty, size);
    int lasty = -1;                      // row of the newest surviving site
    pixel_int2_t last1, last2, current;  // second-from-top, top, incoming

    last1.y = -1; last2.y = -1;

    for (int i = 0; i < bandSize; i++, id += size) {
        current = input[id];

        if (current.x != MARKER) {
            // Pop entries dominated by the incoming site.
            while (last2.y >= 0) {
                if (!dominate(last1.x, last2.y, last2.x, \
                              lasty, current.x, current.y, tx))
                    break;

                lasty = last2.y; last2 = last1;

                if (last1.y >= 0)
                    last1 = stack[TOID(tx, last1.y, size)];
            }

            // Push the incoming site; y links back to the previous entry.
            last1 = last2; last2 = make_pixel(current.x, lasty); lasty = current.y;

            stack[id] = last2;
        }
    }

    // Store the pointer to the tail at the last pixel of this band
    if (lasty != ty + bandSize - 1)
        stack[TOID(tx, ty + bandSize - 1, size)] = make_pixel(MARKER, lasty);
}
253
+
254
+
255
// Identical to kernelProximatePoints except that candidate pruning uses
// dominate_sp() with the physical pixel spacings (sx, sy) instead of the
// integer dominate() test.
__global__ void kernelProximatePointsWithSpacing(pixel_int2_t *input, pixel_int2_t *stack, int size, int bandSize, double sx, double sy)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * bandSize;
    int id = TOID(tx, ty, size);
    int lasty = -1;                      // row of the newest surviving site
    pixel_int2_t last1, last2, current;  // second-from-top, top, incoming

    last1.y = -1; last2.y = -1;

    for (int i = 0; i < bandSize; i++, id += size) {
        current = input[id];

        if (current.x != MARKER) {
            // Pop entries dominated by the incoming site (spacing-aware).
            while (last2.y >= 0) {
                if (!dominate_sp(last1.x, last2.y, last2.x, \
                                 lasty, current.x, current.y, tx, sx, sy))
                    break;

                lasty = last2.y; last2 = last1;

                if (last1.y >= 0)
                    last1 = stack[TOID(tx, last1.y, size)];
            }

            // Push the incoming site; y links back to the previous entry.
            last1 = last2; last2 = make_pixel(current.x, lasty); lasty = current.y;

            stack[id] = last2;
        }
    }

    // Store the pointer to the tail at the last pixel of this band
    if (lasty != ty + bandSize - 1)
        stack[TOID(tx, ty + bandSize - 1, size)] = make_pixel(MARKER, lasty);
}
290
+
291
// Walk each column's candidate list backward (starting at the band's last
// row) and fill in forward pointers, turning the singly linked list built
// by kernelProximatePoints into a doubly linked one:
// output.x = forward pointer, output.y = backward pointer.
__global__ void kernelCreateForwardPointers(pixel_int2_t *input, pixel_int2_t *output, int size, int bandSize)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = (blockIdx.y+1) * bandSize - 1;   // last row of this band
    int id = TOID(tx, ty, size);
    int lasty = -1, nexty;
    pixel_int2_t current;

    // Get the tail pointer
    current = input[id];

    if (current.x == MARKER)
        nexty = current.y;   // last pixel only holds a tail pointer
    else
        nexty = ty;          // last pixel is itself a list element

    for (int i = 0; i < bandSize; i++, id -= size)
        if (ty - i == nexty) {
            current = make_pixel(lasty, input[id].y);
            output[id] = current;

            lasty = nexty;
            nexty = current.y;   // follow the backward pointer
        }

    // Store the pointer to the head at the first pixel of this band
    if (lasty != ty - bandSize + 1)
        output[id + size] = make_pixel(lasty, MARKER);
}
320
+
321
// Merge the candidate lists of two adjacent bands (band1, band2 = pairs by
// blockIdx.y) into one doubly linked list per column, re-running the
// dominate() pruning across the seam.  Loop exits once two entries of the
// second band have survived, because earlier entries can no longer change.
__global__ void kernelMergeBands(pixel_int2_t *color, pixel_int2_t *link, pixel_int2_t *output, int size, int bandSize)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int band1 = blockIdx.y * 2;
    int band2 = band1 + 1;
    int firsty, lasty;
    pixel_int2_t last1, last2, current;
    // last1 and last2: x component store the x coordinate of the site,
    // y component store the backward pointer
    // current: y component store the x coordinate of the site,
    // x component store the forward pointer

    // Get the two last items of the first list
    lasty = band2 * bandSize - 1;
    last2 = make_pixel(color[TOID(tx, lasty, size)].x,
                       link[TOID(tx, lasty, size)].y);

    if (last2.x == MARKER) {   // tail-pointer entry: follow it
        lasty = last2.y;

        if (lasty >= 0)
            last2 = make_pixel(color[TOID(tx, lasty, size)].x,
                               link[TOID(tx, lasty, size)].y);
        else
            last2 = make_pixel(MARKER, MARKER);
    }

    if (last2.y >= 0) {
        // Second item at the top of the stack
        last1 = make_pixel(color[TOID(tx, last2.y, size)].x,
                           link[TOID(tx, last2.y, size)].y);
    }

    // Get the first item of the second band
    firsty = band2 * bandSize;
    current = make_pixel(link[TOID(tx, firsty, size)].x,
                         color[TOID(tx, firsty, size)].x);

    if (current.y == MARKER) {   // head-pointer entry: follow it
        firsty = current.x;

        if (firsty >= 0)
            current = make_pixel(link[TOID(tx, firsty, size)].x,
                                 color[TOID(tx, firsty, size)].x);
        else
            current = make_pixel(MARKER, MARKER);
    }

    // Count the number of item in the second band that survive so far.
    // Once it reaches 2, we can stop.
    int top = 0;

    while (top < 2 && current.y >= 0) {
        // While there's still something on the left
        while (last2.y >= 0) {

            if (!dominate(last1.x, last2.y, last2.x, \
                          lasty, current.y, firsty, tx))
                break;

            lasty = last2.y; last2 = last1;
            top--;

            if (last1.y >= 0)
                last1 = make_pixel(color[TOID(tx, last1.y, size)].x,
                                   link[TOID(tx, last1.y, size)].y);
        }

        // Update the current pointer
        output[TOID(tx, firsty, size)] = make_pixel(current.x, lasty);

        if (lasty >= 0)
            output[TOID(tx, lasty, size)] = make_pixel(firsty, last2.y);

        last1 = last2; last2 = make_pixel(current.y, lasty); lasty = firsty;
        firsty = current.x;

        top = max(1, top + 1);

        // Advance the current pointer to the next one
        if (firsty >= 0)
            current = make_pixel(link[TOID(tx, firsty, size)].x,
                                 color[TOID(tx, firsty, size)].x);
        else
            current = make_pixel(MARKER, MARKER);
    }

    // Update the head and tail pointer.
    firsty = band1 * bandSize;
    lasty = band2 * bandSize;
    current = link[TOID(tx, firsty, size)];

    if (current.y == MARKER && current.x < 0) { // No head?
        last1 = link[TOID(tx, lasty, size)];

        if (last1.y == MARKER)
            current.x = last1.x;
        else
            current.x = lasty;

        output[TOID(tx, firsty, size)] = current;
    }

    firsty = band1 * bandSize + bandSize - 1;
    lasty = band2 * bandSize + bandSize - 1;
    current = link[TOID(tx, lasty, size)];

    if (current.x == MARKER && current.y < 0) { // No tail?
        last1 = link[TOID(tx, firsty, size)];

        if (last1.x == MARKER)
            current.y = last1.y;
        else
            current.y = firsty;

        output[TOID(tx, lasty, size)] = current;
    }
}
439
+
440
+
441
// Identical to kernelMergeBands except the cross-seam pruning uses
// dominate_sp() with the physical pixel spacings (sx, sy).
__global__ void kernelMergeBandsWithSpacing(pixel_int2_t *color, pixel_int2_t *link, pixel_int2_t *output, int size, int bandSize, double sx, double sy)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int band1 = blockIdx.y * 2;
    int band2 = band1 + 1;
    int firsty, lasty;
    pixel_int2_t last1, last2, current;
    // last1 and last2: x component store the x coordinate of the site,
    // y component store the backward pointer
    // current: y component store the x coordinate of the site,
    // x component store the forward pointer

    // Get the two last items of the first list
    lasty = band2 * bandSize - 1;
    last2 = make_pixel(color[TOID(tx, lasty, size)].x,
                       link[TOID(tx, lasty, size)].y);

    if (last2.x == MARKER) {   // tail-pointer entry: follow it
        lasty = last2.y;

        if (lasty >= 0)
            last2 = make_pixel(color[TOID(tx, lasty, size)].x,
                               link[TOID(tx, lasty, size)].y);
        else
            last2 = make_pixel(MARKER, MARKER);
    }

    if (last2.y >= 0) {
        // Second item at the top of the stack
        last1 = make_pixel(color[TOID(tx, last2.y, size)].x,
                           link[TOID(tx, last2.y, size)].y);
    }

    // Get the first item of the second band
    firsty = band2 * bandSize;
    current = make_pixel(link[TOID(tx, firsty, size)].x,
                         color[TOID(tx, firsty, size)].x);

    if (current.y == MARKER) {   // head-pointer entry: follow it
        firsty = current.x;

        if (firsty >= 0)
            current = make_pixel(link[TOID(tx, firsty, size)].x,
                                 color[TOID(tx, firsty, size)].x);
        else
            current = make_pixel(MARKER, MARKER);
    }

    // Count the number of item in the second band that survive so far.
    // Once it reaches 2, we can stop.
    int top = 0;

    while (top < 2 && current.y >= 0) {
        // While there's still something on the left
        while (last2.y >= 0) {

            if (!dominate_sp(last1.x, last2.y, last2.x, \
                             lasty, current.y, firsty, tx, sx, sy))
                break;

            lasty = last2.y; last2 = last1;
            top--;

            if (last1.y >= 0)
                last1 = make_pixel(color[TOID(tx, last1.y, size)].x,
                                   link[TOID(tx, last1.y, size)].y);
        }

        // Update the current pointer
        output[TOID(tx, firsty, size)] = make_pixel(current.x, lasty);

        if (lasty >= 0)
            output[TOID(tx, lasty, size)] = make_pixel(firsty, last2.y);

        last1 = last2; last2 = make_pixel(current.y, lasty); lasty = firsty;
        firsty = current.x;

        top = max(1, top + 1);

        // Advance the current pointer to the next one
        if (firsty >= 0)
            current = make_pixel(link[TOID(tx, firsty, size)].x,
                                 color[TOID(tx, firsty, size)].x);
        else
            current = make_pixel(MARKER, MARKER);
    }

    // Update the head and tail pointer.
    firsty = band1 * bandSize;
    lasty = band2 * bandSize;
    current = link[TOID(tx, firsty, size)];

    if (current.y == MARKER && current.x < 0) { // No head?
        last1 = link[TOID(tx, lasty, size)];

        if (last1.y == MARKER)
            current.x = last1.x;
        else
            current.x = lasty;

        output[TOID(tx, firsty, size)] = current;
    }

    firsty = band1 * bandSize + bandSize - 1;
    lasty = band2 * bandSize + bandSize - 1;
    current = link[TOID(tx, lasty, size)];

    if (current.x == MARKER && current.y < 0) { // No tail?
        last1 = link[TOID(tx, firsty, size)];

        if (last1.x == MARKER)
            current.y = last1.y;
        else
            current.y = firsty;

        output[TOID(tx, lasty, size)] = current;
    }
}
559
+
560
// Collapse the two parallel arrays into one: take the x component from
// `color` and the y component (the backward pointer) from `link`.
__global__ void kernelDoubleToSingleList(pixel_int2_t *color, pixel_int2_t *link, pixel_int2_t *output, int size)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int idx = TOID(x, blockIdx.y, size);
    output[idx] = make_pixel(color[idx].x, link[idx].y);
}
568
+
569
// Final coloring phase: sweeping each column from the bottom up, walk the
// surviving candidate list and record for every pixel the site with the
// smallest squared Euclidean distance.  Results are staged in shared
// memory and written to transposed locations to restore row-major order.
// NOTE(review): assumes size is a multiple of blockDim.x — confirm
// against the launching Python code.
__global__ void kernelColor(pixel_int2_t *input, pixel_int2_t *output, int size)
{
    __shared__ pixel_int2_t block[BLOCKSIZE][BLOCKSIZE];

    int col = threadIdx.x;
    int tid = threadIdx.y;
    int tx = blockIdx.x * blockDim.x + col;
    int dx, dy, lasty;
    unsigned int best, dist;
    pixel_int2_t last1, last2;   // next candidate, current candidate

    lasty = size - 1;

    last2 = input[TOID(tx, lasty, size)];

    if (last2.x == MARKER) {   // tail-pointer entry: follow it
        lasty = max(last2.y, 0);
        last2 = input[TOID(tx, lasty, size)];
    }

    if (last2.y >= 0)
        last1 = input[TOID(tx, last2.y, size)];

    int y_start, y_end, n_step = size / blockDim.x;
    for(int step = 0; step < n_step; ++step) {
        y_start = size - step * blockDim.x - 1;
        y_end = size - (step + 1) * blockDim.x;

        for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) {
            dx = last2.x - tx; dy = lasty - ty;
            best = dist = dx * dx + dy * dy;

            // Advance along the list while the next candidate is closer.
            while (last2.y >= 0) {
                dx = last1.x - tx; dy = last2.y - ty;
                dist = dx * dx + dy * dy;

                if (dist > best)
                    break;

                best = dist; lasty = last2.y; last2 = last1;

                if (last2.y >= 0)
                    last1 = input[TOID(tx, last2.y, size)];
            }

            block[threadIdx.x][ty - y_end] = make_pixel(lasty, last2.x);
        }

        __syncthreads();

        // note: transposes back to original shape here
        if(!threadIdx.y) {
            int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x, size);
            for(int i = 0; i < blockDim.x; ++i, id+=size) {
                output[id] = block[i][threadIdx.x];
            }
        }

        __syncthreads();
    }
}
630
+
631
+
632
// Identical to kernelColor except distances are computed in physical
// units: coordinate differences are scaled by (sx, sy) and accumulated in
// double precision instead of unsigned int.
__global__ void kernelColorWithSpacing(pixel_int2_t *input, pixel_int2_t *output, int size, double sx, double sy)
{
    __shared__ pixel_int2_t block[BLOCKSIZE][BLOCKSIZE];

    int col = threadIdx.x;
    int tid = threadIdx.y;
    int tx = blockIdx.x * blockDim.x + col;
    int lasty;
    double dx, dy, best, dist;
    pixel_int2_t last1, last2;   // next candidate, current candidate

    lasty = size - 1;

    last2 = input[TOID(tx, lasty, size)];

    if (last2.x == MARKER) {   // tail-pointer entry: follow it
        lasty = max(last2.y, 0);
        last2 = input[TOID(tx, lasty, size)];
    }

    if (last2.y >= 0)
        last1 = input[TOID(tx, last2.y, size)];

    int y_start, y_end, n_step = size / blockDim.x;
    for(int step = 0; step < n_step; ++step) {
        y_start = size - step * blockDim.x - 1;
        y_end = size - (step + 1) * blockDim.x;

        for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) {
            dx = static_cast<double>(last2.x - tx) * sx;
            dy = static_cast<double>(lasty - ty) * sy;
            best = dist = dx * dx + dy * dy;

            // Advance along the list while the next candidate is closer.
            while (last2.y >= 0) {
                dx = static_cast<double>(last1.x - tx) * sx;
                dy = static_cast<double>(last2.y - ty) * sy;
                dist = dx * dx + dy * dy;

                if (dist > best)
                    break;

                best = dist; lasty = last2.y; last2 = last1;

                if (last2.y >= 0)
                    last1 = input[TOID(tx, last2.y, size)];
            }

            block[threadIdx.x][ty - y_end] = make_pixel(lasty, last2.x);
        }

        __syncthreads();

        // note: transposes back to original shape here
        if(!threadIdx.y) {
            int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x, size);
            for(int i = 0; i < blockDim.x; ++i, id+=size) {
                output[id] = block[i][threadIdx.x];
            }
        }

        __syncthreads();
    }
}
695
+ } // extern C
vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_3d.h ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Euclidean Distance Transform
2
+ //
3
+ // Kernels for the 3D version of the Parallel Banding Algorithm (PBA+).
4
+ //
5
+ // MIT license: see LICENSE in this folder
6
+ // Copyright: (c) 2019 School of Computing, National University of Singapore
7
+ //
8
+ // Modifications by Gregory Lee (2022) (NVIDIA)
9
+ // - allow user-defined ENCODED_INT_TYPE, ENCODE, DECODE
10
+ // - Add variant kernels with support for non-isotropic pixel dimensions
11
+ // These kernels differ from the originals in that they also take sx, sy and
12
+ // sz values indicating the pixel size along the x, y and z axes. The kernels
13
+ // are identical except that the `dominate` function is replaced by
14
+ // `dominate_sp` and the physical spacings are used when computing distances.
15
+
16
+
17
+ // START OF DEFINITIONS OVERRIDDEN BY THE PYTHON SCRIPT
18
+
19
+ // The values included in this header file are those defined in the original
20
+ // PBA+ implementation
21
+
22
+ // However, the Python code generation can potentially generate a different
23
+ // ENCODE/DECODE that use 20 bits per coordinates instead of 10 bits per
24
+ // coordinate with ENCODED_INT_TYPE as `long long`.
25
+
26
+
27
+ #ifndef MARKER
28
+ #define MARKER -2147483648
29
+ #endif // MARKER
30
+
31
+ #ifndef MAX_INT
32
+ #define MAX_INT 2147483647
33
+ #endif
34
+
35
+ #ifndef BLOCKSIZE
36
+ #define BLOCKSIZE 32
37
+ #endif
38
+
39
+ #ifndef ENCODE
40
+
41
+ // Sites : ENCODE(x, y, z, 0, 0)
42
+ // Not sites : ENCODE(0, 0, 0, 1, 0) or MARKER
43
+ #define ENCODED_INT_TYPE int
44
+ #define ZERO 0
45
+ #define ONE 1
46
+ #define ENCODE(x, y, z, a, b) (((x) << 20) | ((y) << 10) | (z) | ((a) << 31) | ((b) << 30))
47
+ #define DECODE(value, x, y, z) \
48
+ x = ((value) >> 20) & 0x3ff; \
49
+ y = ((value) >> 10) & 0x3ff; \
50
+ z = (value) & 0x3ff
51
+
52
+ #define NOTSITE(value) (((value) >> 31) & 1)
53
+ #define HASNEXT(value) (((value) >> 30) & 1)
54
+
55
+ #define GET_X(value) (((value) >> 20) & 0x3ff)
56
+ #define GET_Y(value) (((value) >> 10) & 0x3ff)
57
+ #define GET_Z(value) ((NOTSITE((value))) ? MAX_INT : ((value) & 0x3ff))
58
+
59
+ #endif // ENCODE
60
+
61
+ // END OF DEFINITIONS DEFINED IN THE PYTHON SCRIPT
62
+
63
+
64
#define LL long long
// 3D stack-pruning criterion of PBA: for three candidate sites processed
// along the y axis, return true when the middle site can be removed from
// the candidate stack for the column at (x_0, z_0) (see the call site in
// kernelMaurerAxis).  64-bit arithmetic avoids overflow of the
// cross-multiplied terms.
__device__ bool dominate(LL x_1, LL y_1, LL z_1, LL x_2, LL y_2, LL z_2, LL x_3, LL y_3, LL z_3, LL x_0, LL z_0)
{
    LL k_1 = y_2 - y_1, k_2 = y_3 - y_2;

    return (((y_1 + y_2) * k_1 + ((x_2 - x_1) * (x_1 + x_2 - (x_0 << 1)) + (z_2 - z_1) * (z_1 + z_2 - (z_0 << 1)))) * k_2 > \
            ((y_2 + y_3) * k_2 + ((x_3 - x_2) * (x_2 + x_3 - (x_0 << 1)) + (z_3 - z_2) * (z_2 + z_3 - (z_0 << 1)))) * k_1);
}
#undef LL
73
+
74
// Version of the 3D dominate() for anisotropic voxels: grid coordinates
// are scaled by the per-axis spacings (sx, sy, sz) and the comparison is
// done in float arithmetic.
__device__ bool dominate_sp(int _x_1, int _y_1, int _z_1, int _x_2, int _y_2, int _z_2, int _x_3, int _y_3, int _z_3, int _x_0, int _z_0, float sx, float sy, float sz)
{
    // Convert grid indices to physical coordinates.
    float x_1 = static_cast<float>(_x_1) * sx;
    float y_1 = static_cast<float>(_y_1) * sy;
    float z_1 = static_cast<float>(_z_1) * sz;
    float x_2 = static_cast<float>(_x_2) * sx;
    float y_2 = static_cast<float>(_y_2) * sy;
    float z_2 = static_cast<float>(_z_2) * sz;
    float x_3 = static_cast<float>(_x_3) * sx;
    float y_3 = static_cast<float>(_y_3) * sy;
    float z_3 = static_cast<float>(_z_3) * sz;
    float x_0_2 = static_cast<float>(_x_0 << 1) * sx;  // 2 * x_0, physical
    float z_0_2 = static_cast<float>(_z_0 << 1) * sz;  // 2 * z_0, physical
    float k_1 = y_2 - y_1;
    float k_2 = y_3 - y_2;

    return (((y_1 + y_2) * k_1 + ((x_2 - x_1) * (x_1 + x_2 - (x_0_2)) + (z_2 - z_1) * (z_1 + z_2 - (z_0_2)))) * k_2 > \
            ((y_2 + y_3) * k_2 + ((x_3 - x_2) * (x_2 + x_3 - (x_0_2)) + (z_3 - z_2) * (z_2 + z_3 - (z_0_2)))) * k_1);
}
93
+
94
+ #define TOID(x, y, z, size) ((((z) * (size)) + (y)) * (size) + (x))
95
+
96
+
97
+ extern "C"{
98
+
99
// 1D flood along the z axis.  A downward sweep propagates the last site
// seen; an upward sweep then keeps whichever of the carried and stored
// candidates lies closer in z, so every voxel ends up with its nearest
// site along its z column.
__global__ void kernelFloodZ(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *output, int size)
{

    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * blockDim.y + threadIdx.y;
    int tz = 0;

    int plane = size * size;          // linear offset of one z slice
    int id = TOID(tx, ty, tz, size);
    ENCODED_INT_TYPE pixel1, pixel2;

    pixel1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO);   // "not a site" sentinel

    // Sweep down
    for (int i = 0; i < size; i++, id += plane) {
        pixel2 = input[id];

        if (!NOTSITE(pixel2))
            pixel1 = pixel2;

        output[id] = pixel1;
    }

    ENCODED_INT_TYPE dist1, dist2, nz;

    id -= plane + plane;   // step back to the second-to-last slice

    // Sweep up
    for (int i = size - 2; i >= 0; i--, id -= plane) {
        nz = GET_Z(pixel1);
        dist1 = abs(nz - (tz + i));

        pixel2 = output[id];
        nz = GET_Z(pixel2);
        dist2 = abs(nz - (tz + i));

        if (dist2 < dist1)
            pixel1 = pixel2;

        output[id] = pixel1;
    }
}
141
+
142
+
143
// Build, per (x, z) column, the stack of candidate sites along y using the
// encoded-integer representation: each pushed entry stores the site's
// (x, z) plus a backward pointer in the y field; dominated entries are
// popped via dominate().  If the column's last voxel is not a site, a
// tail-pointer entry (NOTSITE bit set) is stored at the final position.
__global__ void kernelMaurerAxis(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *stack, int size)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int tz = blockIdx.y * blockDim.y + threadIdx.y;
    int ty = 0;

    int id = TOID(tx, ty, tz, size);

    ENCODED_INT_TYPE lasty = 0;   // y of the most recent surviving site
    ENCODED_INT_TYPE x1, y1, z1, x2, y2, z2, nx, ny, nz;
    ENCODED_INT_TYPE p = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), s1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), s2 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO);
    ENCODED_INT_TYPE flag = 0;    // HASNEXT bit for pushed entries

    for (ty = 0; ty < size; ++ty, id += size) {
        p = input[id];

        if (!NOTSITE(p)) {

            // Pop stack entries dominated by the incoming site.
            while (HASNEXT(s2)) {
                DECODE(s1, x1, y1, z1);
                DECODE(s2, x2, y2, z2);
                DECODE(p, nx, ny, nz);

                if (!dominate(x1, y2, z1, x2, lasty, z2, nx, ty, nz, tx, tz))
                    break;

                lasty = y2; s2 = s1; y2 = y1;

                if (HASNEXT(s2))
                    s1 = stack[TOID(tx, y2, tz, size)];
            }

            // Push the incoming site with a backward pointer in y.
            DECODE(p, nx, ny, nz);
            s1 = s2;
            s2 = ENCODE(nx, lasty, nz, ZERO, flag);
            y2 = lasty;
            lasty = ty;

            stack[id] = s2;

            flag = ONE;
        }
    }

    // Column did not end on a site: store the tail pointer.
    if (NOTSITE(p))
        stack[TOID(tx, ty - 1, tz, size)] = ENCODE(ZERO, lasty, ZERO, ONE, flag);
}
190
+
191
// Identical to kernelMaurerAxis except pruning uses dominate_sp() with the
// physical voxel spacings (sx, sy, sz).
__global__ void kernelMaurerAxisWithSpacing(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *stack, int size, double sx, double sy, double sz)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int tz = blockIdx.y * blockDim.y + threadIdx.y;
    int ty = 0;

    int id = TOID(tx, ty, tz, size);

    ENCODED_INT_TYPE lasty = 0;   // y of the most recent surviving site
    ENCODED_INT_TYPE x1, y1, z1, x2, y2, z2, nx, ny, nz;
    ENCODED_INT_TYPE p = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), s1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), s2 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO);
    ENCODED_INT_TYPE flag = 0;    // HASNEXT bit for pushed entries

    for (ty = 0; ty < size; ++ty, id += size) {
        p = input[id];

        if (!NOTSITE(p)) {

            // Pop stack entries dominated by the incoming site.
            while (HASNEXT(s2)) {
                DECODE(s1, x1, y1, z1);
                DECODE(s2, x2, y2, z2);
                DECODE(p, nx, ny, nz);

                if (!dominate_sp(x1, y2, z1, x2, lasty, z2, nx, ty, nz, tx, tz, sx, sy, sz))
                    break;

                lasty = y2; s2 = s1; y2 = y1;

                if (HASNEXT(s2))
                    s1 = stack[TOID(tx, y2, tz, size)];
            }

            // Push the incoming site with a backward pointer in y.
            DECODE(p, nx, ny, nz);
            s1 = s2;
            s2 = ENCODE(nx, lasty, nz, ZERO, flag);
            y2 = lasty;
            lasty = ty;

            stack[id] = s2;

            flag = ONE;
        }
    }

    // Column did not end on a site: store the tail pointer.
    if (NOTSITE(p))
        stack[TOID(tx, ty - 1, tz, size)] = ENCODE(ZERO, lasty, ZERO, ONE, flag);
}
238
+
239
+ __global__ void kernelColorAxis(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *output, int size)
240
+ {
241
+ __shared__ ENCODED_INT_TYPE block[BLOCKSIZE][BLOCKSIZE];
242
+
243
+ int col = threadIdx.x;
244
+ int tid = threadIdx.y;
245
+ int tx = blockIdx.x * blockDim.x + col;
246
+ int tz = blockIdx.y;
247
+
248
+ ENCODED_INT_TYPE x1, y1, z1, x2, y2, z2;
249
+ ENCODED_INT_TYPE last1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), last2 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), lasty;
250
+ long long dx, dy, dz, best, dist;
251
+
252
+ lasty = size - 1;
253
+
254
+ last2 = input[TOID(tx, lasty, tz, size)];
255
+ DECODE(last2, x2, y2, z2);
256
+
257
+ if (NOTSITE(last2)) {
258
+ lasty = y2;
259
+ if(HASNEXT(last2)) {
260
+ last2 = input[TOID(tx, lasty, tz, size)];
261
+ DECODE(last2, x2, y2, z2);
262
+ }
263
+ }
264
+
265
+ if (HASNEXT(last2)) {
266
+ last1 = input[TOID(tx, y2, tz, size)];
267
+ DECODE(last1, x1, y1, z1);
268
+ }
269
+
270
+ int y_start, y_end, n_step = size / blockDim.x;
271
+ for(int step = 0; step < n_step; ++step) {
272
+ y_start = size - step * blockDim.x - 1;
273
+ y_end = size - (step + 1) * blockDim.x;
274
+
275
+ for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) {
276
+ dx = x2 - tx; dy = lasty - ty; dz = z2 - tz;
277
+ best = dx * dx + dy * dy + dz * dz;
278
+
279
+ while (HASNEXT(last2)) {
280
+ dx = x1 - tx; dy = y2 - ty; dz = z1 - tz;
281
+ dist = dx * dx + dy * dy + dz * dz;
282
+
283
+ if(dist > best) break;
284
+
285
+ best = dist; lasty = y2; last2 = last1;
286
+ DECODE(last2, x2, y2, z2);
287
+
288
+ if (HASNEXT(last2)) {
289
+ last1 = input[TOID(tx, y2, tz, size)];
290
+ DECODE(last1, x1, y1, z1);
291
+ }
292
+ }
293
+
294
+ block[threadIdx.x][ty - y_end] = ENCODE(lasty, x2, z2, NOTSITE(last2), ZERO);
295
+ }
296
+
297
+ __syncthreads();
298
+
299
+ if(!threadIdx.y) {
300
+ int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x, tz, size);
301
+ for(int i = 0; i < blockDim.x; i++, id+=size) {
302
+ output[id] = block[i][threadIdx.x];
303
+ }
304
+ }
305
+
306
+ __syncthreads();
307
+ }
308
+ }
309
+
310
+
311
+ __global__ void kernelColorAxisWithSpacing(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *output, int size, double sx, double sy, double sz)
312
+ {
313
+ __shared__ ENCODED_INT_TYPE block[BLOCKSIZE][BLOCKSIZE];
314
+
315
+ int col = threadIdx.x;
316
+ int tid = threadIdx.y;
317
+ int tx = blockIdx.x * blockDim.x + col;
318
+ int tz = blockIdx.y;
319
+
320
+ ENCODED_INT_TYPE x1, y1, z1, x2, y2, z2;
321
+ ENCODED_INT_TYPE last1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), last2 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), lasty;
322
+ double dx, dy, dz, best, dist;
323
+
324
+ lasty = size - 1;
325
+
326
+ last2 = input[TOID(tx, lasty, tz, size)];
327
+ DECODE(last2, x2, y2, z2);
328
+
329
+ if (NOTSITE(last2)) {
330
+ lasty = y2;
331
+ if(HASNEXT(last2)) {
332
+ last2 = input[TOID(tx, lasty, tz, size)];
333
+ DECODE(last2, x2, y2, z2);
334
+ }
335
+ }
336
+
337
+ if (HASNEXT(last2)) {
338
+ last1 = input[TOID(tx, y2, tz, size)];
339
+ DECODE(last1, x1, y1, z1);
340
+ }
341
+
342
+ int y_start, y_end, n_step = size / blockDim.x;
343
+ for(int step = 0; step < n_step; ++step) {
344
+ y_start = size - step * blockDim.x - 1;
345
+ y_end = size - (step + 1) * blockDim.x;
346
+
347
+ for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) {
348
+ dx = static_cast<double>(x2 - tx) * sx;
349
+ dy = static_cast<double>(lasty - ty) * sy;
350
+ dz = static_cast<double>(z2 - tz) * sz;
351
+ best = dx * dx + dy * dy + dz * dz;
352
+
353
+ while (HASNEXT(last2)) {
354
+ dx = static_cast<double>(x1 - tx) * sx;
355
+ dy = static_cast<double>(y2 - ty) * sy;
356
+ dz = static_cast<double>(z1 - tz) * sz;
357
+ dist = dx * dx + dy * dy + dz * dz;
358
+
359
+ if(dist > best) break;
360
+
361
+ best = dist; lasty = y2; last2 = last1;
362
+ DECODE(last2, x2, y2, z2);
363
+
364
+ if (HASNEXT(last2)) {
365
+ last1 = input[TOID(tx, y2, tz, size)];
366
+ DECODE(last1, x1, y1, z1);
367
+ }
368
+ }
369
+
370
+ block[threadIdx.x][ty - y_end] = ENCODE(lasty, x2, z2, NOTSITE(last2), ZERO);
371
+ }
372
+
373
+ __syncthreads();
374
+
375
+ if(!threadIdx.y) {
376
+ int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x, tz, size);
377
+ for(int i = 0; i < blockDim.x; i++, id+=size) {
378
+ output[id] = block[i][threadIdx.x];
379
+ }
380
+ }
381
+
382
+ __syncthreads();
383
+ }
384
+ }
385
+
386
+
387
+ } // extern C
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.21 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_base.cpython-310.pyc ADDED
Binary file (19 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_compressed.cpython-310.pyc ADDED
Binary file (24.1 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_coo.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_csr.cpython-310.pyc ADDED
Binary file (35.1 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_data.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_dia.cpython-310.pyc ADDED
Binary file (7.01 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_extract.cpython-310.pyc ADDED
Binary file (2.87 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_index.cpython-310.pyc ADDED
Binary file (19.9 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_sputils.cpython-310.pyc ADDED
Binary file (4.76 kB). View file
 
vllm/lib/python3.10/site-packages/packaging/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ __title__ = "packaging"
6
+ __summary__ = "Core utilities for Python packages"
7
+ __uri__ = "https://github.com/pypa/packaging"
8
+
9
+ __version__ = "24.2"
10
+
11
+ __author__ = "Donald Stufft and individual contributors"
12
+ __email__ = "donald@stufft.io"
13
+
14
+ __license__ = "BSD-2-Clause or Apache-2.0"
15
+ __copyright__ = f"2014 {__author__}"
vllm/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc ADDED
Binary file (2.67 kB). View file