ChipYTY commited on
Commit
853e22b
·
verified ·
1 Parent(s): 01bd570

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. source_code/SegMamba/monai/_extensions/gmm/gmm.cpp +83 -0
  2. source_code/SegMamba/monai/_extensions/gmm/gmm.h +53 -0
  3. source_code/SegMamba/monai/_extensions/gmm/gmm_cpu.cpp +35 -0
  4. source_code/SegMamba/monai/apps/auto3dseg/__init__.py +25 -0
  5. source_code/SegMamba/monai/apps/auto3dseg/__main__.py +35 -0
  6. source_code/SegMamba/monai/apps/auto3dseg/transforms.py +85 -0
  7. source_code/SegMamba/monai/apps/auto3dseg/utils.py +90 -0
  8. source_code/SegMamba/monai/apps/detection/networks/retinanet_network.py +432 -0
  9. source_code/SegMamba/monai/apps/mmars/model_desc.py +229 -0
  10. source_code/SegMamba/monai/apps/pathology/inferers/__init__.py +14 -0
  11. source_code/SegMamba/monai/apps/pathology/inferers/inferer.py +210 -0
  12. source_code/SegMamba/monai/apps/pathology/transforms/post/array.py +837 -0
  13. source_code/SegMamba/monai/apps/reconstruction/__init__.py +10 -0
  14. source_code/SegMamba/monai/apps/reconstruction/networks/blocks/varnetblock.py +81 -0
  15. source_code/SegMamba/monai/apps/reconstruction/networks/nets/__init__.py +10 -0
  16. source_code/SegMamba/monai/apps/reconstruction/networks/nets/coil_sensitivity_model.py +142 -0
  17. source_code/SegMamba/monai/csrc/filtering/bilateral/bilateral.cpp +49 -0
  18. source_code/SegMamba/monai/csrc/filtering/bilateral/bilateralfilter_cpu.cpp +136 -0
  19. source_code/SegMamba/monai/csrc/filtering/bilateral/bilateralfilter_cpu_phl.cpp +88 -0
  20. source_code/SegMamba/monai/csrc/filtering/bilateral/bilateralfilter_cuda.cu +260 -0
  21. source_code/SegMamba/monai/csrc/filtering/bilateral/bilateralfilter_cuda_phl.cu +142 -0
  22. source_code/SegMamba/monai/csrc/filtering/filtering.h +19 -0
  23. source_code/SegMamba/monai/csrc/filtering/permutohedral/hash_table.cuh +260 -0
  24. source_code/SegMamba/monai/csrc/filtering/permutohedral/permutohedral.cpp +97 -0
  25. source_code/SegMamba/monai/csrc/filtering/permutohedral/permutohedral.h +28 -0
  26. source_code/SegMamba/monai/csrc/filtering/permutohedral/permutohedral_cpu.cpp +502 -0
  27. source_code/SegMamba/monai/csrc/filtering/permutohedral/permutohedral_cuda.cu +540 -0
  28. source_code/SegMamba/monai/csrc/filtering/trainable_bilateral/bf_layer_cpu_backward.cpp +232 -0
  29. source_code/SegMamba/monai/csrc/filtering/trainable_bilateral/bf_layer_gpu_backward.cu +296 -0
  30. source_code/SegMamba/monai/csrc/filtering/trainable_bilateral/trainable_bilateral.cpp +121 -0
  31. source_code/SegMamba/monai/csrc/filtering/trainable_bilateral/trainable_bilateral.h +88 -0
  32. source_code/SegMamba/monai/csrc/filtering/trainable_joint_bilateral/jbf_layer_cpu_backward.cpp +246 -0
  33. source_code/SegMamba/monai/csrc/filtering/trainable_joint_bilateral/jbf_layer_cpu_forward.cpp +278 -0
  34. source_code/SegMamba/monai/csrc/filtering/trainable_joint_bilateral/jbf_layer_gpu_backward.cu +311 -0
  35. source_code/SegMamba/monai/csrc/filtering/trainable_joint_bilateral/trainable_joint_bilateral.cpp +133 -0
  36. source_code/SegMamba/monai/csrc/lltm/lltm_cpu.cpp +90 -0
  37. source_code/SegMamba/monai/csrc/resample/pushpull_cpu.cpp +2270 -0
  38. source_code/SegMamba/monai/csrc/resample/pushpull_cuda.cu +2244 -0
  39. source_code/SegMamba/monai/csrc/utils/meta_macros.h +131 -0
  40. source_code/SegMamba/monai/fl/client/__init__.py +15 -0
  41. source_code/SegMamba/monai/fl/utils/constants.py +59 -0
  42. source_code/SegMamba/monai/fl/utils/exchange_object.py +107 -0
  43. source_code/SegMamba/monai/handlers/hausdorff_distance.py +67 -0
  44. source_code/SegMamba/monai/handlers/tensorboard_handlers.py +454 -0
  45. source_code/SegMamba/monai/inferers/inferer.py +754 -0
  46. source_code/SegMamba/monai/losses/adversarial_loss.py +173 -0
  47. source_code/SegMamba/monai/losses/contrastive.py +81 -0
  48. source_code/SegMamba/monai/losses/deform.py +210 -0
  49. source_code/SegMamba/monai/losses/giou_loss.py +67 -0
  50. source_code/SegMamba/monai/losses/tversky.py +162 -0
source_code/SegMamba/monai/_extensions/gmm/gmm.cpp ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <torch/extension.h>
15
+
16
+ #include "gmm.h"
17
+
18
+ py::tuple init() {
19
+ torch::Tensor gmm_tensor =
20
+ torch::zeros({GMM_COUNT, GMM_COMPONENT_COUNT}, torch::dtype(torch::kFloat32).device(torch::kCUDA));
21
+ torch::Tensor scratch_tensor = torch::empty({1}, torch::dtype(torch::kFloat32).device(torch::kCUDA));
22
+ return py::make_tuple(gmm_tensor, scratch_tensor);
23
+ }
24
+
25
// Fit the GMM parameters to the labelled input, dispatching to the CUDA or
// CPU backend based on the input tensor's device.
//
// Args:
//   gmm_tensor:     (GMM_COUNT, GMM_COMPONENT_COUNT) parameter tensor, written in place.
//   scratch_tensor: workspace tensor; resized here if too small for this input.
//   input_tensor:   feature tensor; batch dimension is size(0).
//   label_tensor:   int label tensor matching the input's elements.
void learn(
    torch::Tensor gmm_tensor,
    torch::Tensor scratch_tensor,
    torch::Tensor input_tensor,
    torch::Tensor label_tensor) {
  c10::DeviceType device_type = input_tensor.device().type();

  unsigned int batch_count = input_tensor.size(0);
  // stride(1) equals the per-channel element count only for a contiguous
  // (batch, channel, spatial...) layout — NOTE(review): contiguity is assumed,
  // not checked here; confirm callers guarantee it.
  unsigned int element_count = input_tensor.stride(1);

  // Workspace sized for per-element terms plus per-block partial sums
  // (the 32*32 divisor presumably matches the CUDA block size — verify
  // against the kernel in gmm_cuda).
  unsigned int scratch_size =
      batch_count * (element_count + GMM_COMPONENT_COUNT * GMM_COUNT * (element_count / (32 * 32)));

  // Grow-only resize: keeps the buffer across calls to avoid reallocation.
  if (scratch_tensor.size(0) < scratch_size) {
    scratch_tensor.resize_({scratch_size});
  }

  float* gmm = gmm_tensor.data_ptr<float>();
  float* scratch = scratch_tensor.data_ptr<float>();
  float* input = input_tensor.data_ptr<float>();
  int* labels = label_tensor.data_ptr<int>();

  if (device_type == torch::kCUDA) {
    learn_cuda(input, labels, gmm, scratch, batch_count, element_count);
  } else {
    // CPU path currently throws (see gmm_cpu.cpp).
    learn_cpu(input, labels, gmm, scratch, batch_count, element_count);
  }
}
53
+
54
+ torch::Tensor apply(torch::Tensor gmm_tensor, torch::Tensor input_tensor) {
55
+ c10::DeviceType device_type = input_tensor.device().type();
56
+
57
+ unsigned int dim = input_tensor.dim();
58
+ unsigned int batch_count = input_tensor.size(0);
59
+ unsigned int element_count = input_tensor.stride(1);
60
+
61
+ auto output_size = input_tensor.sizes().vec();
62
+ output_size[1] = MIXTURE_COUNT;
63
+ torch::Tensor output_tensor =
64
+ torch::empty(c10::IntArrayRef(output_size), torch::dtype(torch::kFloat32).device(device_type));
65
+
66
+ const float* gmm = gmm_tensor.data_ptr<float>();
67
+ const float* input = input_tensor.data_ptr<float>();
68
+ float* output = output_tensor.data_ptr<float>();
69
+
70
+ if (device_type == torch::kCUDA) {
71
+ apply_cuda(gmm, input, output, batch_count, element_count);
72
+ } else {
73
+ apply_cpu(gmm, input, output, batch_count, element_count);
74
+ }
75
+
76
+ return output_tensor;
77
+ }
78
+
79
// Python bindings: expose init/learn/apply as module-level functions.
// wrap_pybind_function adds torch's argument handling around each call.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("init", torch::wrap_pybind_function(init));
  m.def("learn", torch::wrap_pybind_function(learn));
  m.def("apply", torch::wrap_pybind_function(apply));
}
source_code/SegMamba/monai/_extensions/gmm/gmm.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
// The GMM extension is compiled per configuration: the build must supply
// CHANNEL_COUNT, MIXTURE_COUNT and MIXTURE_SIZE as preprocessor definitions.
#if !defined(CHANNEL_COUNT) || !defined(MIXTURE_COUNT) || !defined(MIXTURE_SIZE)
#error Definition of CHANNEL_COUNT, MIXTURE_COUNT, and MIXTURE_SIZE required
#endif

#if CHANNEL_COUNT < 1 || MIXTURE_COUNT < 1 || MIXTURE_SIZE < 1
#error CHANNEL_COUNT, MIXTURE_COUNT, and MIXTURE_SIZE must be positive
#endif

// n*(n+1)/2 terms — presumably the unique entries of a symmetric matrix of
// order CHANNEL_COUNT+1 (full) and CHANNEL_COUNT (sub), respectively.
#define MATRIX_COMPONENT_COUNT ((CHANNEL_COUNT + 1) * (CHANNEL_COUNT + 2) / 2)
#define SUB_MATRIX_COMPONENT_COUNT (CHANNEL_COUNT * (CHANNEL_COUNT + 1) / 2)
// Parameters stored per Gaussian: the matrix components plus one extra scalar.
#define GMM_COMPONENT_COUNT (MATRIX_COMPONENT_COUNT + 1)
// Total Gaussians across all mixtures.
#define GMM_COUNT (MIXTURE_COUNT * MIXTURE_SIZE)

// CPU entry points (currently unimplemented stubs — see gmm_cpu.cpp).
void learn_cpu(
    const float* input,
    const int* labels,
    float* gmm,
    float* scratch_memory,
    unsigned int batch_count,
    unsigned int element_count);
void apply_cpu(
    const float* gmm,
    const float* input,
    float* output,
    unsigned int batch_count,
    unsigned int element_count);

// CUDA entry points (implemented in the .cu translation unit).
void learn_cuda(
    const float* input,
    const int* labels,
    float* gmm,
    float* scratch_memory,
    unsigned int batch_count,
    unsigned int element_count);
void apply_cuda(
    const float* gmm,
    const float* input,
    float* output,
    unsigned int batch_count,
    unsigned int element_count);
source_code/SegMamba/monai/_extensions/gmm/gmm_cpu.cpp ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <stdexcept>
15
+
16
+ #include "gmm.h"
17
+
18
// CPU fallback for GMM fitting: not implemented, always throws.
// NOTE(review): std::invalid_argument presumably surfaces as a Python
// ValueError through pybind11's default exception translation — confirm.
void learn_cpu(
    const float* input,
    const int* labels,
    float* gmm,
    float* scratch_memory,
    unsigned int batch_count,
    unsigned int element_count) {
  throw std::invalid_argument("GMM received a cpu tensor but is not yet implemented for the cpu");
}
27
+
28
// CPU fallback for GMM evaluation: not implemented, always throws.
// Mirrors learn_cpu above; only the CUDA backend is functional.
void apply_cpu(
    const float* gmm,
    const float* input,
    float* output,
    unsigned int batch_count,
    unsigned int element_count) {
  throw std::invalid_argument("GMM received a cpu tensor but is not yet implemented for the cpu");
}
source_code/SegMamba/monai/apps/auto3dseg/__init__.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .auto_runner import AutoRunner
15
+ from .bundle_gen import BundleAlgo, BundleGen
16
+ from .data_analyzer import DataAnalyzer
17
+ from .ensemble_builder import (
18
+ AlgoEnsemble,
19
+ AlgoEnsembleBestByFold,
20
+ AlgoEnsembleBestN,
21
+ AlgoEnsembleBuilder,
22
+ EnsembleRunner,
23
+ )
24
+ from .hpo_gen import NNIGen, OptunaGen
25
+ from .utils import export_bundle_algo_history, get_name_from_algo_id, import_bundle_algo_history
source_code/SegMamba/monai/apps/auto3dseg/__main__.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from monai.apps.auto3dseg.auto_runner import AutoRunner
15
+ from monai.apps.auto3dseg.bundle_gen import BundleAlgo, BundleGen
16
+ from monai.apps.auto3dseg.data_analyzer import DataAnalyzer
17
+ from monai.apps.auto3dseg.ensemble_builder import AlgoEnsembleBuilder, EnsembleRunner
18
+ from monai.apps.auto3dseg.hpo_gen import NNIGen, OptunaGen
19
+
20
if __name__ == "__main__":
    from monai.utils import optional_import

    # "fire" is resolved via optional_import so that importing this package
    # never hard-requires the CLI dependency; it is only needed when the
    # module is executed directly.
    fire, _ = optional_import("fire")
    # Expose each Auto3DSeg component class as a named CLI command.
    fire.Fire(
        {
            "DataAnalyzer": DataAnalyzer,
            "BundleGen": BundleGen,
            "BundleAlgo": BundleAlgo,
            "AlgoEnsembleBuilder": AlgoEnsembleBuilder,
            "EnsembleRunner": EnsembleRunner,
            "AutoRunner": AutoRunner,
            "NNIGen": NNIGen,
            "OptunaGen": OptunaGen,
        }
    )
source_code/SegMamba/monai/apps/auto3dseg/transforms.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import warnings
15
+ from collections.abc import Hashable, Mapping
16
+
17
+ import numpy as np
18
+ import torch
19
+
20
+ from monai.config import KeysCollection
21
+ from monai.networks.utils import pytorch_after
22
+ from monai.transforms import MapTransform
23
+ from monai.utils.misc import ImageMetaKey
24
+
25
+
26
class EnsureSameShaped(MapTransform):
    """
    Checks if segmentation label images (in keys) have the same spatial shape as the main image (in source_key),
    and raise an error if the shapes are significantly different.
    If the shapes are only slightly different (within an allowed_shape_difference in each dim), then resize the label using
    nearest interpolation. This transform is designed to correct datasets with slight label shape mismatches.
    Generally image and segmentation label must have the same spatial shape, however some public datasets are having slight
    shape mismatches, which will cause potential crashes when calculating loss or metric functions.
    """

    def __init__(
        self,
        keys: KeysCollection = "label",
        allow_missing_keys: bool = False,
        source_key: str = "image",
        allowed_shape_difference: int = 5,
        warn: bool = True,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be compared to the source_key item shape.
            allow_missing_keys: do not raise exception if key is missing.
            source_key: key of the item with the reference shape.
            allowed_shape_difference: raises error if shapes are different more than this value in any dimension,
                otherwise corrects for the shape mismatch using nearest interpolation.
            warn: if `True` prints a warning if the label image is resized
        """
        super().__init__(keys=keys, allow_missing_keys=allow_missing_keys)
        self.source_key = source_key
        self.allowed_shape_difference = allowed_shape_difference
        self.warn = warn

    def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
        d = dict(data)
        # Reference spatial shape: drop the channel dimension (dim 0).
        image_shape = d[self.source_key].shape[1:]
        for key in self.key_iterator(d):
            label_shape = d[key].shape[1:]
            if label_shape != image_shape:
                # Fix: `filename` was computed but never used — the messages
                # reported "(unknown)" instead of identifying the offending file.
                filename = ""
                if hasattr(d[key], "meta") and isinstance(d[key].meta, Mapping):  # type: ignore[attr-defined]
                    filename = d[key].meta.get(ImageMetaKey.FILENAME_OR_OBJ)  # type: ignore[attr-defined]

                # Small mismatch (within tolerance per dimension): resize the label.
                if np.allclose(list(label_shape), list(image_shape), atol=self.allowed_shape_difference):
                    if self.warn:
                        warnings.warn(
                            f"The {key} with shape {label_shape} was resized to match the source shape {image_shape}"
                            f", the metadata was not updated {filename}."
                        )
                    # Nearest interpolation keeps label values discrete;
                    # "nearest-exact" (PyTorch >= 1.11) avoids the half-pixel offset of "nearest".
                    d[key] = torch.nn.functional.interpolate(
                        input=d[key].unsqueeze(0),
                        size=image_shape,
                        mode="nearest-exact" if pytorch_after(1, 11) else "nearest",
                    ).squeeze(0)
                else:
                    raise ValueError(
                        f"The {key} shape {label_shape} is different from the source shape {image_shape} {filename}."
                    )
        return d
source_code/SegMamba/monai/apps/auto3dseg/utils.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import os
15
+
16
+ from monai.apps.auto3dseg.bundle_gen import BundleAlgo
17
+ from monai.auto3dseg import algo_from_pickle, algo_to_pickle
18
+ from monai.utils.enums import AlgoKeys
19
+
20
+ __all__ = ["import_bundle_algo_history", "export_bundle_algo_history", "get_name_from_algo_id"]
21
+
22
+
23
+ def import_bundle_algo_history(
24
+ output_folder: str = ".", template_path: str | None = None, only_trained: bool = True
25
+ ) -> list:
26
+ """
27
+ import the history of the bundleAlgo objects as a list of algo dicts.
28
+ each algo_dict has keys name (folder name), algo (bundleAlgo), is_trained (bool),
29
+
30
+ Args:
31
+ output_folder: the root path of the algorithms templates.
32
+ template_path: the algorithm_template. It must contain algo.py in the follow path:
33
+ ``{algorithm_templates_dir}/{network}/scripts/algo.py``.
34
+ only_trained: only read the algo history if the algo is trained.
35
+ """
36
+
37
+ history = []
38
+
39
+ for name in sorted(os.listdir(output_folder)):
40
+ write_path = os.path.join(output_folder, name)
41
+
42
+ if not os.path.isdir(write_path):
43
+ continue
44
+
45
+ obj_filename = os.path.join(write_path, "algo_object.pkl")
46
+ if not os.path.isfile(obj_filename): # saved mode pkl
47
+ continue
48
+
49
+ algo, algo_meta_data = algo_from_pickle(obj_filename, template_path=template_path)
50
+
51
+ best_metric = algo_meta_data.get(AlgoKeys.SCORE, None)
52
+ if best_metric is None:
53
+ try:
54
+ best_metric = algo.get_score()
55
+ except BaseException:
56
+ pass
57
+
58
+ is_trained = best_metric is not None
59
+
60
+ if (only_trained and is_trained) or not only_trained:
61
+ history.append(
62
+ {AlgoKeys.ID: name, AlgoKeys.ALGO: algo, AlgoKeys.SCORE: best_metric, AlgoKeys.IS_TRAINED: is_trained}
63
+ )
64
+
65
+ return history
66
+
67
+
68
+ def export_bundle_algo_history(history: list[dict[str, BundleAlgo]]) -> None:
69
+ """
70
+ Save all the BundleAlgo in the history to algo_object.pkl in each individual folder
71
+
72
+ Args:
73
+ history: a List of Bundle. Typically, the history can be obtained from BundleGen get_history method
74
+ """
75
+ for algo_dict in history:
76
+ algo = algo_dict[AlgoKeys.ALGO]
77
+ algo_to_pickle(algo, template_path=algo.template_path)
78
+
79
+
80
+ def get_name_from_algo_id(id: str) -> str:
81
+ """
82
+ Get the name of Algo from the identifier of the Algo.
83
+
84
+ Args:
85
+ id: identifier which follows a convention of "name_fold_other".
86
+
87
+ Returns:
88
+ name of the Algo.
89
+ """
90
+ return id.split("_")[0]
source_code/SegMamba/monai/apps/detection/networks/retinanet_network.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ # =========================================================================
13
+ # Adapted from https://github.com/pytorch/vision/blob/main/torchvision/models/detection/retinanet.py
14
+ # which has the following license...
15
+ # https://github.com/pytorch/vision/blob/main/LICENSE
16
+
17
+ # BSD 3-Clause License
18
+
19
+ # Copyright (c) Soumith Chintala 2016,
20
+ # All rights reserved.
21
+
22
+ # Redistribution and use in source and binary forms, with or without
23
+ # modification, are permitted provided that the following conditions are met:
24
+
25
+ # * Redistributions of source code must retain the above copyright notice, this
26
+ # list of conditions and the following disclaimer.
27
+
28
+ # * Redistributions in binary form must reproduce the above copyright notice,
29
+ # this list of conditions and the following disclaimer in the documentation
30
+ # and/or other materials provided with the distribution.
31
+
32
+ # * Neither the name of the copyright holder nor the names of its
33
+ # contributors may be used to endorse or promote products derived from
34
+ # this software without specific prior written permission.
35
+ """
36
+ Part of this script is adapted from
37
+ https://github.com/pytorch/vision/blob/main/torchvision/models/detection/retinanet.py
38
+ """
39
+
40
+ from __future__ import annotations
41
+
42
+ import math
43
+ import warnings
44
+ from collections.abc import Callable, Sequence
45
+ from typing import Any, Dict
46
+
47
+ import torch
48
+ from torch import Tensor, nn
49
+
50
+ from monai.networks.blocks.backbone_fpn_utils import BackboneWithFPN, _resnet_fpn_extractor
51
+ from monai.networks.layers.factories import Conv
52
+ from monai.networks.nets import resnet
53
+ from monai.utils import ensure_tuple_rep, look_up_option, optional_import
54
+
55
+ _validate_trainable_layers, _ = optional_import(
56
+ "torchvision.models.detection.backbone_utils", name="_validate_trainable_layers"
57
+ )
58
+
59
+
60
class RetinaNetClassificationHead(nn.Module):
    """
    A classification head for use in RetinaNet.

    This head takes a list of feature maps as inputs, and outputs a list of classification maps.
    Each output map has same spatial size with the corresponding input feature map,
    and the number of output channel is num_anchors * num_classes.

    Args:
        in_channels: number of channels of the input feature
        num_anchors: number of anchors to be predicted
        num_classes: number of classes to be predicted
        spatial_dims: spatial dimension of the network, should be 2 or 3.
        prior_probability: prior probability to initialize classification convolutional layers.
    """

    def __init__(
        self, in_channels: int, num_anchors: int, num_classes: int, spatial_dims: int, prior_probability: float = 0.01
    ):
        super().__init__()

        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        # Four conv -> GroupNorm -> ReLU stages, channel count preserved.
        conv = []
        for _ in range(4):
            conv.append(conv_type(in_channels, in_channels, kernel_size=3, stride=1, padding=1))
            conv.append(nn.GroupNorm(num_groups=8, num_channels=in_channels))
            conv.append(nn.ReLU())
        self.conv = nn.Sequential(*conv)

        # Initialize only the conv layers of the trunk (norm/ReLU are skipped).
        for layer in self.conv.children():
            if isinstance(layer, conv_type):  # type: ignore
                torch.nn.init.normal_(layer.weight, std=0.01)
                torch.nn.init.constant_(layer.bias, 0)

        # Final layer predicts num_anchors * num_classes channels per location.
        self.cls_logits = conv_type(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1)
        torch.nn.init.normal_(self.cls_logits.weight, std=0.01)
        # Bias set so that the initial sigmoid output equals prior_probability
        # (the focal-loss prior initialization from the RetinaNet paper).
        torch.nn.init.constant_(self.cls_logits.bias, -math.log((1 - prior_probability) / prior_probability))

        self.num_classes = num_classes
        self.num_anchors = num_anchors

    def forward(self, x: list[Tensor]) -> list[Tensor]:
        """
        It takes a list of feature maps as inputs, and outputs a list of classification maps.
        Each output classification map has same spatial size with the corresponding input feature map,
        and the number of output channel is num_anchors * num_classes.

        Args:
            x: list of feature map, x[i] is a (B, in_channels, H_i, W_i) or (B, in_channels, H_i, W_i, D_i) Tensor.

        Return:
            cls_logits_maps, list of classification map. cls_logits_maps[i] is a
            (B, num_anchors * num_classes, H_i, W_i) or (B, num_anchors * num_classes, H_i, W_i, D_i) Tensor.

        """
        cls_logits_maps = []

        # Accept a single Tensor as a one-element list for convenience.
        if isinstance(x, Tensor):
            feature_maps = [x]
        else:
            feature_maps = x

        for features in feature_maps:
            cls_logits = self.conv(features)
            cls_logits = self.cls_logits(cls_logits)

            cls_logits_maps.append(cls_logits)

            # Sanity check each level's output: hard error during training
            # (grad enabled), warning only during inference.
            if torch.isnan(cls_logits).any() or torch.isinf(cls_logits).any():
                if torch.is_grad_enabled():
                    raise ValueError("cls_logits is NaN or Inf.")
                else:
                    warnings.warn("cls_logits is NaN or Inf.")

        return cls_logits_maps
135
+
136
+
137
class RetinaNetRegressionHead(nn.Module):
    """
    A regression head for use in RetinaNet.

    This head takes a list of feature maps as inputs, and outputs a list of box regression maps.
    Each output box regression map has same spatial size with the corresponding input feature map,
    and the number of output channel is num_anchors * 2 * spatial_dims.

    Args:
        in_channels: number of channels of the input feature
        num_anchors: number of anchors to be predicted
        spatial_dims: spatial dimension of the network, should be 2 or 3.
    """

    def __init__(self, in_channels: int, num_anchors: int, spatial_dims: int):
        super().__init__()

        conv_type: Callable = Conv[Conv.CONV, spatial_dims]

        # Four conv -> GroupNorm -> ReLU stages, channel count preserved
        # (same trunk shape as the classification head).
        conv = []
        for _ in range(4):
            conv.append(conv_type(in_channels, in_channels, kernel_size=3, stride=1, padding=1))
            conv.append(nn.GroupNorm(num_groups=8, num_channels=in_channels))
            conv.append(nn.ReLU())

        self.conv = nn.Sequential(*conv)

        # 2 * spatial_dims regression targets per anchor (box corner/offset pairs).
        self.bbox_reg = conv_type(in_channels, num_anchors * 2 * spatial_dims, kernel_size=3, stride=1, padding=1)
        torch.nn.init.normal_(self.bbox_reg.weight, std=0.01)
        torch.nn.init.zeros_(self.bbox_reg.bias)

        # Initialize only the conv layers of the trunk (norm/ReLU are skipped).
        for layer in self.conv.children():
            if isinstance(layer, conv_type):  # type: ignore
                torch.nn.init.normal_(layer.weight, std=0.01)
                torch.nn.init.zeros_(layer.bias)

    def forward(self, x: list[Tensor]) -> list[Tensor]:
        """
        It takes a list of feature maps as inputs, and outputs a list of box regression maps.
        Each output box regression map has same spatial size with the corresponding input feature map,
        and the number of output channel is num_anchors * 2 * spatial_dims.

        Args:
            x: list of feature map, x[i] is a (B, in_channels, H_i, W_i) or (B, in_channels, H_i, W_i, D_i) Tensor.

        Return:
            box_regression_maps, list of box regression map. cls_logits_maps[i] is a
            (B, num_anchors * 2 * spatial_dims, H_i, W_i) or (B, num_anchors * 2 * spatial_dims, H_i, W_i, D_i) Tensor.

        """
        box_regression_maps = []

        # Accept a single Tensor as a one-element list for convenience.
        if isinstance(x, Tensor):
            feature_maps = [x]
        else:
            feature_maps = x

        for features in feature_maps:
            box_regression = self.conv(features)
            box_regression = self.bbox_reg(box_regression)

            box_regression_maps.append(box_regression)

            # Sanity check each level's output: hard error during training
            # (grad enabled), warning only during inference.
            if torch.isnan(box_regression).any() or torch.isinf(box_regression).any():
                if torch.is_grad_enabled():
                    raise ValueError("box_regression is NaN or Inf.")
                else:
                    warnings.warn("box_regression is NaN or Inf.")

        return box_regression_maps
207
+
208
+
209
class RetinaNet(nn.Module):
    """
    The network used in RetinaNet.

    It takes an image tensor as inputs, and outputs either 1) a dictionary ``head_outputs``.
    ``head_outputs[self.cls_key]`` is the predicted classification maps, a list of Tensor.
    ``head_outputs[self.box_reg_key]`` is the predicted box regression maps, a list of Tensor.
    or 2) a list of 2N tensors ``head_outputs``, with first N tensors being the predicted
    classification maps and second N tensors being the predicted box regression maps.

    Args:
        spatial_dims: number of spatial dimensions of the images. We support both 2D and 3D images.
        num_classes: number of output classes of the model (excluding the background).
        num_anchors: number of anchors at each location.
        feature_extractor: a network that outputs feature maps from the input images,
            each feature map corresponds to a different resolution.
            Its output can have a format of Tensor, Dict[Any, Tensor], or Sequence[Tensor].
            It can be the output of ``resnet_fpn_feature_extractor(*args, **kwargs)``.
        size_divisible: the spatial size of the network input should be divisible by size_divisible,
            decided by the feature_extractor.
        use_list_output: default False. If False, the network outputs a dictionary ``head_outputs``,
            ``head_outputs[self.cls_key]`` is the predicted classification maps, a list of Tensor.
            ``head_outputs[self.box_reg_key]`` is the predicted box regression maps, a list of Tensor.
            If True, the network outputs a list of 2N tensors ``head_outputs``, with first N tensors being
            the predicted classification maps and second N tensors being the predicted box regression maps.

    Example:

        .. code-block:: python

            from monai.networks.nets import resnet
            spatial_dims = 3  # 3D network
            conv1_t_stride = (2,2,1)  # stride of first convolutional layer in backbone
            backbone = resnet.ResNet(
                spatial_dims = spatial_dims,
                block = resnet.ResNetBottleneck,
                layers = [3, 4, 6, 3],
                block_inplanes = resnet.get_inplanes(),
                n_input_channels= 1,
                conv1_t_stride = conv1_t_stride,
                conv1_t_size = (7,7,7),
            )
            # This feature_extractor outputs 4-level feature maps.
            # number of output feature maps is len(returned_layers)+1
            returned_layers = [1,2,3]  # returned layer from feature pyramid network
            feature_extractor = resnet_fpn_feature_extractor(
                backbone = backbone,
                spatial_dims = spatial_dims,
                pretrained_backbone = False,
                trainable_backbone_layers = None,
                returned_layers = returned_layers,
            )
            # This feature_extractor requires input image spatial size
            # to be divisible by (32, 32, 16).
            size_divisible = tuple(2*s*2**max(returned_layers) for s in conv1_t_stride)
            model = RetinaNet(
                spatial_dims = spatial_dims,
                num_classes = 5,
                num_anchors = 6,
                feature_extractor=feature_extractor,
                size_divisible = size_divisible,
            ).to(device)
            result = model(torch.rand(2, 1, 128,128,128))
            cls_logits_maps = result["classification"]  # a list of len(returned_layers)+1 Tensor
            box_regression_maps = result["box_regression"]  # a list of len(returned_layers)+1 Tensor
    """

    def __init__(
        self,
        spatial_dims: int,
        num_classes: int,
        num_anchors: int,
        feature_extractor: nn.Module,
        size_divisible: Sequence[int] | int = 1,
        use_list_output: bool = False,
    ):
        super().__init__()

        # validate spatial_dims against the supported values before storing it
        self.spatial_dims = look_up_option(spatial_dims, supported=[1, 2, 3])
        self.num_classes = num_classes
        # broadcast a scalar size_divisible to one value per spatial dimension
        self.size_divisible = ensure_tuple_rep(size_divisible, self.spatial_dims)
        self.use_list_output = use_list_output

        if not hasattr(feature_extractor, "out_channels"):
            raise ValueError(
                "feature_extractor should contain an attribute out_channels "
                "specifying the number of output channels (assumed to be the "
                "same for all the levels)"
            )
        self.feature_extractor = feature_extractor

        # all pyramid levels are assumed to share this channel count (see check above)
        self.feature_map_channels: int = self.feature_extractor.out_channels
        self.num_anchors = num_anchors
        self.classification_head = RetinaNetClassificationHead(
            self.feature_map_channels, self.num_anchors, self.num_classes, spatial_dims=self.spatial_dims
        )
        self.regression_head = RetinaNetRegressionHead(
            self.feature_map_channels, self.num_anchors, spatial_dims=self.spatial_dims
        )

        # dictionary keys used for the head outputs when use_list_output is False
        self.cls_key: str = "classification"
        self.box_reg_key: str = "box_regression"

    def forward(self, images: Tensor) -> Any:
        """
        It takes an image tensor as inputs, and outputs predicted classification maps
        and predicted box regression maps in ``head_outputs``.

        Args:
            images: input images, sized (B, img_channels, H, W) or (B, img_channels, H, W, D).

        Return:
            1) If self.use_list_output is False, output a dictionary ``head_outputs`` with
            keys including self.cls_key and self.box_reg_key.
            ``head_outputs[self.cls_key]`` is the predicted classification maps, a list of Tensor.
            ``head_outputs[self.box_reg_key]`` is the predicted box regression maps, a list of Tensor.
            2) if self.use_list_output is True, outputs a list of 2N tensors ``head_outputs``, with first N tensors being
            the predicted classification maps and second N tensors being the predicted box regression maps.

        """
        # compute features maps list from the input images.
        features = self.feature_extractor(images)
        if isinstance(features, Tensor):
            feature_maps = [features]
        elif torch.jit.isinstance(features, Dict[str, Tensor]):
            # torch.jit.isinstance (not plain isinstance) keeps this branch TorchScript-compatible
            feature_maps = list(features.values())
        else:
            feature_maps = list(features)

        if not isinstance(feature_maps[0], Tensor):
            raise ValueError("feature_extractor output format must be Tensor, Dict[str, Tensor], or Sequence[Tensor].")

        # compute classification and box regression maps from the feature maps
        # expandable for mask prediction in the future

        if not self.use_list_output:
            # output dict
            head_outputs = {self.cls_key: self.classification_head(feature_maps)}
            head_outputs[self.box_reg_key] = self.regression_head(feature_maps)
            return head_outputs
        else:
            # output list of tensor, first half is classification, second half is box regression
            head_outputs_sequence = self.classification_head(feature_maps) + self.regression_head(feature_maps)
            return head_outputs_sequence
353
+
354
+
355
def resnet_fpn_feature_extractor(
    backbone: resnet.ResNet,
    spatial_dims: int,
    pretrained_backbone: bool = False,
    returned_layers: Sequence[int] = (1, 2, 3),
    trainable_backbone_layers: int | None = None,
) -> BackboneWithFPN:
    """
    Construct a ResNet-FPN feature extractor, suitable as the ``feature_extractor``
    argument of ``RetinaNet``.

    Reference: `"Focal Loss for Dense Object Detection" <https://arxiv.org/abs/1708.02002>`_.

    The returned network maps an input image tensor to a dictionary from string keys to
    the extracted feature maps (Tensor), one entry per pyramid level. Its input is
    expected to be a list of tensors, each of shape ``[C, H, W]`` or ``[C, H, W, D]``,
    one per image; different images can have different sizes.

    Args:
        backbone: a ResNet model, used as backbone.
        spatial_dims: number of spatial dimensions of the images. We support both 2D and 3D images.
        pretrained_backbone: whether the backbone has been pre-trained.
        returned_layers: backbone layers (each in the range [1, 4]) whose outputs feed the FPN.
            ``len(returned_layers) + 1`` feature maps are produced, since an extra
            ``LastLevelMaxPool()`` level is appended.
        trainable_backbone_layers: number of trainable (not frozen) resnet layers starting
            from the final block. Valid values are between 0 and 5, with 5 meaning all
            backbone layers are trainable. Forced to 5 when ``pretrained_backbone`` is
            False; defaults to 3 when ``pretrained_backbone`` is True and ``None`` is passed.

    Example:

        .. code-block:: python

            from monai.networks.nets import resnet
            backbone = resnet.ResNet(
                spatial_dims=3,
                block=resnet.ResNetBottleneck,
                layers=[3, 4, 6, 3],
                block_inplanes=resnet.get_inplanes(),
                n_input_channels=1,
                conv1_t_stride=(2, 2, 1),
                conv1_t_size=(7, 7, 7),
            )
            # outputs len(returned_layers)+1 = 4 levels of feature maps
            feature_extractor = resnet_fpn_feature_extractor(
                backbone=backbone, spatial_dims=3, returned_layers=[1, 2, 3]
            )
    """
    # Freezing layers only makes sense for a pre-trained backbone; otherwise every
    # layer must be trained, so the validated count becomes 5 (all layers) in that case.
    n_trainable: int = _validate_trainable_layers(
        pretrained_backbone, trainable_backbone_layers, max_value=5, default_value=3
    )

    return _resnet_fpn_extractor(
        backbone, spatial_dims, n_trainable, returned_layers=list(returned_layers), extra_blocks=None
    )
source_code/SegMamba/monai/apps/mmars/model_desc.py ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+ """
12
+ Collection of the remote MMAR descriptors
13
+
14
+ See Also:
15
+ - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import os
21
+ from typing import Any
22
+
23
+ __all__ = ["MODEL_DESC", "RemoteMMARKeys"]
24
+
25
+
26
class RemoteMMARKeys:
    """
    Keys of the metadata dictionaries that describe remote MMARs.

    The ``ID`` entry must uniquely identify an MMAR.
    """

    ID = "id"  # unique MMAR
    NAME = "name"  # MMAR name for readability
    URL = "url"  # remote location of the MMAR, see also: `monai.apps.mmars.mmars._get_ngc_url`
    DOC = "doc"  # documentation page of the remote model, see also: `monai.apps.mmars.mmars._get_ngc_doc_url`
    FILE_TYPE = "file_type"  # type of the compressed MMAR
    HASH_TYPE = "hash_type"  # hashing method for the compressed MMAR
    HASH_VAL = "hash_val"  # hashing value for the compressed MMAR
    MODEL_FILE = "model_file"  # within an MMAR folder, the relative path to the model file
    CONFIG_FILE = "config_file"  # within an MMAR folder, the relative path to the config file (for model config)
    VERSION = "version"  # version of the MMAR


# defaults shared by almost every descriptor below
_DEFAULT_MODEL_FILE = os.path.join("models", "model.pt")
_TRAIN_CONFIG_FILE = os.path.join("config", "config_train.json")


def _make_desc(mmar_id, name, version, model_file=None, config_file=None):
    """
    Assemble one MMAR descriptor dict.

    Every known MMAR ships as an md5-hashed zip (hash value unset) with its weights at
    ``models/model.pt`` unless ``model_file`` overrides it. ``config_file`` is omitted
    from the dict entirely when ``None`` (some MMARs publish no training config).
    """
    desc = {
        RemoteMMARKeys.ID: mmar_id,
        RemoteMMARKeys.NAME: name,
        RemoteMMARKeys.FILE_TYPE: "zip",
        RemoteMMARKeys.HASH_TYPE: "md5",
        RemoteMMARKeys.HASH_VAL: None,
        RemoteMMARKeys.MODEL_FILE: _DEFAULT_MODEL_FILE if model_file is None else model_file,
    }
    if config_file is not None:
        desc[RemoteMMARKeys.CONFIG_FILE] = config_file
    desc[RemoteMMARKeys.VERSION] = version
    return desc


# descriptors of every known remote MMAR, in publication order
MODEL_DESC: tuple[dict[Any, Any], ...] = (
    _make_desc("clara_pt_spleen_ct_segmentation_1", "clara_pt_spleen_ct_segmentation", 1, config_file=_TRAIN_CONFIG_FILE),
    _make_desc(
        "clara_pt_prostate_mri_segmentation_1", "clara_pt_prostate_mri_segmentation", 1, config_file=_TRAIN_CONFIG_FILE
    ),
    _make_desc("clara_pt_covid19_ct_lesion_segmentation_1", "clara_pt_covid19_ct_lesion_segmentation", 1),
    _make_desc(
        "clara_pt_covid19_3d_ct_classification_1",
        "clara_pt_covid19_3d_ct_classification",
        1,
        config_file=_TRAIN_CONFIG_FILE,
    ),
    _make_desc("clara_pt_covid19_ct_lung_annotation_1", "clara_pt_covid19_ct_lung_annotation", 1),
    _make_desc(
        "clara_pt_fed_learning_brain_tumor_mri_segmentation_1",
        "clara_pt_fed_learning_brain_tumor_mri_segmentation",
        1,
        model_file=os.path.join("models", "server", "best_FL_global_model.pt"),
    ),
    _make_desc(
        "clara_pt_pathology_metastasis_detection_1",
        "clara_pt_pathology_metastasis_detection",
        1,
        config_file=_TRAIN_CONFIG_FILE,
    ),
    _make_desc("clara_pt_brain_mri_segmentation_1", "clara_pt_brain_mri_segmentation", 1),
    _make_desc("clara_pt_brain_mri_segmentation_t1c_1", "clara_pt_brain_mri_segmentation_t1c", 1),
    _make_desc(
        "clara_pt_liver_and_tumor_ct_segmentation_1",
        "clara_pt_liver_and_tumor_ct_segmentation",
        1,
        config_file=_TRAIN_CONFIG_FILE,
    ),
    _make_desc(
        "clara_pt_pancreas_and_tumor_ct_segmentation_1",
        "clara_pt_pancreas_and_tumor_ct_segmentation",
        1,
        config_file=_TRAIN_CONFIG_FILE,
    ),
    _make_desc("clara_pt_brain_mri_annotation_t1c_1", "clara_pt_brain_mri_annotation_t1c", 1),
    _make_desc("clara_pt_spleen_ct_annotation_1", "clara_pt_spleen_ct_annotation", 1, config_file=_TRAIN_CONFIG_FILE),
    _make_desc("clara_pt_deepgrow_3d_annotation_1", "clara_pt_deepgrow_3d_annotation", 1, config_file=_TRAIN_CONFIG_FILE),
    _make_desc("clara_pt_deepgrow_2d_annotation_1", "clara_pt_deepgrow_2d_annotation", 1, config_file=_TRAIN_CONFIG_FILE),
    _make_desc(
        "clara_pt_covid19_ct_lung_segmentation_1",
        "clara_pt_covid19_ct_lung_segmentation",
        1,
        config_file=_TRAIN_CONFIG_FILE,
    ),
    _make_desc(
        "clara_pt_unetr_ct_btcv_segmentation", "clara_pt_unetr_ct_btcv_segmentation", 4.1, config_file=_TRAIN_CONFIG_FILE
    ),
    _make_desc(
        "clara_pt_chest_xray_classification", "clara_pt_chest_xray_classification", 4.1, config_file=_TRAIN_CONFIG_FILE
    ),
    _make_desc(
        "clara_pt_self_supervised_learning_segmentation",
        "clara_pt_self_supervised_learning_segmentation",
        4.1,
        model_file=os.path.join("models_2gpu", "best_metric_model.pt"),
        config_file=_TRAIN_CONFIG_FILE,
    ),
)
source_code/SegMamba/monai/apps/pathology/inferers/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .inferer import SlidingWindowHoVerNetInferer
source_code/SegMamba/monai/apps/pathology/inferers/inferer.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from typing import Any, Callable, Sequence
15
+
16
+ import numpy as np
17
+ import torch
18
+ import torch.nn.functional as F
19
+
20
+ from monai.inferers import SlidingWindowInferer
21
+ from monai.inferers.utils import sliding_window_inference
22
+ from monai.utils import BlendMode, PytorchPadMode, look_up_option
23
+
24
+ __all__ = ["SlidingWindowHoVerNetInferer"]
25
+
26
+
27
class SlidingWindowHoVerNetInferer(SlidingWindowInferer):
    """
    Sliding window method for HoVerNet model inference,
    with `sw_batch_size` windows for every model.forward().
    Usage example can be found in the :py:class:`monai.inferers.Inferer` base class.

    Args:
        roi_size: the window size to execute SlidingWindow evaluation.
            If it has non-positive components, the corresponding `inputs` size will be used.
            if the components of the `roi_size` are non-positive values, the transform will use the
            corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted
            to `(32, 64)` if the second spatial dimension size of img is `64`.
        sw_batch_size: the batch size to run window slices.
        overlap: Amount of overlap between scans.
        mode: {``"constant"``, ``"gaussian"``}
            How to blend output of overlapping windows. Defaults to ``"constant"``.

            - ``"constant``": gives equal weight to all predictions.
            - ``"gaussian``": gives less weight to predictions on edges of windows.

        sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``"gaussian"``.
            Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.
            When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding
            spatial dimensions.
        padding_mode: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}
            Padding mode when ``roi_size`` is larger than inputs. Defaults to ``"constant"``
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        cval: fill value for 'constant' padding mode. Default: 0
        sw_device: device for the window data.
            By default the device (and accordingly the memory) of the `inputs` is used.
            Normally `sw_device` should be consistent with the device where `predictor` is defined.
        device: device for the stitched output prediction.
            By default the device (and accordingly the memory) of the `inputs` is used. If for example
            set to device=torch.device('cpu') the gpu memory consumption is less and independent of the
            `inputs` and `roi_size`. Output is on the `device`.
        progress: whether to print a tqdm progress bar.
        cache_roi_weight_map: whether to pre-compute the ROI weight map.
        cpu_thresh: when provided, dynamically switch to stitching on cpu (to save gpu memory)
            when input image volume is larger than this threshold (in pixels/voxels).
            Otherwise use ``"device"``. Thus, the output may end-up on either cpu or gpu.
        extra_input_padding: the amount of padding for the input image, which is a tuple of even number of pads.
            Refer to to the `pad` argument of `torch.nn.functional.pad` for more details.

    Note:
        ``sw_batch_size`` denotes the max number of windows per network inference iteration,
        not the batch size of inputs.

    """

    def __init__(
        self,
        roi_size: Sequence[int] | int,
        sw_batch_size: int = 1,
        overlap: float = 0.25,
        mode: BlendMode | str = BlendMode.CONSTANT,
        sigma_scale: Sequence[float] | float = 0.125,
        padding_mode: PytorchPadMode | str = PytorchPadMode.CONSTANT,
        cval: float = 0.0,
        sw_device: torch.device | str | None = None,
        device: torch.device | str | None = None,
        progress: bool = False,
        cache_roi_weight_map: bool = False,
        cpu_thresh: int | None = None,
        extra_input_padding: tuple[int] | None = None,
    ) -> None:
        super().__init__(
            roi_size=roi_size,
            sw_batch_size=sw_batch_size,
            overlap=overlap,
            mode=mode,
            sigma_scale=sigma_scale,
            padding_mode=padding_mode,
            cval=cval,
            sw_device=sw_device,
            device=device,
            progress=progress,
            cache_roi_weight_map=cache_roi_weight_map,
            cpu_thresh=cpu_thresh,
        )
        # extra padding applied to the whole input before sliding-window inference;
        # cropped back off the stitched result at the end of __call__
        self.extra_input_padding = extra_input_padding

    def process_output(self, seg_prob_tuple, window_data, importance_map_):
        # Pad each (smaller) HoVerNet output window back to the input window size,
        # and zero out the padded region of the importance map so that the padding
        # does not contribute to the stitched result.
        window_shape = window_data.shape[2:]
        seg_shape = seg_prob_tuple[0].shape[2:]

        window_pad_size = []
        window_pad_slices = []
        for window_s, output_s in zip(window_shape, seg_shape):
            # split the size difference symmetrically (extra pixel goes to the trailing side)
            pad_width = max(window_s - output_s, 0)
            pad_half_1 = pad_width // 2
            pad_half_2 = pad_width - pad_half_1
            window_pad_size.extend([pad_half_1, pad_half_2])
            window_pad_slices.append(slice(pad_half_1, window_s - pad_half_2))

        # Make the padding area of the importance map zero
        # NOTE(review): indexing a tensor with a *list* of slices relies on legacy
        # advanced-indexing behavior; newer PyTorch may require tuple(window_pad_slices) — confirm.
        importance_map = torch.zeros(window_shape, dtype=importance_map_.dtype, device=importance_map_.device)
        importance_map[window_pad_slices] = importance_map_[window_pad_slices]

        # NOTE(review): window_pad_size is accumulated in dimension order, while F.pad
        # consumes pads starting from the *last* dimension; the two agree only when the
        # per-dimension pads coincide (e.g. square HoVerNet windows) — confirm for anisotropic ROIs.
        seg_prob_tuple = tuple(
            F.pad(seg_prob, pad=tuple(window_pad_size), mode=self.padding_mode, value=self.cval)
            for seg_prob in seg_prob_tuple
        )

        return seg_prob_tuple, importance_map

    def __call__(
        self,
        inputs: torch.Tensor,
        network: Callable[..., torch.Tensor | Sequence[torch.Tensor] | dict[Any, torch.Tensor]],
        *args: Any,
        **kwargs: Any,
    ) -> torch.Tensor | tuple[torch.Tensor, ...] | dict[Any, torch.Tensor]:
        """

        Args:
            inputs: model input data for inference.
            network: target model to execute inference.
                supports callables such as ``lambda x: my_torch_model(x, additional_config)``
            args: optional args to be passed to ``network``.
            kwargs: optional keyword args to be passed to ``network``.

        """

        device = self.device
        if device is None and self.cpu_thresh is not None and inputs.shape[2:].numel() > self.cpu_thresh:
            device = "cpu"  # stitch in cpu memory if image is too large

        if self.extra_input_padding:
            # remember the original spatial size so the padding can be cropped off later
            image_size_original = inputs.shape[2:]
            num_spatial_dims = len(image_size_original)
            inputs = F.pad(
                inputs,
                pad=tuple(self.extra_input_padding),
                mode=look_up_option(self.padding_mode, PytorchPadMode),
                value=self.cval,
            )

        # positional arguments mirror the signature of sliding_window_inference;
        # self.process_output is passed so each window output is re-padded before stitching
        results = sliding_window_inference(
            inputs,
            self.roi_size,
            self.sw_batch_size,
            network,
            self.overlap,
            self.mode,
            self.sigma_scale,
            self.padding_mode,
            self.cval,
            self.sw_device,
            device,
            self.progress,
            self.roi_weight_map,
            self.process_output,
            self.buffer_steps,
            self.buffer_dim,
            False,
            *args,
            **kwargs,
        )

        if self.extra_input_padding:
            # build slices that crop the extra padding off the stitched output;
            # extra_input_padding follows the F.pad convention (last spatial dim first),
            # hence the reversed insertion order below
            extra_slicing: list[slice] = []
            num_padded_dims = len(self.extra_input_padding) // 2
            for sp in range(num_padded_dims):
                slice_dim = slice(
                    self.extra_input_padding[sp * 2],
                    image_size_original[num_spatial_dims - sp - 1] + self.extra_input_padding[sp * 2],
                )
                extra_slicing.insert(0, slice_dim)
            for _ in range(len(inputs.shape) - num_padded_dims):
                # leading (batch/channel/unpadded) dimensions are kept whole
                extra_slicing.insert(0, slice(None))

            # apply the cropping to whatever container sliding_window_inference returned
            if isinstance(results, dict):
                for k, v in results.items():
                    results[k] = v[extra_slicing]
            elif isinstance(results, (list, tuple)):
                results = type(results)([res[extra_slicing] for res in results])
            elif isinstance(results, (torch.Tensor, np.ndarray)):
                results = results[extra_slicing]
            else:
                raise ValueError(
                    f"The output [{type(results)}] should be either dict, list, tuple, torch.Tensor, or numpy array."
                )

        return results
source_code/SegMamba/monai/apps/pathology/transforms/post/array.py ADDED
@@ -0,0 +1,837 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import warnings
15
+ from typing import Callable, Sequence
16
+
17
+ import numpy as np
18
+ import torch
19
+
20
+ from monai.config.type_definitions import DtypeLike, NdarrayOrTensor
21
+ from monai.transforms import (
22
+ Activations,
23
+ AsDiscrete,
24
+ BoundingRect,
25
+ FillHoles,
26
+ GaussianSmooth,
27
+ RemoveSmallObjects,
28
+ SobelGradients,
29
+ )
30
+ from monai.transforms.transform import Transform
31
+ from monai.transforms.utils_pytorch_numpy_unification import max, maximum, min, sum, unique
32
+ from monai.utils import TransformBackends, convert_to_numpy, optional_import
33
+ from monai.utils.misc import ensure_tuple_rep
34
+ from monai.utils.type_conversion import convert_to_dst_type, convert_to_tensor
35
+
36
+ label, _ = optional_import("scipy.ndimage.measurements", name="label")
37
+ disk, _ = optional_import("skimage.morphology", name="disk")
38
+ opening, _ = optional_import("skimage.morphology", name="opening")
39
+ watershed, _ = optional_import("skimage.segmentation", name="watershed")
40
+ find_contours, _ = optional_import("skimage.measure", name="find_contours")
41
+ centroid, _ = optional_import("skimage.measure", name="centroid")
42
+
43
# Public API of this module: HoVerNet-style watershed post-processing transforms.
__all__ = [
    "Watershed",
    "GenerateWatershedMask",
    "GenerateInstanceBorder",
    "GenerateDistanceMap",
    "GenerateWatershedMarkers",
    "GenerateSuccinctContour",
    "GenerateInstanceContour",
    "GenerateInstanceCentroid",
    "GenerateInstanceType",
    "HoVerNetInstanceMapPostProcessing",
    "HoVerNetNuclearTypePostProcessing",
]
56
+
57
+
58
class Watershed(Transform):
    """
    Compute an instance segmentation by running `skimage.segmentation.watershed` over an image.
    See: https://scikit-image.org/docs/stable/api/skimage.segmentation.html#skimage.segmentation.watershed.

    Args:
        connectivity: an array with the same number of dimensions as image whose non-zero elements indicate
            neighbors for connection. Following the scipy convention, default is a one-connected array of
            the dimension of the image.
        dtype: target data content type to convert, default is np.int64.

    """

    backend = [TransformBackends.NUMPY]

    def __init__(self, connectivity: int | None = 1, dtype: DtypeLike = np.int64) -> None:
        self.connectivity = connectivity
        self.dtype = dtype

    def __call__(
        self, image: NdarrayOrTensor, mask: NdarrayOrTensor | None = None, markers: NdarrayOrTensor | None = None
    ) -> NdarrayOrTensor:
        """
        Args:
            image: image where the lowest value points are labeled first. Shape must be [1, H, W, [D]].
            mask: optional, the same shape as image. Only points at which mask == True will be labeled.
                If None (no mask given), it is a volume of all 1s.
            markers: optional, the same shape as image. The desired number of markers, or an array marking
                the basins with the values to be assigned in the label matrix. Zero means not a marker.
                If None (no markers given), the local minima of the image are used as markers.
        """
        # skimage operates on numpy arrays; convert every input up front
        # (convert_to_numpy passes None through unchanged).
        image, markers, mask = (convert_to_numpy(arr) for arr in (image, markers, mask))

        labelled = watershed(image, markers=markers, mask=mask, connectivity=self.connectivity)

        # return in the same container type as the input image, cast to the requested dtype
        return convert_to_dst_type(labelled, image, dtype=self.dtype)[0]
97
+
98
+
99
class GenerateWatershedMask(Transform):
    """
    generate mask used in `watershed`. Only points at which mask == True will be labeled.

    Args:
        activation: the activation layer to be applied on the input probability map.
            It can be "softmax" or "sigmoid" string, or any callable. Defaults to "softmax".
        threshold: an optional float value to threshold to binarize probability map.
            If not provided, defaults to 0.5 when activation is not "softmax", otherwise None.
        min_object_size: objects smaller than this size (in pixel) are removed. Defaults to 10.
        dtype: target data content type to convert, default is np.uint8.

    """

    backend = [TransformBackends.NUMPY]

    def __init__(
        self,
        activation: str | Callable = "softmax",
        threshold: float | None = None,
        min_object_size: int = 10,
        dtype: DtypeLike = np.uint8,
    ) -> None:
        self.dtype = dtype

        # resolve the activation argument into an `Activations` transform
        use_softmax = False
        use_sigmoid = False
        activation_fn = None
        if isinstance(activation, str):
            name = activation.lower()
            use_softmax = name == "softmax"
            use_sigmoid = name == "sigmoid"
            if not (use_softmax or use_sigmoid):
                raise ValueError(
                    f"The activation should be 'softmax' or 'sigmoid' string, or any callable. '{activation}' was given."
                )
        elif callable(activation):
            activation_fn = activation
        else:
            raise ValueError(f"The activation type should be either str or callable. '{type(activation)}' was given.")
        self.activation = Activations(softmax=use_softmax, sigmoid=use_sigmoid, other=activation_fn)

        # binarize: argmax for softmax outputs, otherwise threshold (defaulting to 0.5)
        if threshold is None and not use_softmax:
            threshold = 0.5
        self.as_discrete = AsDiscrete(threshold=threshold, argmax=use_softmax)

        # drop connected components below the requested pixel count (disabled when <= 0)
        self.remove_small_objects = None if min_object_size <= 0 else RemoveSmallObjects(min_size=min_object_size)

    def __call__(self, prob_map: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Args:
            prob_map: probability map of segmentation, shape must be [C, H, W, [D]]
        """
        binary = self.as_discrete(self.activation(prob_map))
        binary = convert_to_numpy(binary)

        # connected-component labelling, remove tiny components, then collapse back to binary
        labelled = label(binary)[0]
        if self.remove_small_objects is not None:
            labelled = self.remove_small_objects(labelled)
        labelled[labelled > 0] = 1

        return convert_to_dst_type(labelled, prob_map, dtype=self.dtype)[0]
168
+
169
+
170
class GenerateInstanceBorder(Transform):
    """
    Generate instance border by hover map. The more parts of the image that cannot be identified as foreground areas,
    the larger the grey scale value. The grey value of the instance's border will be larger.

    Args:
        kernel_size: the size of the Sobel kernel. Defaults to 5.
        dtype: target data type to convert to. Defaults to np.float32.


    Raises:
        ValueError: when the `mask` shape is not [1, H, W].
        ValueError: when the `hover_map` shape is not [2, H, W].

    """

    backend = [TransformBackends.NUMPY]

    def __init__(self, kernel_size: int = 5, dtype: DtypeLike = np.float32) -> None:
        self.dtype = dtype
        self.sobel_gradient = SobelGradients(kernel_size=kernel_size)

    def __call__(self, mask: NdarrayOrTensor, hover_map: NdarrayOrTensor) -> NdarrayOrTensor:  # type: ignore
        """
        Args:
            mask: binary segmentation map, the output of :py:class:`GenerateWatershedMask`.
                Shape must be [1, H, W] or [H, W].
            hover_map: horizontal and vertical distances of nuclear pixels to their centres of mass. Shape must be [2, H, W].
                The first and second channel represent the horizontal and vertical maps respectively. For more details refer
                to papers: https://arxiv.org/abs/1812.06499.
        """
        # validate shapes: hover_map must be [2, H, W]; mask is normalized to [1, H, W]
        if len(hover_map.shape) != 3:
            raise ValueError(f"The hover map should have the shape of [C, H, W], but got {hover_map.shape}.")
        if len(mask.shape) == 3:
            if mask.shape[0] != 1:
                raise ValueError(f"The mask should have only one channel, but got {mask.shape[0]}.")
        elif len(mask.shape) == 2:
            # promote [H, W] to [1, H, W] so channel arithmetic below is uniform
            mask = mask[None]
        else:
            raise ValueError(f"The mask should have the shape of [1, H, W] or [H, W], but got {mask.shape}.")
        if hover_map.shape[0] != 2:
            raise ValueError(f"Suppose the hover map only has two channels, but got {hover_map.shape[0]}")

        # split the horizontal (channel 0) and vertical (channel 1) distance maps,
        # keeping the channel dim via slicing
        hover_h = hover_map[0:1, ...]
        hover_v = hover_map[1:2, ...]

        # NOTE: `min`/`max` here are monai's numpy/torch unification helpers
        # (imported at the top of this file), not the builtins.
        hover_h_min, hover_h_max = min(hover_h), max(hover_h)
        hover_v_min, hover_v_max = min(hover_v), max(hover_v)
        if (hover_h_max - hover_h_min) == 0 or (hover_v_max - hover_v_min) == 0:
            # a constant map would make the min-max normalization divide by zero
            raise ValueError("Not a valid hover map, please check your input")
        # min-max normalize each map to [0, 1]
        hover_h = (hover_h - hover_h_min) / (hover_h_max - hover_h_min)
        hover_v = (hover_v - hover_v_min) / (hover_v_max - hover_v_min)
        # Sobel gradient per map; note the channel index picks the cross-direction
        # derivative of each map — assumes SobelGradients stacks gradients per axis,
        # TODO confirm against SobelGradients' output layout.
        sobelh = self.sobel_gradient(hover_h)[1, ...]
        sobelv = self.sobel_gradient(hover_v)[0, ...]
        sobelh_min, sobelh_max = min(sobelh), max(sobelh)
        sobelv_min, sobelv_max = min(sobelv), max(sobelv)
        if (sobelh_max - sobelh_min) == 0 or (sobelv_max - sobelv_min) == 0:
            raise ValueError("Not a valid sobel gradient map")
        # invert the normalized gradients so instance borders get the larger values
        sobelh = 1 - (sobelh - sobelh_min) / (sobelh_max - sobelh_min)
        sobelv = 1 - (sobelv - sobelv_min) / (sobelv_max - sobelv_min)

        # combine the h & v values using max
        overall = maximum(sobelh, sobelv)
        # suppress responses outside the foreground mask, clamping negatives to zero
        overall = overall - (1 - mask)
        overall[overall < 0] = 0

        return convert_to_dst_type(overall, mask, dtype=self.dtype)[0]
237
+
238
+
239
class GenerateDistanceMap(Transform):
    """
    Generate distance map.
    In general, the instance map is calculated from the distance to the background.
    Here, we use 1 - "instance border map" to generate the distance map.
    Nuclei values form mountains so invert them to get basins.

    Args:
        smooth_fn: smoothing function for distance map, which can be any callable object.
            If not provided :py:class:`monai.transforms.GaussianSmooth()` is used.
        dtype: target data type to convert to. Defaults to np.float32.
    """

    backend = [TransformBackends.NUMPY]

    def __init__(self, smooth_fn: Callable | None = None, dtype: DtypeLike = np.float32) -> None:
        self.smooth_fn = GaussianSmooth() if smooth_fn is None else smooth_fn
        self.dtype = dtype

    def __call__(self, mask: NdarrayOrTensor, instance_border: NdarrayOrTensor) -> NdarrayOrTensor:  # type: ignore
        """
        Args:
            mask: binary segmentation map, the output of :py:class:`GenerateWatershedMask`.
                Shape must be [1, H, W] or [H, W].
            instance_border: instance border map, the output of :py:class:`GenerateInstanceBorder`.
                Shape must be [1, H, W].
        """
        # normalize mask to [1, H, W], rejecting anything else
        n_dims = len(mask.shape)
        if n_dims == 2:
            mask = mask[None]
        elif n_dims == 3:
            if mask.shape[0] != 1:
                raise ValueError(f"The mask should have only one channel, but got {mask.shape[0]}.")
        else:
            raise ValueError(f"The mask should have the shape of [1, H, W] or [H, W], but got {mask.shape}.")
        if instance_border.shape[0] != 1 or instance_border.ndim != 3:
            raise ValueError(f"Input instance_border should be with size of [1, H, W], but got {instance_border.shape}")

        # invert the border map inside the foreground, smooth, then negate so
        # nuclei become basins for the downstream watershed
        inverted = (1.0 - instance_border) * mask
        smoothed = self.smooth_fn(inverted)  # type: ignore

        return convert_to_dst_type(-smoothed, mask, dtype=self.dtype)[0]
280
+
281
+
282
class GenerateWatershedMarkers(Transform):
    """
    Generate markers to be used in `watershed`. The watershed algorithm treats pixels values as a local topography
    (elevation). The algorithm floods basins from the markers until basins attributed to different markers meet on
    watershed lines. Generally, markers are chosen as local minima of the image, from which basins are flooded.
    Here is the implementation from HoVerNet paper.
    For more details refer to papers: https://arxiv.org/abs/1812.06499.

    Args:
        threshold: a float value to threshold to binarize instance border map.
            It turns uncertain area to 1 and other area to 0. Defaults to 0.4.
        radius: the radius of the disk-shaped footprint used in `opening`. Defaults to 2.
        min_object_size: objects smaller than this size (in pixel) are removed. Defaults to 10.
        postprocess_fn: additional post-process function on the markers.
            If not provided, :py:class:`monai.transforms.post.FillHoles()` will be used.
        dtype: target data type to convert to. Defaults to np.int64.

    """

    backend = [TransformBackends.NUMPY]

    def __init__(
        self,
        threshold: float = 0.4,
        radius: int = 2,
        min_object_size: int = 10,
        postprocess_fn: Callable | None = None,
        dtype: DtypeLike = np.int64,
    ) -> None:
        self.threshold = threshold
        self.radius = radius
        self.dtype = dtype
        self.postprocess_fn = FillHoles() if postprocess_fn is None else postprocess_fn
        self.remove_small_objects = None if min_object_size <= 0 else RemoveSmallObjects(min_size=min_object_size)

    def __call__(self, mask: NdarrayOrTensor, instance_border: NdarrayOrTensor) -> NdarrayOrTensor:  # type: ignore
        """
        Args:
            mask: binary segmentation map, the output of :py:class:`GenerateWatershedMask`.
                Shape must be [1, H, W] or [H, W].
            instance_border: instance border map, the output of :py:class:`GenerateInstanceBorder`.
                Shape must be [1, H, W].
        """
        # normalize mask to [1, H, W], rejecting anything else
        if len(mask.shape) == 2:
            mask = mask[None]
        elif len(mask.shape) == 3:
            if mask.shape[0] != 1:
                raise ValueError(f"The mask should have only one channel, but got {mask.shape[0]}.")
        else:
            raise ValueError(f"The mask should have the shape of [1, H, W] or [H, W], but got {mask.shape}.")
        if instance_border.shape[0] != 1 or instance_border.ndim != 3:
            raise ValueError(f"Input instance_border should be with size of [1, H, W], but got {instance_border.shape}")

        # pixels at/above the threshold are "uncertain"; subtracting them from the
        # mask leaves only the certain foreground, clamped at zero
        uncertain = instance_border >= self.threshold
        marker = mask - convert_to_dst_type(uncertain, mask)[0]
        marker[marker < 0] = 0
        marker = convert_to_numpy(self.postprocess_fn(marker))

        # morphological opening to clean up speckle, then label connected components
        marker = opening(marker.squeeze(), disk(self.radius))
        marker = label(marker)[0][None]
        if self.remove_small_objects is not None:
            marker = self.remove_small_objects(marker)

        return convert_to_dst_type(marker, mask, dtype=self.dtype)[0]
351
+
352
+
353
class GenerateSuccinctContour(Transform):
    """
    Converts SciPy-style contours (generated by skimage.measure.find_contours) to a more succinct version which only includes
    the pixels to which lines need to be drawn (i.e. not the intervening pixels along each line).

    Args:
        height: height of bounding box, used to detect direction of line segment.
        width: width of bounding box, used to detect direction of line segment.

    Returns:
        the pixels that need to be joined by straight lines to describe the outmost pixels of the foreground similar to
        OpenCV's cv.CHAIN_APPROX_SIMPLE (counterclockwise)
    """

    def __init__(self, height: int, width: int) -> None:
        self.height = height
        self.width = width

    def _generate_contour_coord(self, current: np.ndarray, previous: np.ndarray) -> tuple[int, int]:
        """
        Generate contour coordinates. Given the previous and current coordinates of border positions,
        returns the int pixel that marks the extremity of the segmented pixels.

        Args:
            current: coordinates of the current border position.
            previous: coordinates of the previous border position.

        Raises:
            ValueError: when the step between `previous` and `current` is not one of the
                half/full-pixel deltas that `find_contours` can produce.
        """
        # find_contours walks in half-pixel steps, so the delta between consecutive
        # points is restricted to a small set of directions; each one maps to a
        # different rounding rule for the extremal integer pixel.
        p_delta = (current[0] - previous[0], current[1] - previous[1])

        if p_delta in ((0.0, 1.0), (0.5, 0.5), (1.0, 0.0)):
            row = int(current[0] + 0.5)
            col = int(current[1])
        elif p_delta in ((0.0, -1.0), (0.5, -0.5)):
            row = int(current[0])
            col = int(current[1])
        elif p_delta in ((-1, 0.0), (-0.5, -0.5)):
            row = int(current[0])
            col = int(current[1] + 0.5)
        elif p_delta == (-0.5, 0.5):
            row = int(current[0] + 0.5)
            col = int(current[1] + 0.5)
        else:
            # Fix: previously an unrecognized delta fell through with `row`/`col`
            # unbound, surfacing as an opaque UnboundLocalError; raise a
            # diagnosable error instead.
            raise ValueError(f"Unexpected contour step {p_delta} between {previous} and {current}.")

        return row, col

    def _calculate_distance_from_top_left(self, sequence: Sequence[tuple[int, int]]) -> int:
        """
        Each sequence of coordinates describes a boundary between foreground and background starting and ending at two sides
        of the bounding box. To order the sequences correctly, we compute the distance from the top-left of the bounding box
        around the perimeter in a clockwise direction.

        Args:
            sequence: list of border points coordinates.

        Returns:
            the distance round the perimeter of the bounding box from the top-left origin
        """
        distance: int
        first_coord = sequence[0]
        # which side of the bounding box does the sequence start on?
        if first_coord[0] == 0:  # top edge: distance is just the column
            distance = first_coord[1]
        elif first_coord[1] == self.width - 1:  # right edge: top edge length + row
            distance = self.width + first_coord[0]
        elif first_coord[0] == self.height - 1:  # bottom edge, traversed right-to-left
            distance = 2 * self.width + self.height - first_coord[1]
        else:  # left edge, traversed bottom-to-top
            distance = 2 * (self.width + self.height) - first_coord[0]

        return distance

    def __call__(self, contours: list[np.ndarray]) -> np.ndarray:
        """
        Args:
            contours: list of (n, 2)-ndarrays, scipy-style clockwise line segments, with lines separating foreground/background.
                Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour.
        """
        pixels: list[tuple[int, int]] = []
        sequences = []
        # corners[i] marks whether bounding-box corner i (0: top-left, 1: top-right,
        # 2: bottom-right, 3: bottom-left) is crossed by some contour segment
        corners = [False, False, False, False]

        for group in contours:
            sequence: list[tuple[int, int]] = []
            last_added = None
            prev = None
            corner = -1

            for i, coord in enumerate(group):
                if i == 0:
                    # first point: classify by which bounding-box side it starts on
                    # originating from the top, so must be heading south east
                    if coord[0] == 0.0:
                        corner = 1
                        pixel = (0, int(coord[1] - 0.5))
                        if pixel[1] == self.width - 1:
                            corners[1] = True
                        elif pixel[1] == 0.0:
                            corners[0] = True
                    # originating from the left, so must be heading north east
                    elif coord[1] == 0.0:
                        corner = 0
                        pixel = (int(coord[0] + 0.5), 0)
                    # originating from the bottom, so must be heading north west
                    elif coord[0] == self.height - 1:
                        corner = 3
                        pixel = (int(coord[0]), int(coord[1] + 0.5))
                        if pixel[1] == self.width - 1:
                            corners[2] = True
                    # originating from the right, so must be heading south west
                    elif coord[1] == self.width - 1:
                        corner = 2
                        pixel = (int(coord[0] - 0.5), int(coord[1]))
                    else:
                        warnings.warn(f"Invalid contour coord {coord} is generated, skip this instance.")
                        return None  # type: ignore
                    sequence.append(pixel)
                    last_added = pixel
                elif i == len(group) - 1:
                    # add this point
                    pixel = self._generate_contour_coord(coord, prev)  # type: ignore
                    if pixel != last_added:
                        sequence.append(pixel)
                        last_added = pixel
                elif np.any(coord - prev != group[i + 1] - coord):
                    # direction changed at this point, so it is a vertex worth keeping
                    pixel = self._generate_contour_coord(coord, prev)  # type: ignore
                    if pixel != last_added:
                        sequence.append(pixel)
                        last_added = pixel

                # flag whether each corner has been crossed
                if i == len(group) - 1:
                    if corner == 0:
                        if coord[0] == 0:
                            corners[corner] = True
                    elif corner == 1:
                        if coord[1] == self.width - 1:
                            corners[corner] = True
                    elif corner == 2:
                        if coord[0] == self.height - 1:
                            corners[corner] = True
                    elif corner == 3:
                        if coord[1] == 0.0:
                            corners[corner] = True

                prev = coord
            dist = self._calculate_distance_from_top_left(sequence)

            sequences.append({"distance": dist, "sequence": sequence})

        # check whether we need to insert any missing corners
        if corners[0] is False:
            sequences.append({"distance": 0, "sequence": [(0, 0)]})
        if corners[1] is False:
            sequences.append({"distance": self.width, "sequence": [(0, self.width - 1)]})
        if corners[2] is False:
            sequences.append({"distance": self.width + self.height, "sequence": [(self.height - 1, self.width - 1)]})
        if corners[3] is False:
            sequences.append({"distance": 2 * self.width + self.height, "sequence": [(self.height - 1, 0)]})

        # join the sequences into a single contour
        # starting at top left and rotating clockwise
        sequences.sort(key=lambda x: x.get("distance"))  # type: ignore

        last = (-1, -1)
        for _sequence in sequences:
            if _sequence["sequence"][0] == last:  # type: ignore
                # avoid duplicating the join point between consecutive sequences
                pixels.pop()
            if pixels:
                pixels = [*pixels, *_sequence["sequence"]]  # type: ignore
            else:
                pixels = _sequence["sequence"]  # type: ignore
            last = pixels[-1]

        if pixels[0] == last:
            pixels.pop(0)

        # rotate so the contour does not start on the synthetic (0, 0) corner
        if pixels[0] == (0, 0):
            pixels.append(pixels.pop(0))

        # flip (row, col) -> (x, y) ordering for the returned int32 array
        return np.flip(convert_to_numpy(pixels, dtype=np.int32))  # type: ignore
531
+
532
+
533
class GenerateInstanceContour(Transform):
    """
    Generate contour for each instance in a 2D array. Use `GenerateSuccinctContour` to only include
    the pixels to which lines need to be drawn

    Args:
        min_num_points: assumed that the created contour does not form a contour if it does not contain more points
            than the specified value. Defaults to 3.
        contour_level: an optional value for `skimage.measure.find_contours` to find contours in the array.
            If not provided, the level is set to `(max(image) + min(image)) / 2`.

    """

    backend = [TransformBackends.NUMPY]

    def __init__(self, min_num_points: int = 3, contour_level: float | None = None) -> None:
        self.contour_level = contour_level
        self.min_num_points = min_num_points

    def __call__(self, inst_mask: NdarrayOrTensor, offset: Sequence[int] | None = (0, 0)) -> np.ndarray | None:
        """
        Args:
            inst_mask: segmentation mask for a single instance. Shape should be [1, H, W, [D]]
            offset: optional offset of starting position of the instance mask in the original array. Default to 0 for each dim.

        Returns:
            the succinct contour as an (n, 2) array, or None when no valid contour can be formed.
        """
        inst_mask = inst_mask.squeeze()  # squeeze channel dim
        inst_mask = convert_to_numpy(inst_mask)
        inst_contour_cv = find_contours(inst_mask, level=self.contour_level)
        generate_contour = GenerateSuccinctContour(inst_mask.shape[0], inst_mask.shape[1])
        inst_contour = generate_contour(inst_contour_cv)
        if inst_contour is None:
            return None
        # less than `self.min_num_points` points don't make a contour, so skip.
        # They are likely to be artifacts as the contours obtained via approximation.
        if inst_contour.shape[0] < self.min_num_points:
            # fix: use warnings.warn for diagnostics, consistent with the rest of this
            # module (e.g. GenerateSuccinctContour), instead of a bare print
            warnings.warn(f"< {self.min_num_points} points don't make a contour, so skipped!")
            return None
        # check for tricky shape
        elif len(inst_contour.shape) != 2:
            warnings.warn(f"{len(inst_contour.shape)} != 2, check for tricky shapes!")
            return None
        else:
            # shift the contour back into the coordinate frame of the original array
            inst_contour[:, 0] += offset[0]  # type: ignore
            inst_contour[:, 1] += offset[1]  # type: ignore
            return inst_contour
578
+
579
+
580
class GenerateInstanceCentroid(Transform):
    """
    Generate instance centroid using `skimage.measure.centroid`.

    Args:
        dtype: the data type of output centroid.

    """

    backend = [TransformBackends.NUMPY]

    def __init__(self, dtype: DtypeLike | None = int) -> None:
        self.dtype = dtype

    def __call__(self, inst_mask: NdarrayOrTensor, offset: Sequence[int] | int = 0) -> NdarrayOrTensor:
        """
        Args:
            inst_mask: segmentation mask for a single instance. Shape should be [1, H, W, [D]]
            offset: optional offset of starting position of the instance mask in the original array. Default to 0 for each dim.

        """
        inst_mask = convert_to_numpy(inst_mask)
        inst_mask = inst_mask.squeeze(0)  # squeeze channel dim
        # broadcast a scalar offset to one value per spatial dim
        offset = ensure_tuple_rep(offset, len(inst_mask.shape))

        # centroid in local mask coordinates, shifted back to the original frame
        inst_centroid = centroid(inst_mask)
        for axis, shift in enumerate(offset):
            inst_centroid[axis] += shift

        return convert_to_dst_type(inst_centroid, inst_mask, dtype=self.dtype)[0]
611
+
612
+
613
class GenerateInstanceType(Transform):
    """
    Generate instance type and probability for each instance.
    """

    backend = [TransformBackends.NUMPY]

    def __call__(  # type: ignore
        self, type_pred: NdarrayOrTensor, seg_pred: NdarrayOrTensor, bbox: np.ndarray, instance_id: int
    ) -> tuple[int, float]:
        """
        Args:
            type_pred: pixel-level type prediction map after activation function.
            seg_pred: pixel-level segmentation prediction map after activation function.
            bbox: bounding box coordinates of the instance, shape is [channel, 2 * spatial dims].
            instance_id: get instance type from specified instance id.

        Returns:
            a `(type, probability)` pair: the dominant non-background type within the
            instance and the fraction of instance pixels carrying that type.
        """

        # crop both maps to the instance's bounding box (assumes 2D [rmin, rmax, cmin, cmax])
        rmin, rmax, cmin, cmax = bbox.flatten()
        seg_map_crop = seg_pred[0, rmin:rmax, cmin:cmax]
        type_map_crop = type_pred[0, rmin:rmax, cmin:cmax]

        # boolean mask selecting only this instance's pixels within the crop
        seg_map_crop = convert_to_dst_type(seg_map_crop == instance_id, type_map_crop, dtype=bool)[0]

        inst_type = type_map_crop[seg_map_crop]
        # NOTE: `unique` and `sum` below are monai's numpy/torch unification helpers
        # imported at the top of this file (they shadow the builtins here).
        type_list, type_pixels = unique(inst_type, return_counts=True)
        # sort (type, pixel_count) pairs by count, most frequent first
        type_list = list(zip(type_list, type_pixels))
        type_list = sorted(type_list, key=lambda x: x[1], reverse=True)
        inst_type = type_list[0][0]
        if inst_type == 0:  # ! pick the 2nd most dominant if exist
            if len(type_list) > 1:
                inst_type = type_list[1][0]
        type_dict = {v[0]: v[1] for v in type_list}
        # probability = dominant-type pixel count / instance size (epsilon avoids /0)
        type_prob = type_dict[inst_type] / (sum(seg_map_crop) + 1.0e-6)

        return (int(inst_type), float(type_prob))
649
+
650
+
651
class HoVerNetInstanceMapPostProcessing(Transform):
    """
    The post-processing transform for HoVerNet model to generate instance segmentation map.
    It generates an instance segmentation map as well as a dictionary containing centroids, bounding boxes, and contours
    for each instance.

    Args:
        activation: the activation layer to be applied on the input probability map.
            It can be "softmax" or "sigmoid" string, or any callable. Defaults to "softmax".
        mask_threshold: a float value to threshold to binarize probability map to generate mask.
        min_object_size: objects smaller than this size (in pixel) are removed. Defaults to 10.
        sobel_kernel_size: the size of the Sobel kernel used in :py:class:`GenerateInstanceBorder`. Defaults to 5.
        distance_smooth_fn: smoothing function for distance map.
            If not provided, :py:class:`monai.transforms.intensity.GaussianSmooth()` will be used.
        marker_threshold: a float value to threshold to binarize instance border map for markers.
            It turns uncertain area to 1 and other area to 0. Defaults to 0.4.
        marker_radius: the radius of the disk-shaped footprint used in `opening` of markers. Defaults to 2.
        marker_postprocess_fn: post-process function for watershed markers.
            If not provided, :py:class:`monai.transforms.post.FillHoles()` will be used.
        watershed_connectivity: `connectivity` argument of `skimage.segmentation.watershed`.
        min_num_points: minimum number of points to be considered as a contour. Defaults to 3.
        contour_level: an optional value for `skimage.measure.find_contours` to find contours in the array.
            If not provided, the level is set to `(max(image) + min(image)) / 2`.
        device: target device to put the output Tensor data.
    """

    def __init__(
        self,
        activation: str | Callable = "softmax",
        mask_threshold: float | None = None,
        min_object_size: int = 10,
        sobel_kernel_size: int = 5,
        distance_smooth_fn: Callable | None = None,
        marker_threshold: float = 0.4,
        marker_radius: int = 2,
        marker_postprocess_fn: Callable | None = None,
        watershed_connectivity: int | None = 1,
        min_num_points: int = 3,
        contour_level: float | None = None,
        device: str | torch.device | None = None,
    ) -> None:
        super().__init__()
        self.device = device
        # compose the fixed HoVerNet watershed pipeline from this module's transforms
        self.generate_watershed_mask = GenerateWatershedMask(
            activation=activation, threshold=mask_threshold, min_object_size=min_object_size
        )
        self.generate_instance_border = GenerateInstanceBorder(kernel_size=sobel_kernel_size)
        self.generate_distance_map = GenerateDistanceMap(smooth_fn=distance_smooth_fn)
        self.generate_watershed_markers = GenerateWatershedMarkers(
            threshold=marker_threshold,
            radius=marker_radius,
            postprocess_fn=marker_postprocess_fn,
            min_object_size=min_object_size,
        )
        self.watershed = Watershed(connectivity=watershed_connectivity)
        self.generate_instance_contour = GenerateInstanceContour(
            min_num_points=min_num_points, contour_level=contour_level
        )
        self.generate_instance_centroid = GenerateInstanceCentroid()

    def __call__(  # type: ignore
        self, nuclear_prediction: NdarrayOrTensor, hover_map: NdarrayOrTensor
    ) -> tuple[dict, NdarrayOrTensor]:
        """post-process instance segmentation branches (NP and HV) to generate instance segmentation map.

        Args:
            nuclear_prediction: the output of NP (nuclear prediction) branch of HoVerNet model
            hover_map: the output of HV (hover map) branch of HoVerNet model

        Returns:
            a tuple of (per-instance info dict keyed by instance id, instance segmentation map).
        """

        # Process NP and HV branch using watershed algorithm
        watershed_mask = self.generate_watershed_mask(nuclear_prediction)
        instance_borders = self.generate_instance_border(watershed_mask, hover_map)
        distance_map = self.generate_distance_map(watershed_mask, instance_borders)
        watershed_markers = self.generate_watershed_markers(watershed_mask, instance_borders)
        instance_map = self.watershed(distance_map, watershed_mask, watershed_markers)

        # Create bounding boxes, contours and centroids
        instance_ids = set(np.unique(instance_map)) - {0}  # exclude background
        instance_info = {}
        for inst_id in instance_ids:
            instance_mask = instance_map == inst_id
            instance_bbox = BoundingRect()(instance_mask)

            # crop the instance mask down to its bounding box
            instance_mask = instance_mask[
                :, instance_bbox[0][0] : instance_bbox[0][1], instance_bbox[0][2] : instance_bbox[0][3]
            ]
            # NOTE: the offset is [cmin, rmin] (column first) — presumably matching the
            # (x, y) ordering of GenerateSuccinctContour's flipped output; verify if changed.
            offset = [instance_bbox[0][2], instance_bbox[0][0]]
            instance_contour = self.generate_instance_contour(FillHoles()(instance_mask), offset)
            # instances with no valid contour are dropped from the info dict entirely
            if instance_contour is not None:
                instance_centroid = self.generate_instance_centroid(instance_mask, offset)
                instance_info[inst_id] = {
                    "bounding_box": instance_bbox,
                    "centroid": instance_centroid,
                    "contour": instance_contour,
                }
        instance_map = convert_to_tensor(instance_map, device=self.device)
        return instance_info, instance_map
749
+
750
+
751
class HoVerNetNuclearTypePostProcessing(Transform):
    """
    The post-processing transform for HoVerNet model to generate nuclear type information.
    It updates the input instance info dictionary with information about types of the nuclei (value and probability).
    Also if requested (`return_type_map=True`), it generates a pixel-level type map.

    Args:
        activation: the activation layer to be applied on nuclear type branch. It can be "softmax" or "sigmoid" string,
            or any callable. Defaults to "softmax".
        threshold: an optional float value to threshold to binarize probability map.
            If not provided, defaults to 0.5 when activation is not "softmax", otherwise None.
        return_type_map: whether to calculate and return pixel-level type map.
        device: target device to put the output Tensor data.

    """

    def __init__(
        self,
        activation: str | Callable = "softmax",
        threshold: float | None = None,
        return_type_map: bool = True,
        device: str | torch.device | None = None,
    ) -> None:
        super().__init__()
        self.device = device
        self.return_type_map = return_type_map
        self.generate_instance_type = GenerateInstanceType()

        # set activation layer (same resolution logic as GenerateWatershedMask)
        use_softmax = False
        use_sigmoid = False
        activation_fn = None
        if isinstance(activation, str):
            if activation.lower() == "softmax":
                use_softmax = True
            elif activation.lower() == "sigmoid":
                use_sigmoid = True
            else:
                raise ValueError(
                    f"The activation should be 'softmax' or 'sigmoid' string, or any callable. '{activation}' was given."
                )
        elif callable(activation):
            activation_fn = activation
        else:
            raise ValueError(f"The activation type should be either str or callable. '{type(activation)}' was given.")
        self.activation = Activations(softmax=use_softmax, sigmoid=use_sigmoid, other=activation_fn)

        # set discretization transform: argmax for softmax, otherwise threshold (default 0.5)
        if not use_softmax and threshold is None:
            threshold = 0.5
        self.as_discrete = AsDiscrete(threshold=threshold, argmax=use_softmax)

    def __call__(  # type: ignore
        self, type_prediction: NdarrayOrTensor, instance_info: dict[int, dict], instance_map: NdarrayOrTensor
    ) -> tuple[dict, NdarrayOrTensor | None]:
        """Process NC (type prediction) branch and combine it with instance segmentation
        It updates the instance_info with instance type and associated probability, and generate instance type map.

        Args:
            instance_info: instance information dictionary, the output of :py:class:`HoVerNetInstanceMapPostProcessing`
            instance_map: instance segmentation map, the output of :py:class:`HoVerNetInstanceMapPostProcessing`
            type_prediction: the output of NC (type prediction) branch of HoVerNet model

        Returns:
            a tuple of (the mutated `instance_info` dict, pixel-level type map or None).
        """
        type_prediction = self.activation(type_prediction)
        type_prediction = self.as_discrete(type_prediction)

        type_map = None
        if self.return_type_map:
            # start from a zero map matching the instance map's container/device
            type_map = convert_to_dst_type(torch.zeros(instance_map.shape), instance_map)[0]

        for inst_id in instance_info:
            instance_type, instance_type_prob = self.generate_instance_type(
                type_pred=type_prediction,
                seg_pred=instance_map,
                bbox=instance_info[inst_id]["bounding_box"],
                instance_id=inst_id,
            )
            # update instance info dict with type data (mutates the input dict in place)
            instance_info[inst_id]["type_prob"] = instance_type_prob
            instance_info[inst_id]["type"] = instance_type

            # update instance type map
            if type_map is not None:
                type_map[instance_map == inst_id] = instance_type
                type_map = convert_to_tensor(type_map, device=self.device)

        return instance_info, type_map
source_code/SegMamba/monai/apps/reconstruction/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/reconstruction/networks/blocks/varnetblock.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import torch
15
+ import torch.nn as nn
16
+ from torch import Tensor
17
+
18
+ from monai.apps.reconstruction.networks.nets.utils import sensitivity_map_expand, sensitivity_map_reduce
19
+
20
+
21
class VarNetBlock(nn.Module):
    """
    One cascade of an end-to-end variational network, based on Sriram et. al.,
    "End-to-end variational networks for accelerated MRI reconstruction".
    Each block computes a soft data-consistency (DC) correction and a learned
    refinement correction for the intermediate kspace and subtracts both from it.

    Modified and adopted from: https://github.com/facebookresearch/fastMRI

    Args:
        refinement_model: the model used for refinement (typically a U-Net but can be any deep learning model
            that performs well when the input and output are in image domain (e.g., a convolutional network).
        spatial_dims: is 2 for 2D data and is 3 for 3D data
    """

    def __init__(self, refinement_model: nn.Module, spatial_dims: int = 2):
        super().__init__()
        self.model = refinement_model
        self.spatial_dims = spatial_dims
        # Learned scalar that scales the data-consistency correction.
        self.dc_weight = nn.Parameter(torch.ones(1))

        # All-ones rank so the buffer broadcasts against any kspace tensor;
        # the extra 3 axes are batch, channel, and real/complex.
        self.register_buffer("zeros", torch.zeros([1] * (spatial_dims + 3)))

    def soft_dc(self, x: Tensor, ref_kspace: Tensor, mask: Tensor) -> Tensor:
        """
        Soft data consistency: returns ``mask * (x - ref_kspace)`` scaled by the learned
        ``dc_weight``. This is the residual between the reference under-sampled kspace
        and the current network estimate, restricted to the sampled locations.

        Args:
            x: 2D kspace (B,C,H,W,2) with the last dimension being 2 (for real/imaginary parts) and C denoting the
                coil dimension. 3D data will have the shape (B,C,H,W,D,2).
            ref_kspace: original under-sampled kspace with the same shape as x.
            mask: the under-sampling mask with shape (1,1,1,W,1) for 2D data or (1,1,1,1,D,1) for 3D data.

        Returns:
            Output of DC block with the same shape as x
        """
        masked_residual = torch.where(mask, x - ref_kspace, self.zeros)
        return masked_residual * self.dc_weight

    def forward(self, current_kspace: Tensor, ref_kspace: Tensor, mask: Tensor, sens_maps: Tensor) -> Tensor:
        """
        Args:
            current_kspace: Predicted kspace from the previous block. It's a 2D kspace (B,C,H,W,2)
                with the last dimension being 2 (for real/imaginary parts) and C denoting the
                coil dimension. 3D data will have the shape (B,C,H,W,D,2).
            ref_kspace: reference kspace for applying data consistency (is the under-sampled kspace in MRI reconstruction).
                Its shape is the same as current_kspace.
            mask: the under-sampling mask with shape (1,1,1,W,1) for 2D data or (1,1,1,1,D,1) for 3D data.
            sens_maps: coil sensitivity maps with the same shape as current_kspace

        Returns:
            Output of VarNetBlock with the same shape as current_kspace
        """
        # Data-consistency correction on the sampled kspace locations.
        dc_correction = self.soft_dc(current_kspace, ref_kspace, mask)

        # Refinement correction: reduce coils to image domain, refine, expand back.
        reduced_image = sensitivity_map_reduce(current_kspace, sens_maps, spatial_dims=self.spatial_dims)
        refinement_correction = sensitivity_map_expand(
            self.model(reduced_image), sens_maps, spatial_dims=self.spatial_dims
        )

        return current_kspace - dc_correction - refinement_correction
source_code/SegMamba/monai/apps/reconstruction/networks/nets/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
source_code/SegMamba/monai/apps/reconstruction/networks/nets/coil_sensitivity_model.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from collections.abc import Sequence
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+ from torch import Tensor
19
+
20
+ from monai.apps.reconstruction.mri_utils import root_sum_of_squares_t
21
+ from monai.apps.reconstruction.networks.nets.complex_unet import ComplexUnet
22
+ from monai.apps.reconstruction.networks.nets.utils import (
23
+ reshape_batch_channel_to_channel_dim,
24
+ reshape_channel_to_batch_dim,
25
+ )
26
+ from monai.networks.blocks.fft_utils_t import ifftn_centered_t
27
+
28
+
29
+ class CoilSensitivityModel(nn.Module):
30
+ """
31
+ This class uses a convolutional model to learn coil sensitivity maps for multi-coil MRI reconstruction.
32
+ The convolutional model is :py:class:`monai.apps.reconstruction.networks.nets.complex_unet` by default
33
+ but can be specified by the user as well. Learning is done on the center of the under-sampled
34
+ kspace (that region is fully sampled).
35
+
36
+ The data being a (complex) 2-channel tensor is a requirement for using this model.
37
+
38
+ Modified and adopted from: https://github.com/facebookresearch/fastMRI
39
+
40
+ Args:
41
+ spatial_dims: number of spatial dimensions.
42
+ features: six integers as numbers of features. denotes number of channels in each layer.
43
+ act: activation type and arguments. Defaults to LeakyReLU.
44
+ norm: feature normalization type and arguments. Defaults to instance norm.
45
+ bias: whether to have a bias term in convolution blocks. Defaults to True.
46
+ dropout: dropout ratio. Defaults to 0.0.
47
+ upsample: upsampling mode, available options are
48
+ ``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.
49
+ coil_dim: coil dimension in the data
50
+ conv_net: the learning model used to estimate the coil sensitivity maps. default
51
+ is :py:class:`monai.apps.reconstruction.networks.nets.complex_unet`. The only
52
+ requirement on the model is to have 2 as input and output number of channels.
53
+ """
54
+
55
+ def __init__(
56
+ self,
57
+ spatial_dims: int = 2,
58
+ features: Sequence[int] = (32, 32, 64, 128, 256, 32),
59
+ act: str | tuple = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
60
+ norm: str | tuple = ("instance", {"affine": True}),
61
+ bias: bool = True,
62
+ dropout: float | tuple = 0.0,
63
+ upsample: str = "deconv",
64
+ coil_dim: int = 1,
65
+ conv_net: nn.Module | None = None,
66
+ ):
67
+ super().__init__()
68
+ if conv_net is None:
69
+ self.conv_net = ComplexUnet(
70
+ spatial_dims=spatial_dims,
71
+ features=features,
72
+ act=act,
73
+ norm=norm,
74
+ bias=bias,
75
+ dropout=dropout,
76
+ upsample=upsample,
77
+ )
78
+ else:
79
+ # assume the first layer is convolutional and
80
+ # check whether in_channels == 2
81
+ params = [p.shape for p in conv_net.parameters()]
82
+ if params[0][1] != 2:
83
+ raise ValueError(f"in_channels should be 2 but it's {params[0][1]}.")
84
+ self.conv_net = conv_net # type: ignore
85
+ self.spatial_dims = spatial_dims
86
+ self.coil_dim = coil_dim
87
+
88
+ def get_fully_sampled_region(self, mask: Tensor) -> tuple[int, int]:
89
+ """
90
+ Extracts the size of the fully-sampled part of the kspace. Note that when a kspace
91
+ is under-sampled, a part of its center is fully sampled. This part is called the Auto
92
+ Calibration Region (ACR). ACR is used for sensitivity map computation.
93
+
94
+ Args:
95
+ mask: the under-sampling mask of shape (..., S, 1) where S denotes the sampling dimension
96
+
97
+ Returns:
98
+ A tuple containing
99
+ (1) left index of the region
100
+ (2) right index of the region
101
+
102
+ Note:
103
+ Suppose the mask is of shape (1,1,20,1). If this function returns 8,12 as left and right
104
+ indices, then it means that the fully-sampled center region has size 4 starting from 8 to 12.
105
+ """
106
+ left = right = mask.shape[-2] // 2
107
+ while mask[..., right, :]:
108
+ right += 1
109
+
110
+ while mask[..., left, :]:
111
+ left -= 1
112
+
113
+ return left + 1, right
114
+
115
+ def forward(self, masked_kspace: Tensor, mask: Tensor) -> Tensor:
116
+ """
117
+ Args:
118
+ masked_kspace: the under-sampled kspace (which is the input measurement). Its shape
119
+ is (B,C,H,W,2) for 2D data or (B,C,H,W,D,2) for 3D data.
120
+ mask: the under-sampling mask with shape (1,1,1,W,1) for 2D data or (1,1,1,1,D,1) for 3D data.
121
+
122
+ Returns:
123
+ predicted coil sensitivity maps with shape (B,C,H,W,2) for 2D data or (B,C,H,W,D,2) for 3D data.
124
+ """
125
+ left, right = self.get_fully_sampled_region(mask)
126
+ num_low_freqs = right - left # size of the fully-sampled center
127
+
128
+ # take out the fully-sampled region and set the rest of the data to zero
129
+ x = torch.zeros_like(masked_kspace)
130
+ start = (mask.shape[-2] - num_low_freqs + 1) // 2 # this marks the start of center extraction
131
+ x[..., start : start + num_low_freqs, :] = masked_kspace[..., start : start + num_low_freqs, :]
132
+
133
+ # apply inverse fourier to the extracted fully-sampled data
134
+ x = ifftn_centered_t(x, spatial_dims=self.spatial_dims, is_complex=True)
135
+
136
+ x, b = reshape_channel_to_batch_dim(x) # shape of x will be (B*C,1,...)
137
+ x = self.conv_net(x)
138
+ x = reshape_batch_channel_to_channel_dim(x, b) # shape will be (B,C,...)
139
+ # normalize the maps
140
+ x = x / root_sum_of_squares_t(x, spatial_dim=self.coil_dim).unsqueeze(self.coil_dim)
141
+
142
+ return x
source_code/SegMamba/monai/csrc/filtering/bilateral/bilateral.cpp ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <torch/extension.h>
15
+ #include <stdexcept>
16
+ #include <string>
17
+
18
+ #include "bilateral.h"
19
+ #include "utils/common_utils.h"
20
+
21
// Entry point for bilateral filtering. Dispatches to the CPU or CUDA
// implementation, and within each to the brute-force or permutohedral-lattice
// (PHL) variant, based on the input tensor's device and the usePHL flag.
//
// input:         batch-first tensor; size(1) is the channel count and the
//                remaining dim() - 2 axes are spatial.
// spatial_sigma: standard deviation of the spatial gaussian.
// color_sigma:   standard deviation of the color/intensity gaussian.
// usePHL:        select the permutohedral-lattice approximation.
torch::Tensor BilateralFilter(torch::Tensor input, float spatial_sigma, float color_sigma, bool usePHL) {
  // All four implementations share this signature.
  torch::Tensor (*filterFunction)(torch::Tensor, float, float);

#ifdef WITH_CUDA

  if (torch::cuda::is_available() && input.is_cuda()) {
    CHECK_CONTIGUOUS_CUDA(input);

    // CUDA kernels are compiled per channel count up to a fixed maximum.
    if (input.size(1) > BF_CUDA_MAX_CHANNELS) {
      throw std::runtime_error(
          "Bilateral filtering not implemented for channel count > " + std::to_string(BF_CUDA_MAX_CHANNELS));
    }

    // ...and per spatial dimensionality up to a fixed maximum.
    if (input.dim() - 2 > BF_CUDA_MAX_SPATIAL_DIMENSION) {
      throw std::runtime_error(
          "Bilateral filtering not implemented for spatial dimension > " +
          std::to_string(BF_CUDA_MAX_SPATIAL_DIMENSION));
    }

    filterFunction = usePHL ? &BilateralFilterPHLCuda : &BilateralFilterCuda;
  } else {
    filterFunction = usePHL ? &BilateralFilterPHLCpu : &BilateralFilterCpu;
  }
#else
  // Built without CUDA support: only the CPU paths exist.
  filterFunction = usePHL ? &BilateralFilterPHLCpu : &BilateralFilterCpu;
#endif

  return filterFunction(input, spatial_sigma, color_sigma);
}
source_code/SegMamba/monai/csrc/filtering/bilateral/bilateralfilter_cpu.cpp ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <math.h>
15
+ #include <torch/extension.h>
16
+
17
+ #include "utils/tensor_description.h"
18
+ #include "utils/tensor_indexing.h"
19
+
20
+ template <typename scalar_t>
21
+ void BilateralFilterCpu(torch::Tensor inputTensor, torch::Tensor outputTensor, float spatialSigma, float colorSigma) {
22
+ // Getting tensor description.
23
+ TensorDescription desc = TensorDescription(inputTensor);
24
+
25
+ // Raw tensor data pointers.
26
+ scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
27
+ scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();
28
+
29
+ // Pre-calculate common values
30
+ int windowSize = (int)ceil(5.0f * spatialSigma) | 1; // ORing last bit to ensure odd window size
31
+ int halfWindowSize = floor(0.5f * windowSize);
32
+ scalar_t spatialExpConstant = -1.0f / (2 * spatialSigma * spatialSigma);
33
+ scalar_t colorExpConstant = -1.0f / (2 * colorSigma * colorSigma);
34
+
35
+ // Kernel sizes.
36
+ int* kernelSizes = new int[desc.dimensions];
37
+
38
+ for (int i = 0; i < desc.dimensions; i++) {
39
+ kernelSizes[i] = windowSize;
40
+ }
41
+
42
+ // Pre-calculate gaussian kernel in 1D.
43
+ scalar_t* gaussianKernel = new scalar_t[windowSize];
44
+
45
+ for (int i = 0; i < windowSize; i++) {
46
+ int distance = i - halfWindowSize;
47
+ gaussianKernel[i] = exp(distance * distance * spatialExpConstant);
48
+ }
49
+
50
+ // Kernel aggregates used to calculate
51
+ // the output value.
52
+ scalar_t* valueSum = new scalar_t[desc.channelCount];
53
+ scalar_t weightSum = 0;
54
+
55
+ // Looping over the batches
56
+ for (int b = 0; b < desc.batchCount; b++) {
57
+ int batchOffset = b * desc.batchStride;
58
+
59
+ // Looping over all dimensions for the home element
60
+ Indexer homeIndex = Indexer(desc.dimensions, desc.sizes);
61
+ do // while(homeIndex++)
62
+ {
63
+ // Calculating indexing offset for the home element
64
+ int homeOffset = batchOffset;
65
+
66
+ for (int i = 0; i < desc.dimensions; i++) {
67
+ homeOffset += homeIndex[i] * desc.strides[i];
68
+ }
69
+
70
+ // Zero kernel aggregates.
71
+ for (int i = 0; i < desc.channelCount; i++) {
72
+ valueSum[i] = 0;
73
+ }
74
+
75
+ weightSum = 0.0f;
76
+
77
+ // Looping over all dimensions for the neighbour element
78
+ Indexer kernelIndex = Indexer(desc.dimensions, kernelSizes);
79
+ do // while(kernelIndex++)
80
+ {
81
+ // Calculating buffer offset for the neighbour element
82
+ // Index is clamped to the border in each dimension.
83
+ int neighbourOffset = batchOffset;
84
+
85
+ for (int i = 0; i < desc.dimensions; i++) {
86
+ int neighbourIndex = homeIndex[i] + kernelIndex[i] - halfWindowSize;
87
+ int neighbourIndexClamped = std::min(desc.sizes[i] - 1, std::max(0, neighbourIndex));
88
+ neighbourOffset += neighbourIndexClamped * desc.strides[i];
89
+ }
90
+
91
+ // Euclidean color distance.
92
+ scalar_t colorDistanceSquared = 0;
93
+
94
+ for (int i = 0; i < desc.channelCount; i++) {
95
+ scalar_t diff = inputTensorData[homeOffset + i * desc.channelStride] -
96
+ inputTensorData[neighbourOffset + i * desc.channelStride];
97
+ colorDistanceSquared += diff * diff;
98
+ }
99
+
100
+ // Calculating and combining the spatial
101
+ // and color weights.
102
+ scalar_t spatialWeight = 1;
103
+
104
+ for (int i = 0; i < desc.dimensions; i++) {
105
+ spatialWeight *= gaussianKernel[kernelIndex[i]];
106
+ }
107
+
108
+ scalar_t colorWeight = exp(colorDistanceSquared * colorExpConstant);
109
+ scalar_t totalWeight = spatialWeight * colorWeight;
110
+
111
+ // Aggregating values.
112
+ for (int i = 0; i < desc.channelCount; i++) {
113
+ valueSum[i] += inputTensorData[neighbourOffset + i * desc.channelStride] * totalWeight;
114
+ }
115
+
116
+ weightSum += totalWeight;
117
+ } while (kernelIndex++);
118
+
119
+ for (int i = 0; i < desc.channelCount; i++) {
120
+ outputTensorData[homeOffset + i * desc.channelStride] = valueSum[i] / weightSum;
121
+ }
122
+ } while (homeIndex++);
123
+ }
124
+ }
125
+
126
// Type-dispatch wrapper: allocates the output tensor and runs the templated
// brute-force CPU implementation for the input's floating-point scalar type
// (including half).
torch::Tensor BilateralFilterCpu(torch::Tensor inputTensor, float spatialSigma, float colorSigma) {
  // Preparing output tensor (every element is overwritten by the filter).
  torch::Tensor outputTensor = torch::zeros_like(inputTensor);

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(inputTensor.scalar_type(), "BilateralFilterCpu", ([&] {
                                        BilateralFilterCpu<scalar_t>(
                                            inputTensor, outputTensor, spatialSigma, colorSigma);
                                      }));

  return outputTensor;
}
source_code/SegMamba/monai/csrc/filtering/bilateral/bilateralfilter_cpu_phl.cpp ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <torch/extension.h>
15
+
16
+ #include "filtering/permutohedral/permutohedral.h"
17
+ #include "utils/tensor_description.h"
18
+
19
// Permutohedral-lattice bilateral filter on CPU for a single scalar type.
//
// For every spatial element a feature vector is built from the channel values
// scaled by 1/colorSigma followed by the spatial coordinates scaled by
// 1/spatialSigma; the data is then filtered with respect to those features by
// the permutohedral implementation.
template <typename scalar_t>
void BilateralFilterPHLCpu(
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    float spatialSigma,
    float colorSigma) {
  // Getting tensor description.
  TensorDescription desc = TensorDescription(inputTensor);

  // One feature per channel plus one per spatial dimension.
  int featureChannels = desc.channelCount + desc.dimensions;

  // Preparing memory
  scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
  scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();
  scalar_t* data = new scalar_t[desc.channelStride * desc.channelCount];
  scalar_t* features = new scalar_t[desc.channelStride * featureChannels];

  // Precalculating inverse sigmas
  float invSpatialSigma = 1.0f / spatialSigma;
  float invColorSigma = 1.0f / colorSigma;

  // Looping over batches
  for (int b = 0; b < desc.batchCount; b++) {
    int batchOffset = b * desc.batchStride;

    // Creating features (also permuting input data to be channel last. Permutohedral
    // implementation should be changed to channel first to avoid this)
    for (int i = 0; i < desc.channelStride; i++) {
      // Color features (and permutation)
      for (int c = 0; c < desc.channelCount; c++) {
        features[i * featureChannels + c] = invColorSigma * inputTensorData[batchOffset + i + c * desc.channelStride];
        data[i * desc.channelCount + c] = inputTensorData[batchOffset + i + c * desc.channelStride];
      }

      // Spatial features: decompose the flat element index i into per-dimension
      // coordinates using the spatial strides.
      int offsetRemainder = i;

      for (int d = 0; d < desc.dimensions; d++) {
        int coord = offsetRemainder / desc.strides[d];
        offsetRemainder -= coord * desc.strides[d];

        features[i * featureChannels + desc.channelCount + d] = (scalar_t)invSpatialSigma * coord;
      }
    }

    // Filtering data with respect to the features (result is read back from `data`).
    PermutohedralCPU<scalar_t>(data, features, desc.channelCount, featureChannels, desc.channelStride);

    // Writing output tensor (permuting back to channel first).
    for (int i = 0; i < desc.channelStride; i++) {
      for (int c = 0; c < desc.channelCount; c++) {
        outputTensorData[batchOffset + i + c * desc.channelStride] = data[i * desc.channelCount + c];
      }
    }
  }

  delete[] data;
  delete[] features;
}
78
+
79
// Function to choose template implementation based on dynamic, channels and dimensions
torch::Tensor BilateralFilterPHLCpu(torch::Tensor inputTensor, float spatialSigma, float colorSigma) {
  // Output is fully overwritten by the templated filter.
  torch::Tensor outputTensor = torch::zeros_like(inputTensor);

  // NOTE(review): dispatches float/double only, while the brute-force CPU
  // wrapper uses AT_DISPATCH_FLOATING_TYPES_AND_HALF — presumably the
  // permutohedral implementation does not support half; confirm before widening.
  AT_DISPATCH_FLOATING_TYPES(inputTensor.scalar_type(), "BilateralFilterPhlCpu", ([&] {
                               BilateralFilterPHLCpu<scalar_t>(inputTensor, outputTensor, spatialSigma, colorSigma);
                             }));

  return outputTensor;
}
source_code/SegMamba/monai/csrc/filtering/bilateral/bilateralfilter_cuda.cu ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_runtime.h>
16
+ #include <torch/extension.h>
17
+
18
+ #include "bilateral.h"
19
+ #include "utils/meta_macros.h"
20
+ #include "utils/tensor_description.h"
21
+
22
+ __constant__ int cBatchStride;
23
+ __constant__ int cColorStride;
24
+
25
+ __constant__ int cSizes[3];
26
+ __constant__ int cStrides[3];
27
+
28
+ __constant__ int cKernelSize;
29
+ __constant__ float cKernel[256];
30
+
31
+ __constant__ float cColorExponentFactor;
32
+
33
// Brute-force bilateral filter over 1D spatial data: one thread per spatial
// element ("home"), one grid row (blockIdx.y) per batch. Walks the full
// spatial kernel, clamping neighbour indices to the borders, accumulates
// weighted neighbour values into `output` and normalizes at the end.
// `output` must be zero-initialized by the caller (values are added with +=).
template <typename scalar_t, int C>
__global__ void BilateralFilterCudaKernel1D(scalar_t* input, scalar_t* output) {
  int kernelHalfSize = cKernelSize / 2;

  int homeOffset = blockIdx.x * blockDim.x + threadIdx.x;
  int batchOffset = blockIdx.y * cBatchStride;

  // The grid is rounded up; excess threads do nothing.
  if (homeOffset >= cColorStride)
    return;

  scalar_t weightSum = 0;

  for (int kernelOffset = 0; kernelOffset < cKernelSize; kernelOffset++) {
    // Clamp-to-edge border handling.
    int neighbourOffset = max(0, min(homeOffset + (kernelOffset - kernelHalfSize), cSizes[0] - 1));
    scalar_t gaussian = cKernel[kernelOffset];

    // Squared euclidean distance between home and neighbour channel vectors.
    scalar_t distanceSquared = 0;

#pragma unroll
    for (int c = 0; c < C; c++) {
      scalar_t a = input[batchOffset + homeOffset + c * cColorStride];
      scalar_t b = input[batchOffset + neighbourOffset + c * cColorStride];
      scalar_t diff = a - b;
      distanceSquared += diff * diff;
    }

    // Spatial weight comes from the precomputed 1D gaussian in constant memory.
    scalar_t spatialWeight = gaussian;
    scalar_t colorWeight = exp(cColorExponentFactor * distanceSquared);
    scalar_t totalWeight = spatialWeight * colorWeight;

    // Accumulate the weighted neighbour into each output channel.
#pragma unroll
    for (int c = 0; c < C; c++) {
      scalar_t a = input[batchOffset + neighbourOffset + c * cColorStride];

      output[batchOffset + homeOffset + c * cColorStride] += a * totalWeight;
    }

    weightSum += totalWeight;
  }

  // Normalize by the total accumulated weight.
#pragma unroll
  for (int c = 0; c < C; c++) {
    output[batchOffset + homeOffset + c * cColorStride] /= weightSum;
  }
}
78
+
79
// Brute-force bilateral filter over 2D spatial data; same scheme as the 1D
// kernel but the flat home offset is decomposed into (x, y) coordinates via
// the spatial strides, and the spatial weight is the separable product of the
// precomputed 1D gaussian in each dimension.
// `output` must be zero-initialized by the caller (values are added with +=).
template <typename scalar_t, int C>
__global__ void BilateralFilterCudaKernel2D(scalar_t* input, scalar_t* output) {
  int kernelHalfSize = cKernelSize / 2;

  int homeOffset = blockIdx.x * blockDim.x + threadIdx.x;
  int batchOffset = blockIdx.y * cBatchStride;

  // The grid is rounded up; excess threads do nothing.
  if (homeOffset >= cColorStride)
    return;

  // Decompose the flat offset into per-dimension coordinates.
  int homeX = homeOffset / cStrides[0];
  int homeY = (homeOffset - homeX * cStrides[0]) / cStrides[1];

  scalar_t weightSum = 0;

  for (int kernelX = 0; kernelX < cKernelSize; kernelX++) {
    // Clamp-to-edge border handling per dimension.
    int neighbourX = max(0, min(homeX + (kernelX - kernelHalfSize), cSizes[0] - 1));
    scalar_t gaussianX = cKernel[kernelX];

    for (int kernelY = 0; kernelY < cKernelSize; kernelY++) {
      int neighbourY = max(0, min(homeY + (kernelY - kernelHalfSize), cSizes[1] - 1));
      scalar_t gaussianY = cKernel[kernelY];

      int neighbourOffset = neighbourX * cStrides[0] + neighbourY;

      // Squared euclidean distance between home and neighbour channel vectors.
      scalar_t distanceSquared = 0;

#pragma unroll
      for (int c = 0; c < C; c++) {
        scalar_t a = input[batchOffset + homeOffset + c * cColorStride];
        scalar_t b = input[batchOffset + neighbourOffset + c * cColorStride];
        scalar_t diff = a - b;
        distanceSquared += diff * diff;
      }

      // Separable spatial gaussian times the color gaussian.
      scalar_t spatialWeight = gaussianX * gaussianY;
      scalar_t colorWeight = exp(cColorExponentFactor * distanceSquared);
      scalar_t totalWeight = spatialWeight * colorWeight;

      // Accumulate the weighted neighbour into each output channel.
#pragma unroll
      for (int c = 0; c < C; c++) {
        scalar_t a = input[batchOffset + neighbourOffset + c * cColorStride];

        output[batchOffset + homeOffset + c * cColorStride] += a * totalWeight;
      }

      weightSum += totalWeight;
    }
  }

  // Normalize by the total accumulated weight.
#pragma unroll
  for (int c = 0; c < C; c++) {
    output[batchOffset + homeOffset + c * cColorStride] /= weightSum;
  }
}
134
+
135
// Brute-force bilateral filter over 3D spatial data; 3D analogue of the 2D
// kernel: the flat home offset is decomposed into (x, y, z) via the spatial
// strides and the spatial weight is the separable product of the precomputed
// 1D gaussian over the three dimensions.
// `output` must be zero-initialized by the caller (values are added with +=).
template <typename scalar_t, int C>
__global__ void BilateralFilterCudaKernel3D(scalar_t* input, scalar_t* output) {
  int kernelHalfSize = cKernelSize / 2;

  int homeOffset = blockIdx.x * blockDim.x + threadIdx.x;
  int batchOffset = blockIdx.y * cBatchStride;

  // The grid is rounded up; excess threads do nothing.
  if (homeOffset >= cColorStride)
    return;

  // Decompose the flat offset into per-dimension coordinates.
  int homeX = homeOffset / cStrides[0];
  int homeY = (homeOffset - homeX * cStrides[0]) / cStrides[1];
  int homeZ = (homeOffset - homeX * cStrides[0] - homeY * cStrides[1]) / cStrides[2];

  scalar_t weightSum = 0;

  for (int kernelX = 0; kernelX < cKernelSize; kernelX++) {
    // Clamp-to-edge border handling per dimension.
    int neighbourX = max(0, min(homeX + (kernelX - kernelHalfSize), cSizes[0] - 1));
    scalar_t gaussianX = cKernel[kernelX];

    for (int kernelY = 0; kernelY < cKernelSize; kernelY++) {
      int neighbourY = max(0, min(homeY + (kernelY - kernelHalfSize), cSizes[1] - 1));
      scalar_t gaussianY = cKernel[kernelY];

      for (int kernelZ = 0; kernelZ < cKernelSize; kernelZ++) {
        int neighbourZ = max(0, min(homeZ + (kernelZ - kernelHalfSize), cSizes[2] - 1));
        scalar_t gaussianZ = cKernel[kernelZ];

        int neighbourOffset = neighbourX * cStrides[0] + neighbourY * cStrides[1] + neighbourZ;

        // Squared euclidean distance between home and neighbour channel vectors.
        scalar_t distanceSquared = 0;

#pragma unroll
        for (int c = 0; c < C; c++) {
          scalar_t a = input[batchOffset + homeOffset + c * cColorStride];
          scalar_t b = input[batchOffset + neighbourOffset + c * cColorStride];
          scalar_t diff = a - b;
          distanceSquared += diff * diff;
        }

        // Separable spatial gaussian times the color gaussian.
        scalar_t spatialWeight = gaussianX * gaussianY * gaussianZ;
        scalar_t colorWeight = exp(cColorExponentFactor * distanceSquared);
        scalar_t totalWeight = spatialWeight * colorWeight;

        // Accumulate the weighted neighbour into each output channel.
#pragma unroll
        for (int c = 0; c < C; c++) {
          scalar_t a = input[batchOffset + neighbourOffset + c * cColorStride];
          output[batchOffset + homeOffset + c * cColorStride] += a * totalWeight;
        }

        weightSum += totalWeight;
      }
    }
  }

  // Normalize by the total accumulated weight.
#pragma unroll
  for (int c = 0; c < C; c++) {
    output[batchOffset + homeOffset + c * cColorStride] /= weightSum;
  }
}
195
+
196
// Host-side setup for the brute-force CUDA bilateral filter, compiled for a
// fixed channel count C and spatial dimensionality D. Precomputes the 1D
// spatial gaussian, uploads sizes/strides/kernel to constant memory, then
// launches the kernel matching D.
template <int C, int D>
void BilateralFilterCuda(torch::Tensor inputTensor, torch::Tensor outputTensor, float spatialSigma, float colorSigma) {
  // Getting tensor description.
  TensorDescription desc = TensorDescription(inputTensor);

  // Pre-calculating exponent factors.
  float spatialExponentFactor = -1.0f / (2 * spatialSigma * spatialSigma);
  float colorExponentFactor = -1.0f / (2 * colorSigma * colorSigma);

  // Pre-calculating gaussian kernel.
  int kernelSize = (int)ceil(5.0f * spatialSigma) | 1; // ORing last bit to ensure odd window size
  int kernelHalfSize = floor(0.5f * kernelSize);
  float* kernel = new float[kernelSize];

  for (int i = 0; i < kernelSize; i++) {
    int distance = i - kernelHalfSize;
    kernel[i] = exp(distance * distance * spatialExponentFactor);
  }

  // Writing constant memory.
  // NOTE(review): cKernel is declared float[256] but kernelSize = ceil(5*sigma)|1
  // is not checked against that bound, so a very large spatialSigma (≳51) would
  // overflow the constant buffer — confirm and guard upstream if needed.
  cudaMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int));
  cudaMemcpyToSymbol(cColorStride, &desc.channelStride, sizeof(int));
  cudaMemcpyToSymbol(cSizes, desc.sizes, sizeof(int) * D);
  cudaMemcpyToSymbol(cStrides, desc.strides, sizeof(int) * D);
  cudaMemcpyToSymbol(cKernelSize, &kernelSize, sizeof(int));
  cudaMemcpyToSymbol(cKernel, kernel, sizeof(float) * kernelSize);
  cudaMemcpyToSymbol(cColorExponentFactor, &colorExponentFactor, sizeof(float));

#define BLOCK_SIZE 32

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      inputTensor.scalar_type(), "BilateralFilterCudaKernel", ([&] {
        // Dispatch kernel. (Partial template function specialisation not supported at present so using this switch
        // instead)
        // Grid: x covers the spatial elements (rounded up), y covers the batch.
        switch (D) {
          case (1):
            BilateralFilterCudaKernel1D<scalar_t, C>
                <<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
                    inputTensor.data_ptr<scalar_t>(), outputTensor.data_ptr<scalar_t>());
            break;
          case (2):
            BilateralFilterCudaKernel2D<scalar_t, C>
                <<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
                    inputTensor.data_ptr<scalar_t>(), outputTensor.data_ptr<scalar_t>());
            break;
          case (3):
            BilateralFilterCudaKernel3D<scalar_t, C>
                <<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
                    inputTensor.data_ptr<scalar_t>(), outputTensor.data_ptr<scalar_t>());
            break;
        }
      }));

  delete[] kernel;
}
251
+
252
// Function to choose template implementation based on dynamic, channels and dimensions
torch::Tensor BilateralFilterCuda(torch::Tensor inputTensor, float spatialSigma, float colorSigma) {
  // Output starts zeroed: the device kernels accumulate into it with +=.
  torch::Tensor outputTensor = torch::zeros_like(inputTensor);

  // SWITCH_AB instantiates CASE over every (channel, dimension) combination up
  // to the compiled maxima and invokes the one matching the runtime tensor.
#define CASE(c, d) BilateralFilterCuda<c, d>(inputTensor, outputTensor, spatialSigma, colorSigma);
  SWITCH_AB(CASE, BF_CUDA_MAX_CHANNELS, BF_CUDA_MAX_SPATIAL_DIMENSION, inputTensor.size(1), inputTensor.dim() - 2);

  return outputTensor;
}
source_code/SegMamba/monai/csrc/filtering/bilateral/bilateralfilter_cuda_phl.cu ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_runtime.h>
16
+ #include <torch/extension.h>
17
+
18
+ #include "bilateral.h"
19
+ #include "filtering/permutohedral/permutohedral.h"
20
+ #include "utils/meta_macros.h"
21
+ #include "utils/tensor_description.h"
22
+
23
+ __constant__ int cBatchStride;
24
+ __constant__ int cChannelStride;
25
+ __constant__ int cSpatialStrides[3];
26
+ __constant__ float cInvSpatialSigma;
27
+ __constant__ float cInvColorSigma;
28
+
29
// Kernel: converts one batched, channel-major (planar) input tensor into the
// two buffers the permutohedral filter expects: an interleaved data buffer
// (C consecutive values per element) and a feature buffer holding C color
// features followed by D spatial coordinates per element, each pre-scaled by
// the corresponding inverse sigma stored in constant memory.
template <typename scalar_t, int C, int D>
__global__ void FeatureCreation(const scalar_t* inputTensor, scalar_t* outputData, scalar_t* outputFeatures) {
  int elementIndex = blockIdx.x * blockDim.x + threadIdx.x; // flat spatial index within one batch item
  int batchIndex = blockIdx.y; // one grid row per batch item

  // The grid is rounded up to whole blocks; drop the overhang threads.
  if (elementIndex >= cChannelStride)
    return;

  int dataBatchOffset = batchIndex * cBatchStride;
  int featureBatchOffset = batchIndex * (D + C) * cChannelStride;

  // Copy the C channel values, transposing from planar (channel-major) input
  // to interleaved output, and emit them as color features scaled by 1/colorSigma.
#pragma unroll
  for (int i = 0; i < C; i++) {
    outputData[dataBatchOffset + elementIndex * C + i] =
        inputTensor[dataBatchOffset + elementIndex + i * cChannelStride];
    outputFeatures[featureBatchOffset + elementIndex * (C + D) + i] =
        inputTensor[dataBatchOffset + elementIndex + i * cChannelStride] * cInvColorSigma;
  }

  // Decompose the flat element index into D spatial coordinates using the
  // per-dimension strides, emitting each coordinate scaled by 1/spatialSigma.
  int remainder = elementIndex;

#pragma unroll
  for (int i = 0; i < D; i++) {
    int coord = remainder / cSpatialStrides[i];
    remainder -= coord * cSpatialStrides[i];

    outputFeatures[featureBatchOffset + elementIndex * (C + D) + C + i] = coord * cInvSpatialSigma;
  }
}
58
+
59
// Kernel: transposes the filtered interleaved buffer (C consecutive values per
// element) back into the planar, channel-major layout of the output tensor.
template <typename scalar_t, int C>
__global__ void WriteOutput(const scalar_t* data, scalar_t* outputTensor) {
  int elementIndex = blockIdx.x * blockDim.x + threadIdx.x; // flat spatial index
  int batchIndex = blockIdx.y; // one grid row per batch item

  // The grid is rounded up to whole blocks; drop the overhang threads.
  if (elementIndex >= cChannelStride)
    return;

  int batchOffset = batchIndex * cBatchStride;

#pragma unroll
  for (int i = 0; i < C; i++) {
    outputTensor[batchOffset + elementIndex + i * cChannelStride] = data[batchOffset + elementIndex * C + i];
  }
}
74
+
75
// Single-instantiation permutohedral-lattice bilateral filter.
// C = channel count, D = spatial dimension count (compile-time constants).
// Builds per-element feature vectors (C color features + D spatial
// coordinates), runs the permutohedral filter per batch item, then writes the
// result back in the input's planar (channel-major) layout.
template <typename scalar_t, int C, int D>
void BilateralFilterPHLCuda(
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    float spatialSigma,
    float colorSigma) {
  // Getting tensor description.
  TensorDescription desc = TensorDescription(inputTensor);

  // One feature per channel plus one per spatial dimension.
  int featureChannelCount = desc.channelCount + desc.dimensions;

  // Pre calculating inverse sigmas (kernels multiply instead of dividing).
  float invSpatialSigma = 1.0f / spatialSigma;
  float invColorSigma = 1.0f / colorSigma;

  // Preparing global memory
  scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
  scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();

  // Scratch buffers: interleaved data and per-element feature vectors.
  // NOTE(review): cudaMalloc return codes are not checked here — an
  // allocation failure would only surface later as a kernel fault.
  scalar_t* data;
  scalar_t* features;
  cudaMalloc(&data, desc.batchCount * desc.channelStride * desc.channelCount * sizeof(scalar_t));
  cudaMalloc(&features, desc.batchCount * desc.channelStride * featureChannelCount * sizeof(scalar_t));

  // Preparing constant memory (read by the FeatureCreation/WriteOutput kernels).
  cudaMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int));
  cudaMemcpyToSymbol(cChannelStride, &desc.channelStride, sizeof(int));
  cudaMemcpyToSymbol(cSpatialStrides, desc.strides, sizeof(int) * desc.dimensions);
  cudaMemcpyToSymbol(cInvSpatialSigma, &invSpatialSigma, sizeof(float));
  cudaMemcpyToSymbol(cInvColorSigma, &invColorSigma, sizeof(float));

#define BLOCK_SIZE 32

  // Creating features: one thread per spatial element, one grid row per batch
  // item; the grid is rounded up and the kernels guard the overhang.
  FeatureCreation<scalar_t, C, D>
      <<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
          inputTensorData, data, features);

  // Filtering data with respect to the features for each sample in batch
  for (int batchIndex = 0; batchIndex < desc.batchCount; batchIndex++) {
    scalar_t* offsetData = data + batchIndex * desc.batchStride;
    scalar_t* offsetFeatures = features + batchIndex * featureChannelCount * desc.channelStride;

    PermutohedralCuda<scalar_t, C, C + D>(offsetData, offsetFeatures, desc.channelStride, true);
  }

  // Writing output: interleaved scratch buffer -> planar output tensor.
  WriteOutput<scalar_t, C><<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
      data, outputTensorData);

  cudaFree(data);
  cudaFree(features);
}
128
+
129
// Function to choose template implementation based on dynamic, channels and dimensions
torch::Tensor BilateralFilterPHLCuda(torch::Tensor inputTensor, float spatialSigma, float colorSigma) {
  torch::Tensor outputTensor = torch::zeros_like(inputTensor);

  // Expand a dispatch case for every (channel, dimension) combination up to
  // the compile-time maxima; SWITCH_AB picks the one matching the runtime
  // sizes (size(1) = channels, dim() - 2 = spatial dimensions), and
  // AT_DISPATCH selects the floating scalar type.
#define CASE(c, d)                                                                       \
  AT_DISPATCH_FLOATING_TYPES(inputTensor.scalar_type(), "BilateralFilterCudaPHL", ([&] { \
                               BilateralFilterPHLCuda<scalar_t, c, d>(                   \
                                   inputTensor, outputTensor, spatialSigma, colorSigma); \
                             }));

  SWITCH_AB(CASE, BF_CUDA_MAX_CHANNELS, BF_CUDA_MAX_SPATIAL_DIMENSION, inputTensor.size(1), inputTensor.dim() - 2);

  return outputTensor;
}
source_code/SegMamba/monai/csrc/filtering/filtering.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #pragma once
15
+
16
+ #include "bilateral/bilateral.h"
17
+ #include "permutohedral/permutohedral.h"
18
+ #include "trainable_bilateral/trainable_bilateral.h"
19
+ #include "trainable_joint_bilateral/trainable_joint_bilateral.h"
source_code/SegMamba/monai/csrc/filtering/permutohedral/hash_table.cuh ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <torch/extension.h>
15
+
16
+ //#define USE_ADDITIVE_HASH
17
+
18
+ // turn this on if you want to get slightly less memory consumption and slightly longer run times.
19
+ //#define LINEAR_D_MEMORY
20
+
21
+ #define USE_CUSTOM_MODULO
22
+
23
+ __device__ __constant__ signed short* table_keys;
24
+ __device__ __constant__ int* table_entries;
25
+ __device__ __constant__ unsigned int table_capacity;
26
+ __device__ __constant__ signed short* table_zeros;
27
+ __device__ __constant__ char* table_rank;
28
+
29
+ /*************************************************************/
30
+ /* Fast computation of modulo operator with constant divisor */
31
+ /*************************************************************/
32
+ __device__ __constant__ unsigned int __div_m;
33
+ __device__ __constant__ unsigned int __div_l;
34
+ __device__ __constant__ unsigned int __div_c;
35
+
36
+ #ifdef USE_CUSTOM_MODULO
37
+ __device__ inline unsigned int modHash(unsigned int n) {
38
+ unsigned int t1 = __umulhi(__div_m, n);
39
+ return n - ((t1 + ((n - t1) >> 1)) >> (__div_l - 1)) * __div_c;
40
+ }
41
+
42
+ #else
43
+ #define modHash(n) ((n) % (2 * table_capacity));
44
+ #endif
45
+
46
+ /*************************************************************/
47
+ /* End modulo */
48
+ /*************************************************************/
49
+
50
+ __device__ __constant__ static unsigned int hOffset[64];
51
+
52
// Allocates the device-side hash table used by the permutohedral lattice and
// publishes its buffers through the __constant__ table_* symbols.
// kd: key dimensionality; vd: value dimensionality.
// Returns the freshly allocated, zero-initialised values buffer; the caller
// owns it (cudaFree). The other buffers are released by destroyHashTable().
template <typename scalar_t, int kd, int vd>
static scalar_t* createHashTable(int capacity) {
  scalar_t* values;
  cudaMalloc(&values, capacity * vd * sizeof(scalar_t));
  cudaMemset(values, 0, capacity * vd * sizeof(scalar_t));

  // Entry table is 2x capacity; -1 marks an empty slot (see hashTableInsert).
  int* entries;
  cudaMalloc(&entries, capacity * 2 * sizeof(int));
  cudaMemset(entries, -1, capacity * 2 * sizeof(int));

  cudaMemcpyToSymbol(table_capacity, &capacity, sizeof(int));

  cudaMemcpyToSymbol(table_entries, &entries, sizeof(int*));

#ifdef LINEAR_D_MEMORY

  char* ranks;
  cudaMalloc(&ranks, capacity * sizeof(char));

  signed short* zeros;
  cudaMalloc(&zeros, capacity * sizeof(signed short));

  cudaMemcpyToSymbol(table_rank, &ranks, sizeof(char*));
  // Fix: table_zeros is a signed short*, so use the matching pointer type in
  // sizeof (was sizeof(char*); identical size on supported platforms, but the
  // mismatched type obscured intent).
  cudaMemcpyToSymbol(table_zeros, &zeros, sizeof(signed short*));

#else

  signed short* keys;
  cudaMalloc(&keys, capacity * kd * sizeof(signed short));
  cudaMemset(keys, 0, capacity * kd * sizeof(signed short));

  // Fix: table_keys is a signed short*, not an unsigned int*.
  cudaMemcpyToSymbol(table_keys, &keys, sizeof(signed short*));

#endif

  return values;
}
89
+
90
// Releases the device buffers allocated by createHashTable (all except the
// values buffer, which createHashTable hands to the caller).
template <typename scalar_t>
static void destroyHashTable() {
#ifdef LINEAR_D_MEMORY
  // Fix: the LINEAR_D_MEMORY buffers (rank and zero-point arrays) were
  // previously never freed — release them here to mirror createHashTable.
  char* ranks;
  cudaMemcpyFromSymbol(&ranks, table_rank, sizeof(char*));
  cudaFree(ranks);

  signed short* zeros;
  cudaMemcpyFromSymbol(&zeros, table_zeros, sizeof(signed short*));
  cudaFree(zeros);
#else
  signed short* keys;
  // Fix: copy the pointer back with its actual type (was sizeof(unsigned int*)).
  cudaMemcpyFromSymbol(&keys, table_keys, sizeof(signed short*));
  cudaFree(keys);
#endif

  int* entries;
  cudaMemcpyFromSymbol(&entries, table_entries, sizeof(int*));
  cudaFree(entries);
}
102
+
103
// Simple multiplicative hash over a kd-component short key.
template <int kd>
__device__ __host__ static unsigned int hash(signed short* key) {
  unsigned int acc = 0;
  for (int component = 0; component < kd; ++component) {
    acc = (acc + key[component]) * 2531011;
  }
  return acc;
}
112
+
113
// Simple multiplicative hash over a kd-component int key (same scheme as the
// short-key overload).
template <int kd>
__device__ __host__ static unsigned int hash(int* key) {
  unsigned int acc = 0;
  for (int component = 0; component < kd; ++component) {
    acc = (acc + key[component]) * 2531011;
  }
  return acc;
}
122
+
123
// LINEAR_D_MEMORY variant: checks whether lattice point idx reconstructs to
// the given key. Each point is addressed as (slot, color) with
// slot = idx / (d+1); its key is derived from the stored zero-point and rank
// arrays instead of an explicit key table.
template <int d>
__device__ static bool matchKey(int idx, signed short* key) {
  bool match = true;
  int slot = idx / (d + 1), color = idx - slot * (d + 1);
  char* rank = table_rank + slot * (d + 1);
  signed short* zero = table_zeros + slot * (d + 1);

  // Reconstruct each key coordinate and compare; stop at the first mismatch.
  for (int i = 0; i < d && match; i++) {
    match = (key[i] == zero[i] + color - (rank[i] > d - color ? (d + 1) : 0));
  }

  return match;
}
136
+
137
// Reconstructs the key of lattice point idx from the stored zero-point and
// rank arrays (LINEAR_D_MEMORY representation), writing d components to key.
template <int d>
__device__ static void generateKey(int idx, signed short* key) {
  const int simplexSize = d + 1;
  const int slot = idx / simplexSize;
  const int color = idx - slot * simplexSize;

  char* rankBase = table_rank + slot * simplexSize;
  signed short* zeroBase = table_zeros + slot * simplexSize;

  for (int component = 0; component < d; ++component) {
    signed short value = zeroBase[component] + color;
    if (rankBase[component] > d - color)
      value -= simplexSize;
    key[component] = value;
  }
}
147
+
148
// Inserts a key into the open-addressed device hash table, or finds its
// existing slot. fh: precomputed hash of the key; slot: index to record for
// this key. Returns the entry index h at which the key lives.
// Concurrency: atomicCAS transitions a cell empty(-1) -> locked(-2), the key
// is written, then atomicExch publishes the slot, so racing inserters of
// distinct keys probe past each other safely.
template <int kd>
__device__ static int hashTableInsert(unsigned int fh, signed short* key, unsigned int slot) {
  int h = modHash(fh);
  while (1) {
    int* e = &table_entries[h];

    // If the cell is empty (-1), lock it (-2)
    int contents = atomicCAS(e, -1, -2);

    if (contents == -2) {
      // If it was locked already, move on to the next cell
      // NOTE(review): a thread inserting the SAME key as the lock holder
      // probes onward instead of waiting — presumably tolerated by the
      // lattice (duplicates collapse downstream); confirm against upstream.
    } else if (contents == -1) {
      // If it was empty, we successfully locked it. Write our key.

#ifndef LINEAR_D_MEMORY
      for (int i = 0; i < kd; i++) {
        table_keys[slot * kd + i] = key[i];
      }
#endif

      // Unlock
      atomicExch(e, slot);

      return h;
    } else {
      // The cell is unlocked and has a key in it, check if it matches
#ifdef LINEAR_D_MEMORY
      if (matchKey<kd>(contents, key))
        return h;
#else
      bool match = true;

      for (int i = 0; i < kd && match; i++) {
        match = (table_keys[contents * kd + i] == key[i]);
      }

      if (match)
        return h;
#endif
    }
    // increment the bucket with wraparound
    h++;

    if (h == table_capacity * 2)
      h = 0;
  }
}
195
+
196
// Convenience overload: hashes the key, then forwards to the explicit-hash
// insert.
template <int kd>
__device__ static int hashTableInsert(signed short* key, unsigned int slot) {
  return hashTableInsert<kd>(hash<kd>(key), key, slot);
}
201
+
202
// Looks up a key whose hash fh has already been computed. Linear-probes the
// entry table; returns the stored slot index, or -1 if the key is absent.
template <int kd>
__device__ static int hashTableRetrieveWithHash(unsigned int fh, signed short* key) {
  int h = modHash(fh);
  while (1) {
    int* e = table_entries + h;

    // An empty cell terminates the probe chain: the key was never inserted.
    if (*e == -1)
      return -1;

#ifdef LINEAR_D_MEMORY
    if (matchKey<kd>((*e), key))
      return *e;
#else
    bool match = true;

    for (int i = 0; i < kd && match; i++) {
      match = (table_keys[(*e) * kd + i] == key[i]);
    }

    if (match)
      return *e;
#endif

    // Advance to the next bucket, wrapping at the table size (2x capacity).
    h++;

    if (h == table_capacity * 2)
      h = 0;
  }
}
231
+
232
// Looks up a key in the hash table; returns the stored slot index, or -1 if
// the key is absent. The original body duplicated hashTableRetrieveWithHash
// line-for-line after computing the hash — delegate to it instead so the
// probe loop exists in exactly one place.
template <int kd>
__device__ static int hashTableRetrieve(signed short* key) {
  return hashTableRetrieveWithHash<kd>(hash<kd>(key), key);
}
source_code/SegMamba/monai/csrc/filtering/permutohedral/permutohedral.cpp ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <stdexcept>
15
+ #include <string>
16
+
17
+ #include "utils/common_utils.h"
18
+ #include "utils/meta_macros.h"
19
+
20
+ #include "permutohedral.h"
21
+
22
// Entry point for permutohedral filtering: filters `input` (batch x channel x
// spatial...) guided by per-element `features` (batch x feature x spatial...),
// dispatching to the CUDA or CPU implementation. Returns a new tensor; the
// input is cloned, not modified.
torch::Tensor PermutohedralFilter(torch::Tensor input, torch::Tensor features) {
  input = input.contiguous();

  int batchCount = input.size(0);
  int batchStride = input.stride(0);
  // NOTE(review): stride(1) equals the per-channel element count only because
  // the tensor was made contiguous above.
  int elementCount = input.stride(1);
  int channelCount = input.size(1);
  int featureCount = features.size(1);

  // Move the channel axis last so each element's channels are interleaved —
  // the layout both permutohedral implementations expect.
  // movedim not support in torch < 1.7.1
#if MONAI_TORCH_VERSION >= 10701
  torch::Tensor data = input.clone().movedim(1, -1).contiguous();
  features = features.movedim(1, -1).contiguous();
#else
  torch::Tensor data = input.clone();
  features = features;

  // Emulate movedim(1, -1) with successive pairwise transposes.
  for (int i = 1; i < input.dim() - 1; i++) {
    data = data.transpose(i, i + 1);
    features = features.transpose(i, i + 1);
  }

  data = data.contiguous();
  features = features.contiguous();
#endif

#ifdef WITH_CUDA
  if (torch::cuda::is_available() && data.is_cuda()) {
    CHECK_CONTIGUOUS_CUDA(data);

    // The CUDA path is compiled per (channel, feature) combination, so the
    // runtime sizes must fall within the instantiated maxima.
    if (channelCount > PHL_CUDA_MAX_CHANNELS) {
      throw std::runtime_error(
          "PHL filtering not implemented for channel count > " + std::to_string(PHL_CUDA_MAX_CHANNELS));
    }

    if (featureCount > PHL_CUDA_MAX_FEATURES) {
      throw std::runtime_error(
          "PHL filtering not implemented for feature count > " + std::to_string(PHL_CUDA_MAX_FEATURES));
    }

// Expand one case per (channel, feature) pair; SWITCH_AB selects at runtime
// and AT_DISPATCH selects the floating scalar type. Filtering is in-place on
// the interleaved `data` clone, one batch item at a time.
#define CASE(dc, fc)                                                                             \
  AT_DISPATCH_FLOATING_TYPES(data.scalar_type(), "PermutohedralCuda", ([&] {                     \
                               for (int batchIndex = 0; batchIndex < batchCount; batchIndex++) { \
                                 scalar_t* offsetData = data.data_ptr<scalar_t>() + batchIndex * batchStride; \
                                 scalar_t* offsetFeatures =                                      \
                                     features.data_ptr<scalar_t>() + batchIndex * fc * elementCount; \
                                 PermutohedralCuda<scalar_t, dc, fc>(offsetData, offsetFeatures, elementCount, true); \
                               }                                                                 \
                             }));
    SWITCH_AB(CASE, PHL_CUDA_MAX_CHANNELS, PHL_CUDA_MAX_FEATURES, channelCount, featureCount);

  } else {
#endif
    // CPU fallback: fully dynamic in channel/feature counts.
    AT_DISPATCH_FLOATING_TYPES(
        data.scalar_type(), "PermutohedralCPU", ([&] {
          for (int batchIndex = 0; batchIndex < batchCount; batchIndex++) {
            scalar_t* offsetData = data.data_ptr<scalar_t>() + batchIndex * batchStride;
            scalar_t* offsetFeatures = features.data_ptr<scalar_t>() + batchIndex * featureCount * elementCount;
            PermutohedralCPU<scalar_t>(offsetData, offsetFeatures, channelCount, featureCount, elementCount);
          }
        }));
#ifdef WITH_CUDA
  }
#endif

  // Move the channel axis back to position 1.
  // movedim not support in torch < 1.7.1
#if MONAI_TORCH_VERSION >= 10701
  data = data.movedim(-1, 1);
#else
  for (int i = input.dim() - 1; i > 1; i--) {
    data = data.transpose(i - 1, i);
  }
#endif

  return data;
}
source_code/SegMamba/monai/csrc/filtering/permutohedral/permutohedral.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #pragma once
15
+
16
+ #include <torch/extension.h>
17
+
18
+ #define PHL_CUDA_MAX_CHANNELS 16
19
+ #define PHL_CUDA_MAX_FEATURES 19
20
+
21
+ template <typename scalar_t>
22
+ void PermutohedralCPU(scalar_t* data, scalar_t* features, int dataChannels, int featureChannels, int elementCount);
23
+ #ifdef WITH_CUDA
24
+ template <typename scalar_t, int dc, int fc>
25
+ void PermutohedralCuda(scalar_t* data, scalar_t* features, int elementCount, bool accurate);
26
+ #endif
27
+
28
+ torch::Tensor PermutohedralFilter(torch::Tensor input, torch::Tensor features);
source_code/SegMamba/monai/csrc/filtering/permutohedral/permutohedral_cpu.cpp ADDED
@@ -0,0 +1,502 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ /*
15
+ Adapted from https://github.com/abadams/permutohedral
16
+ which has the following license...
17
+
18
+ MIT License
19
+
20
+ Copyright (c) 2020 Andrew Adams
21
+
22
+ Permission is hereby granted, free of charge, to any person obtaining a copy
23
+ of this software and associated documentation files (the "Software"), to deal
24
+ in the Software without restriction, including without limitation the rights
25
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
26
+ copies of the Software, and to permit persons to whom the Software is
27
+ furnished to do so, subject to the following conditions:
28
+
29
+ The above copyright notice and this permission notice shall be included in all
30
+ copies or substantial portions of the Software.
31
+
32
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
35
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
36
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
37
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38
+ SOFTWARE.
39
+ */
40
+
41
+ #include <math.h>
42
+ #include <string.h>
43
+
44
+ #include <torch/extension.h>
45
+
46
+ using namespace std;
47
+
48
+ /***************************************************************/
49
+ /* Hash table implementation for permutohedral lattice
50
+ *
51
+ * The lattice points are stored sparsely using a hash table.
52
+ * The key for each point is its spatial location in the (d+1)-
53
+ * dimensional space.
54
+ */
55
+ /***************************************************************/
56
template <typename scalar_t>
class HashTablePermutohedral {
 public:
  /* Constructor
   * kd_: the dimensionality of the position vectors on the hyperplane.
   * vd_: the dimensionality of the value vectors
   *
   * Storage starts at 2^15 entry slots; the key/value arrays hold capacity/2
   * vectors because the table grows before exceeding a 1/2 fill factor.
   * NOTE(review): the class defines no destructor, so keys/values/entries are
   * never freed; instances appear to live only for one filter call — confirm
   * before reusing this class elsewhere.
   */
  HashTablePermutohedral(int kd_, int vd_) : kd(kd_), vd(vd_) {
    capacity = 1 << 15;
    filled = 0;
    entries = new Entry[capacity];
    keys = new short[kd * capacity / 2];
    values = new scalar_t[vd * capacity / 2];
    memset(values, 0, sizeof(scalar_t) * vd * capacity / 2);
  }

  // Returns the number of vectors stored.
  int size() {
    return filled;
  }

  // Returns a pointer to the keys array.
  short* getKeys() {
    return keys;
  }

  // Returns a pointer to the values array.
  scalar_t* getValues() {
    return values;
  }

  /* Returns the index into the hash table for a given key.
   * key: a pointer to the position vector.
   * h: hash of the position vector.
   * create: a flag specifying whether an entry should be created,
   * should an entry with the given key not found.
   */
  int lookupOffset(short* key, size_t h, bool create = true) {
    // Double hash table size if necessary
    // NOTE(review): `h` was computed modulo the pre-grow capacity, so after a
    // grow() the probe start is stale relative to the rehashed entries —
    // presumably tolerated upstream (worst case a duplicate entry); verify
    // against the reference implementation.
    if (filled >= (capacity / 2) - 1) {
      grow();
    }

    // Find the entry with the given key
    while (1) {
      Entry e = entries[h];
      // check if the cell is empty
      if (e.keyIdx == -1) {
        if (!create)
          return -1; // Return not found.
        // need to create an entry. Store the given key.
        for (int i = 0; i < kd; i++)
          keys[filled * kd + i] = key[i];
        e.keyIdx = filled * kd;
        e.valueIdx = filled * vd;
        entries[h] = e;
        filled++;
        return e.valueIdx;
      }

      // check if the cell has a matching key
      bool match = true;
      for (int i = 0; i < kd && match; i++)
        match = keys[e.keyIdx + i] == key[i];
      if (match)
        return e.valueIdx;

      // increment the bucket with wraparound
      h++;
      if (h == capacity)
        h = 0;
    }
  }

  /* Looks up the value vector associated with a given key vector.
   * k : pointer to the key vector to be looked up.
   * create : true if a non-existing key should be created.
   * Returns NULL when the key is absent and create is false.
   */
  scalar_t* lookup(short* k, bool create = true) {
    size_t h = hash(k) % capacity;
    int offset = lookupOffset(k, h, create);
    if (offset < 0)
      return NULL;
    else
      return values + offset;
  };

  /* Hash function used in this implementation. A simple base conversion. */
  size_t hash(const short* key) {
    size_t k = 0;
    for (int i = 0; i < kd; i++) {
      k += key[i];
      k *= 2531011;
    }
    return k;
  }

 private:
  /* Grows the size of the hash table: doubles capacity, copies the key/value
   * storage, and re-inserts every occupied entry under the new modulus. */
  void grow() {
    size_t oldCapacity = capacity;
    capacity *= 2;

    // Migrate the value vectors.
    scalar_t* newValues = new scalar_t[vd * capacity / 2];
    memset(newValues, 0, sizeof(scalar_t) * vd * capacity / 2);
    memcpy(newValues, values, sizeof(scalar_t) * vd * filled);
    delete[] values;
    values = newValues;

    // Migrate the key vectors.
    short* newKeys = new short[kd * capacity / 2];
    memcpy(newKeys, keys, sizeof(short) * kd * filled);
    delete[] keys;
    keys = newKeys;

    Entry* newEntries = new Entry[capacity];

    // Migrate the table of indices: rehash each occupied slot and
    // linear-probe to its position in the enlarged table.
    for (size_t i = 0; i < oldCapacity; i++) {
      if (entries[i].keyIdx == -1)
        continue;
      size_t h = hash(keys + entries[i].keyIdx) % capacity;
      while (newEntries[h].keyIdx != -1) {
        h++;
        if (h == capacity)
          h = 0;
      }
      newEntries[h] = entries[i];
    }
    delete[] entries;
    entries = newEntries;
  }

  // Private struct for the hash table entries.
  // keyIdx/valueIdx of -1 mark an unused slot.
  struct Entry {
    Entry() : keyIdx(-1), valueIdx(-1) {}
    int keyIdx;
    int valueIdx;
  };

  short* keys;      // flat key storage, kd shorts per stored vector
  scalar_t* values; // flat value storage, vd scalars per stored vector
  Entry* entries;   // open-addressed index table (size = capacity)
  size_t capacity, filled;
  int kd, vd;
};
203
+
204
+ /***************************************************************/
205
+ /* The algorithm class that performs the filter
206
+ *
207
+ * PermutohedralLattice::filter(...) does all the work.
208
+ *
209
+ */
210
+ /***************************************************************/
211
+ template <typename scalar_t>
212
+ class PermutohedralLattice {
213
+ public:
214
+ /* Filters given image against a reference image.
215
+ * im : image to be bilateral-filtered.
216
+ * ref : reference image whose edges are to be respected.
217
+ */
218
+ static void filter(scalar_t* data, scalar_t* features, int dataChannels, int featureChannels, int elementCount) {
219
+ // Create lattice
220
+ PermutohedralLattice lattice(featureChannels, dataChannels + 1, elementCount);
221
+
222
+ // Splat into the lattice
223
+ scalar_t* col = new scalar_t[dataChannels + 1];
224
+ col[dataChannels] = 1; // homogeneous coordinate
225
+
226
+ for (int i = 0, e = 0; e < elementCount; e++) {
227
+ for (int c = 0; c < dataChannels; c++, i++) {
228
+ col[c] = data[i];
229
+ }
230
+
231
+ scalar_t* featureVec = features + e * featureChannels;
232
+ lattice.splat(featureVec, col);
233
+ }
234
+
235
+ // Blur the lattice
236
+ lattice.blur();
237
+
238
+ // Slice from the lattice
239
+ lattice.beginSlice();
240
+
241
+ for (int i = 0, e = 0; e < elementCount; e++) {
242
+ lattice.slice(col);
243
+
244
+ scalar_t scale = 1.0f / col[dataChannels];
245
+ for (int c = 0; c < dataChannels; c++, i++) {
246
+ data[i] = col[c] * scale;
247
+ }
248
+ }
249
+ }
250
+
251
+ /* Constructor
252
+ * d_ : dimensionality of key vectors
253
+ * vd_ : dimensionality of value vectors
254
+ * nData_ : number of points in the input
255
+ */
256
+ PermutohedralLattice(int d_, int vd_, int nData_) : d(d_), vd(vd_), nData(nData_), hashTable(d_, vd_) {
257
+ // Allocate storage for various arrays
258
+ elevated = new scalar_t[d + 1];
259
+ scaleFactor = new scalar_t[d];
260
+
261
+ greedy = new short[d + 1];
262
+ rank = new char[d + 1];
263
+ barycentric = new scalar_t[d + 2];
264
+ replay = new ReplayEntry[nData * (d + 1)];
265
+ nReplay = 0;
266
+ canonical = new short[(d + 1) * (d + 1)];
267
+ key = new short[d + 1];
268
+
269
+ // compute the coordinates of the canonical simplex, in which
270
+ // the difference between a contained point and the zero
271
+ // remainder vertex is always in ascending order. (See pg.4 of paper.)
272
+ for (int i = 0; i <= d; i++) {
273
+ for (int j = 0; j <= d - i; j++)
274
+ canonical[i * (d + 1) + j] = i;
275
+ for (int j = d - i + 1; j <= d; j++)
276
+ canonical[i * (d + 1) + j] = i - (d + 1);
277
+ }
278
+
279
+ // Compute parts of the rotation matrix E. (See pg.4-5 of paper.)
280
+ for (int i = 0; i < d; i++) {
281
+ // the diagonal entries for normalization
282
+ scaleFactor[i] = 1.0f / (sqrtf((scalar_t)(i + 1) * (i + 2)));
283
+
284
+ /* We presume that the user would like to do a Gaussian blur of standard deviation
285
+ * 1 in each dimension (or a total variance of d, summed over dimensions.)
286
+ * Because the total variance of the blur performed by this algorithm is not d,
287
+ * we must scale the space to offset this.
288
+ *
289
+ * The total variance of the algorithm is (See pg.6 and 10 of paper):
290
+ * [variance of splatting] + [variance of blurring] + [variance of splatting]
291
+ * = d(d+1)(d+1)/12 + d(d+1)(d+1)/2 + d(d+1)(d+1)/12
292
+ * = 2d(d+1)(d+1)/3.
293
+ *
294
+ * So we need to scale the space by (d+1)sqrt(2/3).
295
+ */
296
+ scaleFactor[i] *= (d + 1) * sqrtf(2.0 / 3);
297
+ }
298
+ }
299
+
300
/* Performs splatting with given position and value vectors.
 * The d-dimensional `position` is embedded into the (d+1)-dimensional
 * hyperplane, the enclosing lattice simplex is located, and the
 * vd-dimensional `value` is accumulated onto the simplex's d+1 vertices
 * with barycentric weights. Each vertex interaction is also recorded in
 * `replay` so slice() can replay it later without re-deriving the simplex. */
void splat(scalar_t* position, scalar_t* value) {
  // first rotate position into the (d+1)-dimensional hyperplane
  // (the embedding E from pg.4-5 of the paper, applied without forming the matrix)
  elevated[d] = -d * position[d - 1] * scaleFactor[d - 1];
  for (int i = d - 1; i > 0; i--)
    elevated[i] =
        (elevated[i + 1] - i * position[i - 1] * scaleFactor[i - 1] + (i + 2) * position[i] * scaleFactor[i]);
  elevated[0] = elevated[1] + 2 * position[0] * scaleFactor[0];

  // prepare to find the closest lattice points
  scalar_t scale = 1.0f / (d + 1);
  char* myrank = rank;
  short* mygreedy = greedy;

  // greedily search for the closest zero-colored lattice point
  // (round each elevated coordinate to the nearest multiple of d+1)
  int sum = 0;
  for (int i = 0; i <= d; i++) {
    scalar_t v = elevated[i] * scale;
    scalar_t up = ceilf(v) * (d + 1);
    scalar_t down = floorf(v) * (d + 1);

    if (up - elevated[i] < elevated[i] - down)
      mygreedy[i] = (short)up;
    else
      mygreedy[i] = (short)down;

    sum += mygreedy[i];
  }
  // sum is a multiple of d+1; nonzero means the rounded point left the hyperplane
  sum /= d + 1;

  // rank differential to find the permutation between this simplex and the canonical one.
  // (See pg. 3-4 in paper.)
  memset(myrank, 0, sizeof(char) * (d + 1));
  for (int i = 0; i < d; i++)
    for (int j = i + 1; j <= d; j++)
      if (elevated[i] - mygreedy[i] < elevated[j] - mygreedy[j])
        myrank[i]++;
      else
        myrank[j]++;

  if (sum > 0) {
    // sum too large - the point is off the hyperplane.
    // need to bring down the ones with the smallest differential
    for (int i = 0; i <= d; i++) {
      if (myrank[i] >= d + 1 - sum) {
        mygreedy[i] -= d + 1;
        myrank[i] += sum - (d + 1);
      } else
        myrank[i] += sum;
    }
  } else if (sum < 0) {
    // sum too small - the point is off the hyperplane
    // need to bring up the ones with largest differential
    for (int i = 0; i <= d; i++) {
      if (myrank[i] < -sum) {
        mygreedy[i] += d + 1;
        myrank[i] += (d + 1) + sum;
      } else
        myrank[i] += sum;
    }
  }

  // Compute barycentric coordinates (See pg.10 of paper.)
  memset(barycentric, 0, sizeof(scalar_t) * (d + 2));
  for (int i = 0; i <= d; i++) {
    barycentric[d - myrank[i]] += (elevated[i] - mygreedy[i]) * scale;
    barycentric[d + 1 - myrank[i]] -= (elevated[i] - mygreedy[i]) * scale;
  }
  // wrap-around: fold the (d+1)-th coordinate back into the zeroth
  barycentric[0] += 1.0f + barycentric[d + 1];

  // Splat the value into each vertex of the simplex, with barycentric weights.
  for (int remainder = 0; remainder <= d; remainder++) {
    // Compute the location of the lattice point explicitly (all but the last coordinate - it's redundant because they
    // sum to zero)
    for (int i = 0; i < d; i++)
      key[i] = mygreedy[i] + canonical[remainder * (d + 1) + myrank[i]];

    // Retrieve pointer to the value at this vertex (the `true` flag inserts the key if absent).
    scalar_t* val = hashTable.lookup(key, true);

    // Accumulate values with barycentric weight.
    for (int i = 0; i < vd; i++)
      val[i] += barycentric[remainder] * value[i];

    // Record this interaction to use later when slicing
    replay[nReplay].offset = val - hashTable.getValues();
    replay[nReplay].weight = barycentric[remainder];
    nReplay++;
  }
}
390
+
391
// Prepare for slicing: rewind the replay cursor so slice() re-walks the
// (offset, weight) interactions recorded by splat(), in the same order.
void beginSlice() {
  nReplay = 0;
}
395
+
396
/* Performs slicing out of position vectors. Note that the barycentric weights and the simplex
 * containing each position vector were calculated and stored in the splatting step.
 * We may reuse this to accelerate the algorithm. (See pg. 6 in paper.)
 *
 * Writes the vd-dimensional filtered value of the next recorded point into `col`.
 * Must be called once per splatted point, in splat order, after beginSlice(). */
void slice(scalar_t* col) {
  scalar_t* base = hashTable.getValues();
  // zero the accumulator
  for (int j = 0; j < vd; j++)
    col[j] = 0;
  // gather the d+1 vertex contributions recorded during splatting
  for (int i = 0; i <= d; i++) {
    ReplayEntry r = replay[nReplay++];
    for (int j = 0; j < vd; j++) {
      col[j] += r.weight * base[r.offset + j];
    }
  }
}
411
+
412
/* Performs a Gaussian blur along each projected axis in the hyperplane.
 * For each of the d+1 lattice axes in turn, every stored vertex value is
 * mixed with its two neighbours along that axis using a [1/4, 1/2, 1/4]
 * kernel. Results ping-pong between the hash table's value storage and a
 * temporary buffer; a final memcpy restores the data into the hash table
 * if the last pass ended in the temporary buffer. */
void blur() {
  // Prepare arrays
  short* neighbor1 = new short[d + 1];
  short* neighbor2 = new short[d + 1];
  scalar_t* newValue = new scalar_t[vd * hashTable.size()];
  scalar_t* oldValue = hashTable.getValues();
  scalar_t* hashTableBase = oldValue; // remember the real storage for the final copy-back

  // zero vector used in place of missing neighbours (lattice boundary)
  scalar_t* zero = new scalar_t[vd];
  for (int k = 0; k < vd; k++)
    zero[k] = 0;

  // For each of d+1 axes,
  for (int j = 0; j <= d; j++) {
    // For each vertex in the lattice,
    for (int i = 0; i < hashTable.size(); i++) { // blur point i in dimension j
      short* key = hashTable.getKeys() + i * (d); // keys to current vertex
      for (int k = 0; k < d; k++) {
        neighbor1[k] = key[k] + 1;
        neighbor2[k] = key[k] - 1;
      }
      // NOTE(review): when j == d this reads key[d], one element past the stored
      // d-short key (inherited from the upstream implementation); appears benign
      // if lookup only consumes d shorts per key - verify against HashTablePermutohedral.
      neighbor1[j] = key[j] - d;
      neighbor2[j] = key[j] + d; // keys to the neighbors along the given axis.

      scalar_t* oldVal = oldValue + i * vd;
      scalar_t* newVal = newValue + i * vd;

      scalar_t *vm1, *vp1;

      vm1 = hashTable.lookup(neighbor1, false); // look up first neighbor
      if (vm1)
        // translate the pointer from hash-table storage into the current read buffer
        vm1 = vm1 - hashTableBase + oldValue;
      else
        vm1 = zero;

      vp1 = hashTable.lookup(neighbor2, false); // look up second neighbor
      if (vp1)
        vp1 = vp1 - hashTableBase + oldValue;
      else
        vp1 = zero;

      // Mix values of the three vertices
      for (int k = 0; k < vd; k++)
        newVal[k] = (0.25f * vm1[k] + 0.5f * oldVal[k] + 0.25f * vp1[k]);
    }
    // swap read/write buffers for the next axis
    scalar_t* tmp = newValue;
    newValue = oldValue;
    oldValue = tmp;
    // the freshest data is now in oldValue, and newValue is ready to be written over
  }

  // depending where we ended up, we may have to copy data
  if (oldValue != hashTableBase) {
    memcpy(hashTableBase, oldValue, hashTable.size() * vd * sizeof(scalar_t));
    delete[] oldValue; // this is the temporary buffer after an odd number of swaps
  } else {
    delete[] newValue;
  }

  delete[] zero;
  delete[] neighbor1;
  delete[] neighbor2;
}
476
+
477
+ private:
478
+ int d, vd, nData;
479
+ scalar_t *elevated, *scaleFactor, *barycentric;
480
+ short* canonical;
481
+ short* key;
482
+
483
+ // slicing is done by replaying splatting (ie storing the sparse matrix)
484
+ struct ReplayEntry {
485
+ int offset;
486
+ scalar_t weight;
487
+ } * replay;
488
+ int nReplay, nReplaySub;
489
+
490
+ public:
491
+ char* rank;
492
+ short* greedy;
493
+ HashTablePermutohedral<scalar_t> hashTable;
494
+ };
495
+
496
/* Public CPU entry point: permutohedral filtering of `data` (dataChannels
 * values per element) guided by `features` (featureChannels values per
 * element) over `elementCount` elements. Delegates entirely to
 * PermutohedralLattice<scalar_t>::filter. */
template <typename scalar_t>
void PermutohedralCPU(scalar_t* data, scalar_t* features, int dataChannels, int featureChannels, int elementCount) {
  PermutohedralLattice<scalar_t>::filter(data, features, dataChannels, featureChannels, elementCount);
}

// Explicit instantiations for the floating-point types exposed to callers.
template void PermutohedralCPU(float* data, float* features, int dataChannels, int featureChannels, int elementCount);
template void PermutohedralCPU(double* data, double* features, int dataChannels, int featureChannels, int elementCount);
source_code/SegMamba/monai/csrc/filtering/permutohedral/permutohedral_cuda.cu ADDED
@@ -0,0 +1,540 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ /*
15
+ Adapted from https://github.com/abadams/permutohedral
16
+ which has the following license...
17
+
18
+ MIT License
19
+
20
+ Copyright (c) 2020 Andrew Adams
21
+
22
+ Permission is hereby granted, free of charge, to any person obtaining a copy
23
+ of this software and associated documentation files (the "Software"), to deal
24
+ in the Software without restriction, including without limitation the rights
25
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
26
+ copies of the Software, and to permit persons to whom the Software is
27
+ furnished to do so, subject to the following conditions:
28
+
29
+ The above copyright notice and this permission notice shall be included in all
30
+ copies or substantial portions of the Software.
31
+
32
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
35
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
36
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
37
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38
+ SOFTWARE.
39
+ */
40
+
41
+ #define BLOCK_SIZE 32
42
+
43
+ #include <cuda.h>
44
+ #include <cuda_runtime.h>
45
+ #include <stdio.h>
46
+ #include <torch/extension.h>
47
+ #include <THC/THCAtomics.cuh>
48
+
49
+ #include "hash_table.cuh"
50
+ #include "permutohedral.h"
51
+ #include "utils/meta_macros.h"
52
+
53
// One (lattice vertex, weight) interaction produced at splat time.
// `index` initially holds a slot in the hash table's entries array and is
// later rewritten by splat/splatCache to the resolved key/value offset;
// `weight` is the barycentric weight of this vertex for its element.
template <typename scalar_t>
struct MatrixEntry {
  int index;
  scalar_t weight;
};
58
+
59
// Kernel: one thread per input element. Embeds each pd-dimensional position
// into the lattice hyperplane, finds the enclosing simplex, computes
// barycentric weights, inserts the pd+1 vertex keys into the hash table and
// records one MatrixEntry per (element, vertex) pair in `matrix`.
// `values` is passed but not read here; weights come from the positions only.
template <typename scalar_t, int pd>
__global__ static void createMatrix(
    const int elementCount,
    const scalar_t* positions,
    const scalar_t* values,
    const scalar_t* scaleFactor,
    MatrixEntry<scalar_t>* matrix) {
  const int threadId = threadIdx.x;
  const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
  // out-of-range threads still participate below so shared-memory layout stays uniform
  const bool outOfBounds = idx >= elementCount;

  scalar_t myElevated[pd + 1];
  const scalar_t* myPosition = positions + idx * pd;

  int myGreedy[pd + 1];
  int myRank[pd + 1];

  scalar_t myBarycentric[pd + 2];
  // per-thread key scratch lives in shared memory
  __shared__ short keys[pd * BLOCK_SIZE];
  short* myKey = keys + threadId * pd;

  if (!outOfBounds) {
    // rotate the position into the (pd+1)-dimensional hyperplane
    myElevated[pd] = -pd * myPosition[pd - 1] * scaleFactor[pd - 1];

    for (int i = pd - 1; i > 0; i--) {
      myElevated[i] =
          myElevated[i + 1] - i * (myPosition[i - 1]) * scaleFactor[i - 1] + (i + 2) * myPosition[i] * scaleFactor[i];
    }

    myElevated[0] = myElevated[1] + 2 * myPosition[0] * scaleFactor[0];

    // find the closest zero-colored lattice point

    // greedily search for the closest zero-colored lattice point
    signed short sum = 0;

    for (int i = 0; i <= pd; i++) {
      scalar_t v = myElevated[i] * (1.0f / (pd + 1));
      scalar_t up = ceilf(v) * (pd + 1);
      scalar_t down = floorf(v) * (pd + 1);

      myGreedy[i] = (signed short)(up - myElevated[i] < myElevated[i] - down ? up : down);
      sum += myGreedy[i];
    }

    // nonzero sum means the rounded point left the hyperplane
    sum /= pd + 1;

    // sort differential to find the permutation between this simplex and the canonical one
    // (ties broken by index so the ranking is a strict permutation)
    for (int i = 0; i <= pd; i++) {
      myRank[i] = 0;

      for (int j = 0; j <= pd; j++) {
        scalar_t iDiff = myElevated[i] - myGreedy[i];
        scalar_t jDiff = myElevated[j] - myGreedy[j];

        if (iDiff < jDiff || (iDiff == jDiff && i > j)) {
          myRank[i]++;
        }
      }
    }

    if (sum > 0) // sum too large, need to bring down the ones with the smallest differential
    {
      for (int i = 0; i <= pd; i++) {
        if (myRank[i] >= pd + 1 - sum) {
          myGreedy[i] -= (pd + 1);
          myRank[i] += sum - (pd + 1);
        } else {
          myRank[i] += sum;
        }
      }
    } else if (sum < 0) // sum too small, need to bring up the ones with largest differential
    {
      for (int i = 0; i <= pd; i++) {
        if (myRank[i] < -sum) {
          myGreedy[i] += (pd + 1);
          myRank[i] += sum + (pd + 1);
        } else {
          myRank[i] += sum;
        }
      }
    }

#ifdef LINEAR_D_MEMORY
    // store rounded point and rank so keys can be regenerated instead of stored
    for (int i = 0; i <= pd; i++) {
      table_zeros[idx * (pd + 1) + i] = myGreedy[i];
      table_rank[idx * (pd + 1) + i] = myRank[i];
    }
#endif

    // turn delta into barycentric coords
    for (int i = 0; i <= pd + 1; i++) {
      myBarycentric[i] = 0;
    }

    for (int i = 0; i <= pd; i++) {
      scalar_t delta = (myElevated[i] - myGreedy[i]) * (1.0f / (pd + 1));
      myBarycentric[pd - myRank[i]] += delta;
      myBarycentric[pd + 1 - myRank[i]] -= delta;
    }

    // wrap-around: fold the (pd+1)-th coordinate back into the zeroth
    myBarycentric[0] += 1.0f + myBarycentric[pd + 1];
  }

#ifdef USE_ADDITIVE_HASH
  // incremental hashing: start from the zero-remainder vertex's hash
  unsigned int cumulative_hash = hash<pd>(myGreedy);
#endif

  // insert the pd+1 simplex vertices ("colors") into the hash table
  for (int color = 0; color <= pd; color++) {
    // Compute the location of the lattice point explicitly (all but
    // the last coordinate - it's redundant because they sum to zero)
    if (!outOfBounds) {
      for (int i = 0; i < pd; i++) {
        myKey[i] = myGreedy[i] + color;

        if (myRank[i] > pd - color) {
          myKey[i] -= (pd + 1);
        }
      }
    }

#ifdef USE_ADDITIVE_HASH
    // update the running hash with the coordinates that changed for this color
    for (int i = 0; i < pd; i++) {
      if (myRank[i] == pd - color) {
        cumulative_hash += hOffset[i];
      }
    }
#endif

    if (!outOfBounds) {
      MatrixEntry<scalar_t> r;

#ifdef USE_ADDITIVE_HASH
      r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx * (pd + 1) + color);
#else
      r.index = hashTableInsert<pd>(myKey, idx * (pd + 1) + color);
#endif

      r.weight = myBarycentric[color];
      matrix[idx * (pd + 1) + color] = r;
    }
  }
}
202
+
203
// Kernel: deduplicate hash-table entries after the racy insertion phase.
// One thread per entries-array slot (elementCount = table capacity). Any slot
// that claimed a key re-resolves that key through hashTableRetrieve, which
// always returns the earliest insertion, so later duplicates collapse onto
// one canonical slot.
template <typename scalar_t, int kd>
__global__ static void cleanHashTable(const int elementCount, MatrixEntry<scalar_t>* matrix) {
  const int slot = threadIdx.x + blockIdx.x * blockDim.x;

  if (slot >= elementCount)
    return;

  // my hash table entry
  int* entry = table_entries + slot;

  // Slots never written in the insertion phase stay negative; nothing to fix.
  if (*entry < 0)
    return;

  // Rehash my key and reset the pointer in order to merge with any other
  // pixel that created a different entry under the same key. Race conditions
  // during parallel insertion can insert one key twice; hashTableRetrieve
  // always returns the earlier one, so rehashing here makes all references
  // agree.
#ifdef LINEAR_D_MEMORY
  // Reconstruct my key from the stored zeros/ranks rather than reading it.
  short slotKey[kd];
  generateKey<kd>(*entry, slotKey);
  *entry = hashTableRetrieve<kd>(slotKey);
#else
  *entry = hashTableRetrieve<kd>(table_keys + *entry * kd);
#endif
}
232
+
233
// Kernel: atomically scatter each element's value onto its simplex vertices.
// Launched with blockDim.y == pd+1 so threadIdx.y selects the vertex
// ("color") this thread handles. Also rewrites matrix[].index from an
// entries-array slot to the resolved key/value offset.
template <typename scalar_t, int pd, int vd>
__global__ static void splat(
    const int elementCount,
    scalar_t* values,
    MatrixEntry<scalar_t>* matrix,
    scalar_t* table_values) {
  const int element = threadIdx.x + blockIdx.x * blockDim.x;
  if (element >= elementCount) {
    return;
  }

  const int vertex = threadIdx.y;
  const int entryIdx = element * (pd + 1) + vertex;

  // Resolve the entry through the deduplicated entries array and persist
  // the resolved index for the blur/slice stages.
  MatrixEntry<scalar_t> entry = matrix[entryIdx];
  entry.index = table_entries[entry.index];
  matrix[entryIdx].index = entry.index;

  const scalar_t* src = values + element * vd;
  scalar_t* dst = table_values + entry.index * (vd + 1);

  // Accumulate the weighted value; atomics are needed because many elements
  // can share a vertex.
  for (int j = 0; j < vd; j++) {
    gpuAtomicAdd(dst + j, src[j] * entry.weight);
  }

  // Slot vd is the homogeneous coordinate: the total accumulated weight.
  gpuAtomicAdd(dst + vd, entry.weight);
}
261
+
262
+ // splat splits by color, so extend the y coordinate to our blocks to represent that
263
+ // dim3 oldblocks((w-1)/8+1, (h-1)/8+1, 1);
264
+ // dim3 oldblockSize(8, 8, 1);
265
+ // oldblocks.y *= pd+1;
266
+ // splatCache<pd, vd><<<oldblocks, oldblockSize>>>(w, h, values, matrix);
267
+
268
+ // int blockCount = (elementCount + 1) / BLOCK_SIZE + 1;
269
+ // int blockSize = BLOCK_SIZE;
270
+
271
+ // splatCache<pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd+1)>>>(elementCount, values, matrix);
272
+
273
// Kernel: splatting with a shared-memory pre-reduction. Threads in a block
// that target the same lattice vertex first combine their contributions in
// shared memory, so only one atomic add per (block, vertex) pair reaches
// global memory. Launched with blockDim.y == pd+1; threadIdx.y is the vertex
// ("color") handled by this thread.
template <typename scalar_t, int pd, int vd>
__global__ static void splatCache(
    const int elementCount,
    scalar_t* values,
    MatrixEntry<scalar_t>* matrix,
    scalar_t* table_values) {
  // const int x = threadIdx.x + blockIdx.x * blockDim.x;
  // const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y;

  // const int threadId = threadIdx.y*blockDim.x + threadIdx.x;
  // const int color = blockIdx.y % (pd+1);
  // const int idx = y*w + x;

  const int threadId = threadIdx.x;
  const int color = threadIdx.y;
  const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;

  const bool outOfBounds = idx >= elementCount;

  // per-thread destination offset and weighted value, visible block-wide
  __shared__ int sharedOffsets[BLOCK_SIZE];
  __shared__ scalar_t sharedValues[BLOCK_SIZE * (vd + 1)];

  int myOffset = -1;
  scalar_t* myValue = sharedValues + threadId * (vd + 1);

  if (!outOfBounds) {
    scalar_t* value = values + idx * vd;

    MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color];

    // convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array
    matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index];
    // record the offset into the keys/values array in shared space
    myOffset = sharedOffsets[threadId] = r.index * (vd + 1);

    for (int j = 0; j < vd; j++) {
      myValue[j] = value[j] * r.weight;
    }
    // homogeneous coordinate: the weight itself
    myValue[vd] = r.weight;

  } else {
    // sentinel so in-bounds threads never match this slot
    sharedOffsets[threadId] = -1;
  }

  // all offsets/values must be published before the block-wide scan below
  __syncthreads();

  // am I the first thread in this block to care about this key?

  if (outOfBounds)
    return;

  for (int i = 0; i < BLOCK_SIZE; i++) {
    if (i < threadId) {
      if (myOffset == sharedOffsets[i]) {
        // somebody else with higher priority cares about this key
        return;
      }
    } else if (i > threadId) {
      if (myOffset == sharedOffsets[i]) {
        // someone else with lower priority cares about this key, accumulate it into mine
        for (int j = 0; j <= vd; j++) {
          sharedValues[threadId * (vd + 1) + j] += sharedValues[i * (vd + 1) + j];
        }
      }
    }
  }

  // only the threads with something to write to main memory are still going
  scalar_t* val = table_values + myOffset;
  for (int j = 0; j <= vd; j++) {
    gpuAtomicAdd(val + j, myValue[j]);
  }
}
346
+
347
// Kernel: one pass of the separable lattice blur along the axis selected by
// `color`. One thread per hash-table slot (n = table capacity); each mixes
// its stored (vd+1)-vector with its two neighbours along that axis using a
// [1, 2, 1] stencil and writes the result to `newValues` (the host ping-pongs
// the buffers between passes).
template <typename scalar_t, int pd, int vd>
__global__ static void blur(
    int n,
    scalar_t* newValues,
    MatrixEntry<scalar_t>* matrix,
    int color,
    scalar_t* table_values) {
  const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x;

  if (idx >= n)
    return;

  // Check if I'm valid (only canonical, deduplicated entries are processed)
  if (matrix[idx].index != idx)
    return;

  // find my key and the keys of my neighbours
  short myKey[pd + 1];
  short np[pd + 1];
  short nm[pd + 1];

#ifdef LINEAR_D_MEMORY
  // reconstruct the key instead of reading it from storage
  generateKey<pd>(idx, myKey);
  for (int i = 0; i < pd; i++) {
    np[i] = myKey[i] + 1;
    nm[i] = myKey[i] - 1;
  }
#else
  for (int i = 0; i < pd; i++) {
    myKey[i] = table_keys[idx * pd + i];
    np[i] = myKey[i] + 1;
    nm[i] = myKey[i] - 1;
  }
#endif

  // step along the chosen lattice axis
  np[color] -= pd + 1;
  nm[color] += pd + 1;

#ifdef USE_ADDITIVE_HASH
  // derive neighbour hashes incrementally from my own
  unsigned int hCurrent = hash<pd>(myKey);
  int offNp = hashTableRetrieveWithHash<pd>(hCurrent + hOffset[color], np);
  int offNm = hashTableRetrieveWithHash<pd>(hCurrent - hOffset[color], nm);
#else
  int offNp = hashTableRetrieve<pd>(np);
  int offNm = hashTableRetrieve<pd>(nm);
#endif

  scalar_t* valMe = table_values + (vd + 1) * idx;
  scalar_t* valNp = table_values + (vd + 1) * offNp;
  scalar_t* valNm = table_values + (vd + 1) * offNm;
  scalar_t* valOut = newValues + (vd + 1) * idx;

  // Missing neighbours (negative offsets) are treated as absent; note the
  // branches scale differently (e.g. the last one has no /4). NOTE(review):
  // this matches the upstream implementation - the homogeneous weight channel
  // (index vd) is blurred identically, so slice()'s normalisation appears to
  // absorb the per-entry scale; verify against the reference implementation.
  if (offNp >= 0 && offNm >= 0) {
    for (int i = 0; i <= vd; i++) {
      valOut[i] = (valNp[i] + (valMe[i] * 2) + valNm[i]) / 4;
    }
  } else if (offNp >= 0) {
    for (int i = 0; i <= vd; i++) {
      valOut[i] = (valNp[i] + (valMe[i] * 2)) / 4;
    }
  } else if (offNm >= 0) {
    for (int i = 0; i <= vd; i++) {
      valOut[i] = (valNm[i] + (valMe[i] * 2)) / 4;
    }
  } else {
    for (int i = 0; i <= vd; i++) {
      valOut[i] = valMe[i] * 2;
    }
  }
}
417
+
418
// Kernel: gather the filtered value for each element back out of the lattice.
// One thread per element; reads the pd+1 resolved (index, weight) entries
// written at splat time, accumulates the weighted vertex values, then
// normalises by the accumulated homogeneous weight (slot vd).
template <typename scalar_t, int pd, int vd>
__global__ static void slice(
    const int elementCount,
    scalar_t* values,
    MatrixEntry<scalar_t>* matrix,
    scalar_t* table_values) {
  const int element = threadIdx.x + blockIdx.x * BLOCK_SIZE;
  if (element >= elementCount) {
    return;
  }

  // per-thread accumulator kept in shared memory
  __shared__ scalar_t localValue[BLOCK_SIZE * vd];
  scalar_t* acc = localValue + threadIdx.x * vd;

  for (int j = 0; j < vd; j++) {
    acc[j] = 0;
  }
  scalar_t weightSum = 0;

  // gather from the pd+1 simplex vertices recorded for this element
  for (int vertex = 0; vertex <= pd; vertex++) {
    MatrixEntry<scalar_t> entry = matrix[element * (pd + 1) + vertex];
    scalar_t* stored = table_values + entry.index * (vd + 1);

    for (int j = 0; j < vd; j++) {
      acc[j] += entry.weight * stored[j];
    }

    // homogeneous coordinate carries the accumulated weight
    weightSum += entry.weight * stored[vd];
  }

  // normalise and write back in place
  scalar_t invWeight = 1.0f / weightSum;
  for (int j = 0; j < vd; j++) {
    values[element * vd + j] = acc[j] * invWeight;
  }
}
457
+
458
// Host driver for the GPU permutohedral filter: in-place filters `values`
// (vd channels per element) guided by `positions` (pd channels per element).
// When `accurate` is true the full lattice blur is performed (with the blur
// variance folded out of the embedding scale); otherwise blurring is skipped.
template <typename scalar_t, int vd, int pd>
void PermutohedralCuda(scalar_t* values, scalar_t* positions, int elementCount, bool accurate) {
  scalar_t blurVariance = accurate ? 0.5 : 0;

  scalar_t* scaleFactor;
  cudaMalloc(&scaleFactor, pd * sizeof(scalar_t));

  // embedding scale per dimension; compensates splat/blur variance (1/6 + blurVariance)
  scalar_t scaleFactorHost[pd];
  for (int i = 0; i < pd; i++) {
    scaleFactorHost[i] = (pd + 1) * sqrtf((1.0 / 6 + blurVariance) / ((i + 1) * (i + 2)));
  }

  cudaMemcpy(scaleFactor, scaleFactorHost, pd * sizeof(scalar_t), cudaMemcpyHostToDevice);

  // one MatrixEntry per (element, simplex vertex)
  MatrixEntry<scalar_t>* matrix;
  cudaMalloc(&matrix, elementCount * (pd + 1) * sizeof(MatrixEntry<scalar_t>));

  scalar_t* table_values = createHashTable<scalar_t, pd, vd + 1>(elementCount * (pd + 1));

  // Populate constant memory for hash helpers
  // (constants for division-by-multiplication in the device hash code)
  unsigned long long int __host_two32 = ((unsigned long long int)1) << 32;
  unsigned int __host_div_c = 2 * (elementCount * (pd + 1));
  unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f));
  unsigned int __host_div_m = (__host_two32 << __host_div_l) / __host_div_c - __host_two32 + 1;
  cudaMemcpyToSymbol(__div_c, &__host_div_c, sizeof(unsigned int));
  cudaMemcpyToSymbol(__div_l, &__host_div_l, sizeof(unsigned int));
  cudaMemcpyToSymbol(__div_m, &__host_div_m, sizeof(unsigned int));

  // Populate constant memory with hash of offset vectors
  // (used by the additive-hash fast path to step between neighbour keys)
  unsigned int hOffset_host[pd + 1];
  signed short offset[pd + 1];
  for (int i = 0; i < pd; offset[i] = 1, i++)
    ;
  for (int i = 0; i <= pd; i++) {
    offset[i] -= pd + 1;
    hOffset_host[i] = hash<pd>(offset);
    offset[i] += pd + 1;
  }
  cudaMemcpyToSymbol(hOffset, &hOffset_host, sizeof(unsigned int) * (pd + 1));

  int blockCount = (elementCount + 1) / BLOCK_SIZE + 1;
  int blockSize = BLOCK_SIZE;

  // 1) locate simplices, compute weights, insert keys
  createMatrix<scalar_t, pd><<<blockCount, blockSize>>>(elementCount, positions, values, scaleFactor, matrix);

  // fix duplicate hash table entries
  int tableSize = elementCount * 2 * (pd + 1);
  int cleanBlockSize = 32;
  int cleanBlocks = (tableSize - 1) / cleanBlockSize + 1;

  cleanHashTable<scalar_t, pd><<<cleanBlocks, cleanBlockSize>>>(tableSize, matrix);

  // 2) scatter values onto lattice vertices (one thread per element/vertex)
  splat<scalar_t, pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd + 1)>>>(elementCount, values, matrix, table_values);

  if (accurate) {
    scalar_t* newValues;
    cudaMalloc(&newValues, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t));
    cudaMemset(newValues, 0, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t));

    // 3) separable blur: one pass per lattice axis, ping-ponging buffers
    for (int color = 0; color <= pd; color++) {
      blur<scalar_t, pd, vd>
          <<<cleanBlocks, cleanBlockSize>>>(elementCount * (pd + 1), newValues, matrix, color, table_values);

      scalar_t* swap = newValues;
      newValues = table_values;
      table_values = swap;
    }

    // after the swaps, `newValues` names whichever buffer is now stale
    cudaFree(newValues);
  }

  // 4) gather normalised results back into `values`
  slice<scalar_t, pd, vd><<<blockCount, blockSize>>>(elementCount, values, matrix, table_values);

  destroyHashTable<scalar_t>();
  cudaFree(table_values);
  cudaFree(scaleFactor);
  cudaFree(matrix);
}
536
+
537
// Explicitly instantiate PermutohedralCuda for float and double at every
// (data-channel, feature-channel) combination expanded by DO_FOR_AB
// (defined in utils/meta_macros.h) up to the 16 x 19 bounds below.
#define DECLARATION(dc, fc)                                                                                         \
  template void PermutohedralCuda<float, dc, fc>(float* values, float* positions, int elementCount, bool accurate); \
  template void PermutohedralCuda<double, dc, fc>(double* values, double* positions, int elementCount, bool accurate);
DO_FOR_AB(DECLARATION, 16, 19)
source_code/SegMamba/monai/csrc/filtering/trainable_bilateral/bf_layer_cpu_backward.cpp ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+
13
+ =========================================================================
14
+ Adapted from https://github.com/faebstn96/trainable-bilateral-filter-source
15
+ which has the following license...
16
+ https://github.com/faebstn96/trainable-bilateral-filter-source/blob/main/LICENSE.md
17
+
18
+ Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
19
+ Licensed under the Apache License, Version 2.0 (the "License");
20
+ you may not use this file except in compliance with the License.
21
+ You may obtain a copy of the License at
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+ Unless required by applicable law or agreed to in writing, software
24
+ distributed under the License is distributed on an "AS IS" BASIS,
25
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ See the License for the specific language governing permissions and
27
+ limitations under the License.
28
+ */
29
+
30
+ #include "trainable_bilateral.h"
31
+ #include "utils/tensor_description.h"
32
+ #include "utils/tensor_indexing.h"
33
+
34
+ template <typename scalar_t>
35
+ void BilateralFilterCpuBackward_3d(
36
+ torch::Tensor gradientInputTensor,
37
+ torch::Tensor gradientOutputTensor,
38
+ torch::Tensor inputTensor,
39
+ torch::Tensor outputTensor,
40
+ torch::Tensor outputWeightsTensor,
41
+ torch::Tensor dO_dx_ki,
42
+ float sigma_x,
43
+ float sigma_y,
44
+ float sigma_z,
45
+ float colorSigma) {
46
+ // Getting tensor description.
47
+ TensorDescription desc = TensorDescription(gradientInputTensor);
48
+
49
+ // Raw tensor data pointers.
50
+ scalar_t* gradientInputTensorData = gradientInputTensor.data_ptr<scalar_t>();
51
+ scalar_t* gradientOutputTensorData = gradientOutputTensor.data_ptr<scalar_t>();
52
+ scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
53
+ scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();
54
+ scalar_t* outputWeightsTensorData = outputWeightsTensor.data_ptr<scalar_t>();
55
+ scalar_t* dO_dx_kiData = dO_dx_ki.data_ptr<scalar_t>();
56
+
57
+ // Pre-calculate common values
58
+ int windowSize_x = std::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size
59
+ int windowSize_y = std::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size
60
+ int windowSize_z = std::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size
61
+ int halfWindowSize_x = floor(0.5f * windowSize_x);
62
+ int halfWindowSize_y = floor(0.5f * windowSize_y);
63
+ int halfWindowSize_z = floor(0.5f * windowSize_z);
64
+ int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z};
65
+ scalar_t spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x);
66
+ scalar_t spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y);
67
+ scalar_t spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z);
68
+ scalar_t colorExpConstant = -1.0f / (2 * colorSigma * colorSigma);
69
+
70
+ // Set kernel sizes with respect to the defined spatial sigmas.
71
+ int* kernelSizes = new int[desc.dimensions];
72
+
73
+ kernelSizes[0] = windowSize_x;
74
+ kernelSizes[1] = windowSize_y;
75
+ kernelSizes[2] = windowSize_z;
76
+
77
+ // Pre-calculate gaussian kernel and distance map in 1D.
78
+ scalar_t* gaussianKernel_x = new scalar_t[windowSize_x];
79
+ scalar_t* gaussianKernel_y = new scalar_t[windowSize_y];
80
+ scalar_t* gaussianKernel_z = new scalar_t[windowSize_z];
81
+ scalar_t* xDistanceSquared = new scalar_t[windowSize_x];
82
+ scalar_t* yDistanceSquared = new scalar_t[windowSize_y];
83
+ scalar_t* zDistanceSquared = new scalar_t[windowSize_z];
84
+
85
+ for (int i = 0; i < windowSize_x; i++) {
86
+ int distance = i - halfWindowSize_x;
87
+ gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x);
88
+ xDistanceSquared[i] = distance * distance;
89
+ }
90
+ for (int i = 0; i < windowSize_y; i++) {
91
+ int distance = i - halfWindowSize_y;
92
+ gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y);
93
+ yDistanceSquared[i] = distance * distance;
94
+ }
95
+ for (int i = 0; i < windowSize_z; i++) {
96
+ int distance = i - halfWindowSize_z;
97
+ gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z);
98
+ zDistanceSquared[i] = distance * distance;
99
+ }
100
+
101
+ // Looping over the batches
102
+ for (int b = 0; b < desc.batchCount; b++) {
103
+ int batchOffset = b * desc.batchStride;
104
+
105
+ // Looping over all dimensions for the home element
106
+ for (int z = 0; z < desc.sizes[2]; z++)
107
+ #pragma omp parallel for
108
+ for (int y = 0; y < desc.sizes[1]; y++) {
109
+ for (int x = 0; x < desc.sizes[0]; x++) {
110
+ // Calculating indexing offset for the home element
111
+ int homeOffset = batchOffset;
112
+
113
+ int homeIndex[] = {x, y, z};
114
+ homeOffset += x * desc.strides[0];
115
+ homeOffset += y * desc.strides[1];
116
+ homeOffset += z * desc.strides[2];
117
+
118
+ // Zero kernel aggregates.
119
+ scalar_t filter_kernel = 0;
120
+ scalar_t valueSum = 0;
121
+
122
+ // Looping over all dimensions for the neighbour element
123
+ Indexer kernelIndex = Indexer(desc.dimensions, kernelSizes);
124
+ do // while(kernelIndex++)
125
+ {
126
+ // Calculating buffer offset for the neighbour element
127
+ // Index is clamped to the border in each dimension.
128
+ int neighbourOffset = batchOffset;
129
+ bool flagNotClamped = true;
130
+
131
+ for (int i = 0; i < desc.dimensions; i++) {
132
+ int neighbourIndex = homeIndex[i] + kernelIndex[i] - halfWindowSize_arr[i];
133
+ int neighbourIndexClamped = std::min(desc.sizes[i] - 1, std::max(0, neighbourIndex));
134
+ neighbourOffset += neighbourIndexClamped * desc.strides[i];
135
+ if (neighbourIndex != neighbourIndexClamped) {
136
+ flagNotClamped = false;
137
+ }
138
+ }
139
+
140
+ // Euclidean color distance.
141
+ scalar_t colorDistance = 0;
142
+ scalar_t colorDistanceSquared = 0;
143
+
144
+ for (int i = 0; i < desc.channelCount; i++) {
145
+ scalar_t diff = inputTensorData[neighbourOffset + i * desc.channelStride] -
146
+ inputTensorData[homeOffset +
147
+ i * desc.channelStride]; // Be careful: Here it is (X_k - X_i) and not (X_i - X_q)
148
+ colorDistance += diff; // Do not take the absolute value here. Be careful with the signs.
149
+ colorDistanceSquared += diff * diff;
150
+ }
151
+
152
+ // Calculating and combining the spatial
153
+ // and color weights.
154
+ scalar_t spatialWeight = 1;
155
+
156
+ spatialWeight =
157
+ gaussianKernel_x[kernelIndex[0]] * gaussianKernel_y[kernelIndex[1]] * gaussianKernel_z[kernelIndex[2]];
158
+
159
+ scalar_t colorWeight = exp(colorDistanceSquared * colorExpConstant);
160
+ scalar_t totalWeight = spatialWeight * colorWeight;
161
+
162
+ // Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded.
163
+ if (flagNotClamped) {
164
+ for (int i = 0; i < desc.channelCount; i++) {
165
+ // Distinguish cases for k!=i (calculation is done here)
166
+ // and k==i (partial derivatives are precalculated).
167
+ // If statement replaces center element of neighborhood/kernel.
168
+ if (kernelIndex[0] != halfWindowSize_x || kernelIndex[1] != halfWindowSize_y ||
169
+ kernelIndex[2] != halfWindowSize_z) {
170
+ filter_kernel = -(1 / outputWeightsTensorData[neighbourOffset + i * desc.channelStride]) *
171
+ outputTensorData[neighbourOffset + i * desc.channelStride] * totalWeight * colorDistance /
172
+ (colorSigma * colorSigma) +
173
+ (1 / outputWeightsTensorData[neighbourOffset + i * desc.channelStride]) * totalWeight *
174
+ (1 +
175
+ inputTensorData[homeOffset + i * desc.channelStride] * colorDistance /
176
+ (colorSigma * colorSigma)); // inputTensorData[homeOffset] !!
177
+ } else {
178
+ filter_kernel = dO_dx_kiData[homeOffset + i * desc.channelStride];
179
+ }
180
+
181
+ valueSum += gradientInputTensorData[neighbourOffset + i * desc.channelStride] * filter_kernel;
182
+ }
183
+ }
184
+ } while (kernelIndex++);
185
+
186
+ // Do the filtering and calculate the values for the backward pass.
187
+ for (int i = 0; i < desc.channelCount; i++) {
188
+ // Filtering:
189
+ gradientOutputTensorData[homeOffset + i * desc.channelStride] = valueSum;
190
+ }
191
+ }
192
+ }
193
+ }
194
+
195
+ delete[] kernelSizes;
196
+ delete[] gaussianKernel_x;
197
+ delete[] gaussianKernel_y;
198
+ delete[] gaussianKernel_z;
199
+ delete[] xDistanceSquared;
200
+ delete[] yDistanceSquared;
201
+ delete[] zDistanceSquared;
202
+ }
203
+
204
+ torch::Tensor BilateralFilterCpuBackward(
205
+ torch::Tensor gradientInputTensor,
206
+ torch::Tensor inputTensor,
207
+ torch::Tensor outputTensor,
208
+ torch::Tensor outputWeightsTensor,
209
+ torch::Tensor dO_dx_ki,
210
+ float sigma_x,
211
+ float sigma_y,
212
+ float sigma_z,
213
+ float colorSigma) {
214
+ // Preparing output tensor.
215
+ torch::Tensor gradientOutputTensor = torch::zeros_like(gradientInputTensor);
216
+
217
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradientInputTensor.scalar_type(), "BilateralFilterCpuBackward_3d", ([&] {
218
+ BilateralFilterCpuBackward_3d<scalar_t>(
219
+ gradientInputTensor,
220
+ gradientOutputTensor,
221
+ inputTensor,
222
+ outputTensor,
223
+ outputWeightsTensor,
224
+ dO_dx_ki,
225
+ sigma_x,
226
+ sigma_y,
227
+ sigma_z,
228
+ colorSigma);
229
+ }));
230
+
231
+ return gradientOutputTensor;
232
+ }
source_code/SegMamba/monai/csrc/filtering/trainable_bilateral/bf_layer_gpu_backward.cu ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+
13
+ =========================================================================
14
+ Adapted from https://github.com/faebstn96/trainable-bilateral-filter-source
15
+ which has the following license...
16
+ https://github.com/faebstn96/trainable-bilateral-filter-source/blob/main/LICENSE.md
17
+
18
+ Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
19
+ Licensed under the Apache License, Version 2.0 (the "License");
20
+ you may not use this file except in compliance with the License.
21
+ You may obtain a copy of the License at
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+ Unless required by applicable law or agreed to in writing, software
24
+ distributed under the License is distributed on an "AS IS" BASIS,
25
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ See the License for the specific language governing permissions and
27
+ limitations under the License.
28
+ */
29
+
30
+ #include <cuda.h>
31
+ #include <cuda_runtime.h>
32
+
33
+ #include "trainable_bilateral.h"
34
+ //#include "../utils/cuda_error_check.h"
35
+ #include "utils/meta_macros.h"
36
+ #include "utils/tensor_description.h"
37
+
38
+ __constant__ int cBatchStrideBack;
39
+ __constant__ int cColorStrideBack;
40
+
41
+ __constant__ int cSizesBack[3];
42
+ __constant__ int cStridesBack[3];
43
+
44
+ __constant__ int cKernelSizesBack[3];
45
+ __constant__ int cHalfWindowSize_arrBack[3];
46
+ __constant__ float cGaussianKernel_xBack[256];
47
+ __constant__ float cGaussianKernel_yBack[256];
48
+ __constant__ float cGaussianKernel_zBack[256];
49
+ __constant__ float cXDistanceSquaredBack[256];
50
+ __constant__ float cYDistanceSquaredBack[256];
51
+ __constant__ float cZDistanceSquaredBack[256];
52
+ __constant__ float cColorExponentConstantBack;
53
+ __constant__ float cSigma_xBack;
54
+ __constant__ float cSigma_yBack;
55
+ __constant__ float cSigma_zBack;
56
+ __constant__ float cColorSigmaBack;
57
+
58
// CUDA kernel for the backward pass of the trainable bilateral filter with
// respect to the filter *input* (3-d spatial case). One thread handles one
// spatial location ("home" element) of one batch item; it loops over the full
// x/y/z filter window, accumulates the chain-rule contribution of every
// in-bounds neighbour into valueSum, and writes that sum to every channel of
// gradientOutputTensor at the home location.
template <typename scalar_t, int C>
__global__ void BilateralFilterCudaKernel3DBackward(
    scalar_t* gradientInputTensor,
    scalar_t* gradientOutputTensor,
    scalar_t* inputTensor,
    scalar_t* outputTensor,
    scalar_t* outputWeightsTensor,
    scalar_t* dO_dx_ki) {
  int homeOffset = blockIdx.x * blockDim.x + threadIdx.x;
  int batchOffset = blockIdx.y * cBatchStrideBack;

  // The grid is rounded up on the host side; threads past the spatial extent exit early.
  if (homeOffset >= cColorStrideBack)
    return;

  // Recover the (x, y, z) coordinates from the flat spatial offset.
  int homeX = homeOffset / cStridesBack[0];
  int homeY = (homeOffset - homeX * cStridesBack[0]) / cStridesBack[1];
  int homeZ = (homeOffset - homeX * cStridesBack[0] - homeY * cStridesBack[1]) / cStridesBack[2];
  int homeIndex[] = {homeX, homeY, homeZ};

  // Zero kernel aggregates.
  scalar_t valueSum = 0;

  for (int kernelX = 0; kernelX < cKernelSizesBack[0]; kernelX++) {
    int neighbourX = max(0, min(homeX + (kernelX - cHalfWindowSize_arrBack[0]), cSizesBack[0] - 1));
    scalar_t gaussianX = cGaussianKernel_xBack[kernelX];

    for (int kernelY = 0; kernelY < cKernelSizesBack[1]; kernelY++) {
      int neighbourY = max(0, min(homeY + (kernelY - cHalfWindowSize_arrBack[1]), cSizesBack[1] - 1));
      scalar_t gaussianY = cGaussianKernel_yBack[kernelY];

      for (int kernelZ = 0; kernelZ < cKernelSizesBack[2]; kernelZ++) {
        int neighbourZ = max(0, min(homeZ + (kernelZ - cHalfWindowSize_arrBack[2]), cSizesBack[2] - 1));
        scalar_t gaussianZ = cGaussianKernel_zBack[kernelZ];

        // NOTE(review): neighbourZ is not multiplied by cStridesBack[2] — this
        // assumes the innermost (z) stride is 1, i.e. a contiguous layout.
        // The dispatcher checks contiguity on the CUDA path; confirm before
        // reusing with non-contiguous tensors.
        int neighbourOffset = neighbourX * cStridesBack[0] + neighbourY * cStridesBack[1] + neighbourZ;

        // Detect whether this window position falls outside the image; the
        // clamped offset above is still safe to read, but clamped neighbours
        // are excluded from the gradient accumulation below.
        bool flagNotClamped = true;
        int kernelIndex[] = {kernelX, kernelY, kernelZ};
        int dimensions = 3; // Must equal the number of spatial dimensions.

        for (int i = 0; i < dimensions; i++) {
          int HalfWindowSizeBack = cHalfWindowSize_arrBack[i]; // Define constant memory as new variable here (!!),
                                                              // otherwise: cudaErrorMisalignedAddress
          int neighbourIndex = homeIndex[i] + kernelIndex[i] - HalfWindowSizeBack;
          int neighbourIndexClamped = min(cSizesBack[i] - 1, max(0, neighbourIndex));
          if (neighbourIndex != neighbourIndexClamped) {
            flagNotClamped = false;
          }
        }

        // Colour distance between neighbour and home intensities, summed over
        // channels; both the signed sum and the squared sum are needed below.
        scalar_t colorDistance = 0;
        scalar_t colorDistanceSquared = 0;

#pragma unroll
        for (int c = 0; c < C; c++) {
          scalar_t a = inputTensor[batchOffset + neighbourOffset + c * cColorStrideBack];
          scalar_t b = inputTensor[batchOffset + homeOffset + c * cColorStrideBack]; // Be careful: Here it is (X_k -
                                                                                     // X_i) and not (X_i - X_q)
          scalar_t diff = a - b;
          colorDistance += diff; // Do not take the absolute value here. Be careful with the signs.
          colorDistanceSquared += diff * diff;
        }

        scalar_t spatialWeight = gaussianX * gaussianY * gaussianZ;
        scalar_t colorWeight = exp(cColorExponentConstantBack * colorDistanceSquared);
        scalar_t totalWeight = spatialWeight * colorWeight;

        // Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded.
        if (flagNotClamped) {
          scalar_t filter_kernel_back;

#pragma unroll
          for (int c = 0; c < C; c++) {
            // Distinguish cases for k!=i (calculation is done here)
            // and k==i (partial derivatives are precalculated).
            // If statement replaces center element of neighborhood/kernel.
            if (kernelX != cHalfWindowSize_arrBack[0] || kernelY != cHalfWindowSize_arrBack[1] ||
                kernelZ != cHalfWindowSize_arrBack[2]) {
              filter_kernel_back = -(1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) *
                      outputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * totalWeight * colorDistance /
                      (cColorSigmaBack * cColorSigmaBack) +
                  (1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * totalWeight *
                      (1 +
                       inputTensor[batchOffset + homeOffset + c * cColorStrideBack] * colorDistance /
                           (cColorSigmaBack * cColorSigmaBack)); // inputTensorData[homeOffset] !!
            } else {
              filter_kernel_back = dO_dx_ki[batchOffset + homeOffset + c * cColorStrideBack];
            }

            valueSum += gradientInputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * filter_kernel_back;
          }
        }
      }
    }
  }

  // Broadcast the accumulated gradient to every channel of the home element.
#pragma unroll
  for (int c = 0; c < C; c++) {
    gradientOutputTensor[batchOffset + homeOffset + c * cColorStrideBack] = valueSum;
  }
}
159
+
160
// Host-side launcher for the 3-d backward kernel. Precomputes the 1-d
// Gaussian kernels and squared-distance tables on the host, uploads all
// kernel parameters to constant memory, and launches one thread per spatial
// element (grid y = batch index). The launched kernel fills
// gradientOutputTensor in place.
//
// Template parameters: C is the channel count baked into the kernel at
// compile time; D (spatial rank) is unused here but required by the
// SWITCH_AB dispatch macro in BilateralFilterCudaBackward below.
template <int C, int D>
void BilateralFilterCudaBackwardFunction(
    torch::Tensor gradientInputTensor,
    torch::Tensor gradientOutputTensor,
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    torch::Tensor outputWeightsTensor,
    torch::Tensor dO_dx_ki,
    float sigma_x,
    float sigma_y,
    float sigma_z,
    float colorSigma) {
  // Getting tensor description (sizes, strides, batch/channel layout).
  TensorDescription desc = TensorDescription(inputTensor);

  // Window sizes: roughly 5*sigma, forced odd (| 1) and at least 5.
  // NOTE(review): the constant-memory tables hold 256 entries, so very large
  // sigmas (window > 256) would overflow them — confirm upstream bounds.
  int windowSize_x = std::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size
  int windowSize_y = std::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size
  int windowSize_z = std::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size
  int halfWindowSize_x = floor(0.5f * windowSize_x);
  int halfWindowSize_y = floor(0.5f * windowSize_y);
  int halfWindowSize_z = floor(0.5f * windowSize_z);
  int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z};
  float spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x);
  float spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y);
  float spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z);
  float colorExpConstant = -1.0f / (2 * colorSigma * colorSigma);

  // std::vector instead of the original raw new[]/delete[] pairs: the seven
  // heap buffers leaked if anything below threw; vectors are exception-safe
  // and .data() gives the same contiguous pointer for cudaMemcpyToSymbol.
  std::vector<int> kernelSizes(desc.dimensions);
  kernelSizes[0] = windowSize_x;
  kernelSizes[1] = windowSize_y;
  kernelSizes[2] = windowSize_z;

  // Pre-calculate the separable 1-d Gaussian kernels and distance maps.
  std::vector<float> gaussianKernel_x(windowSize_x);
  std::vector<float> gaussianKernel_y(windowSize_y);
  std::vector<float> gaussianKernel_z(windowSize_z);
  std::vector<float> xDistanceSquared(windowSize_x);
  std::vector<float> yDistanceSquared(windowSize_y);
  std::vector<float> zDistanceSquared(windowSize_z);

  for (int i = 0; i < windowSize_x; i++) {
    int distance = i - halfWindowSize_x;
    gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x);
    xDistanceSquared[i] = distance * distance;
  }
  for (int i = 0; i < windowSize_y; i++) {
    int distance = i - halfWindowSize_y;
    gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y);
    yDistanceSquared[i] = distance * distance;
  }
  for (int i = 0; i < windowSize_z; i++) {
    int distance = i - halfWindowSize_z;
    gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z);
    zDistanceSquared[i] = distance * distance;
  }

  // Writing constant memory.
  cudaMemcpyToSymbol(cBatchStrideBack, &desc.batchStride, sizeof(int));
  cudaMemcpyToSymbol(cColorStrideBack, &desc.channelStride, sizeof(int));
  cudaMemcpyToSymbol(cSizesBack, desc.sizes, sizeof(int) * 3);
  cudaMemcpyToSymbol(cStridesBack, desc.strides, sizeof(int) * 3);
  cudaMemcpyToSymbol(cKernelSizesBack, kernelSizes.data(), sizeof(int) * desc.dimensions);
  cudaMemcpyToSymbol(cHalfWindowSize_arrBack, halfWindowSize_arr, sizeof(int) * desc.dimensions);
  cudaMemcpyToSymbol(cGaussianKernel_xBack, gaussianKernel_x.data(), sizeof(float) * windowSize_x);
  cudaMemcpyToSymbol(cGaussianKernel_yBack, gaussianKernel_y.data(), sizeof(float) * windowSize_y);
  cudaMemcpyToSymbol(cGaussianKernel_zBack, gaussianKernel_z.data(), sizeof(float) * windowSize_z);
  cudaMemcpyToSymbol(cXDistanceSquaredBack, xDistanceSquared.data(), sizeof(float) * windowSize_x);
  cudaMemcpyToSymbol(cYDistanceSquaredBack, yDistanceSquared.data(), sizeof(float) * windowSize_y);
  cudaMemcpyToSymbol(cZDistanceSquaredBack, zDistanceSquared.data(), sizeof(float) * windowSize_z);
  cudaMemcpyToSymbol(cColorExponentConstantBack, &colorExpConstant, sizeof(float));
  cudaMemcpyToSymbol(cSigma_xBack, &sigma_x, sizeof(float));
  cudaMemcpyToSymbol(cSigma_yBack, &sigma_y, sizeof(float));
  cudaMemcpyToSymbol(cSigma_zBack, &sigma_z, sizeof(float));
  cudaMemcpyToSymbol(cColorSigmaBack, &colorSigma, sizeof(float));

  // cuda_error_check("Cuda check before kernel call.");

#define BLOCK_SIZE 32

  // One thread per spatial element; grid y indexes the batch.
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      inputTensor.scalar_type(), "BilateralFilterCudaKernel3DBackward", ([&] {
        BilateralFilterCudaKernel3DBackward<scalar_t, C>
            <<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
                gradientInputTensor.data_ptr<scalar_t>(),
                gradientOutputTensor.data_ptr<scalar_t>(),
                inputTensor.data_ptr<scalar_t>(),
                outputTensor.data_ptr<scalar_t>(),
                outputWeightsTensor.data_ptr<scalar_t>(),
                dO_dx_ki.data_ptr<scalar_t>());
      }));

  // cuda_error_check("Cuda check after kernel call.");
}
261
+
262
// Function to choose template implementation based on dynamic, channels and dimensions
//
// Allocates the gradient output, then uses the SWITCH_AB macro table to pick
// the BilateralFilterCudaBackwardFunction instantiation whose compile-time
// (channel, dimension) pair matches the runtime channel count (size(1)) and
// spatial rank (dim() - 2).
torch::Tensor BilateralFilterCudaBackward(
    torch::Tensor gradientInputTensor,
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    torch::Tensor outputWeightsTensor,
    torch::Tensor dO_dx_ki,
    float sigma_x,
    float sigma_y,
    float sigma_z,
    float colorSigma) {
  // Gradient w.r.t. the input; filled in place by the launched kernel.
  torch::Tensor gradientOutputTensor = torch::zeros_like(gradientInputTensor);
  // cuda_error_check("beginning");

  // CASE(c, d) expands to one call of the launcher for a fixed (channel,
  // dimension) pair; SWITCH_AB (utils/meta_macros.h) generates the dispatch
  // over all pairs up to the BF_CUDA_MAX_* limits.
#define CASE(c, d) \
  BilateralFilterCudaBackwardFunction<c, d>( \
      gradientInputTensor, \
      gradientOutputTensor, \
      inputTensor, \
      outputTensor, \
      outputWeightsTensor, \
      dO_dx_ki, \
      sigma_x, \
      sigma_y, \
      sigma_z, \
      colorSigma);
  SWITCH_AB(
      CASE,
      BF_CUDA_MAX_CHANNELS,
      BF_CUDA_MAX_SPATIAL_DIMENSION,
      gradientInputTensor.size(1),
      gradientInputTensor.dim() - 2);

  return gradientOutputTensor;
}
source_code/SegMamba/monai/csrc/filtering/trainable_bilateral/trainable_bilateral.cpp ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+
13
+ =========================================================================
14
+ Adapted from https://github.com/faebstn96/trainable-bilateral-filter-source
15
+ which has the following license...
16
+ https://github.com/faebstn96/trainable-bilateral-filter-source/blob/main/LICENSE.md
17
+
18
+ Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
19
+ Licensed under the Apache License, Version 2.0 (the "License");
20
+ you may not use this file except in compliance with the License.
21
+ You may obtain a copy of the License at
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+ Unless required by applicable law or agreed to in writing, software
24
+ distributed under the License is distributed on an "AS IS" BASIS,
25
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ See the License for the specific language governing permissions and
27
+ limitations under the License.
28
+ */
29
+
30
+ #include <torch/extension.h>
31
+ #include <stdexcept>
32
+ #include <string>
33
+
34
+ #include "trainable_bilateral.h"
35
+ #include "utils/common_utils.h"
36
+
37
+ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
38
+ TrainableBilateralFilterForward(
39
+ torch::Tensor inputTensor,
40
+ float sigma_x,
41
+ float sigma_y,
42
+ float sigma_z,
43
+ float colorSigma) {
44
+ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> (
45
+ *filterFunction)(torch::Tensor, float, float, float, float);
46
+
47
+ #ifdef WITH_CUDA
48
+
49
+ if (torch::cuda::is_available() && inputTensor.is_cuda()) {
50
+ CHECK_CONTIGUOUS_CUDA(inputTensor);
51
+
52
+ if (inputTensor.size(1) > BF_CUDA_MAX_CHANNELS) {
53
+ throw std::runtime_error(
54
+ "Bilateral filtering not implemented for channel count > " + std::to_string(BF_CUDA_MAX_CHANNELS));
55
+ }
56
+
57
+ if (inputTensor.dim() - 2 > BF_CUDA_MAX_SPATIAL_DIMENSION) {
58
+ throw std::runtime_error(
59
+ "Bilateral filtering not implemented for spatial dimension > " +
60
+ std::to_string(BF_CUDA_MAX_SPATIAL_DIMENSION));
61
+ }
62
+
63
+ filterFunction = &BilateralFilterCudaForward;
64
+ } else {
65
+ filterFunction = &BilateralFilterCpuForward;
66
+ }
67
+ #else
68
+ filterFunction = &BilateralFilterCpuForward;
69
+ #endif
70
+
71
+ return filterFunction(inputTensor, sigma_x, sigma_y, sigma_z, colorSigma);
72
+ }
73
+
74
+ torch::Tensor TrainableBilateralFilterBackward(
75
+ torch::Tensor gradientInputTensor,
76
+ torch::Tensor inputTensor,
77
+ torch::Tensor outputTensor,
78
+ torch::Tensor outputWeightsTensor,
79
+ torch::Tensor dO_dx_ki,
80
+ float sigma_x,
81
+ float sigma_y,
82
+ float sigma_z,
83
+ float colorSigma) {
84
+ torch::Tensor (*filterFunction)(
85
+ torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, float, float, float, float);
86
+
87
+ #ifdef WITH_CUDA
88
+
89
+ if (torch::cuda::is_available() && gradientInputTensor.is_cuda()) {
90
+ CHECK_CONTIGUOUS_CUDA(gradientInputTensor);
91
+
92
+ if (gradientInputTensor.size(1) > BF_CUDA_MAX_CHANNELS) {
93
+ throw std::runtime_error(
94
+ "Bilateral filtering not implemented for channel count > " + std::to_string(BF_CUDA_MAX_CHANNELS));
95
+ }
96
+
97
+ if (gradientInputTensor.dim() - 2 > BF_CUDA_MAX_SPATIAL_DIMENSION) {
98
+ throw std::runtime_error(
99
+ "Bilateral filtering not implemented for spatial dimension > " +
100
+ std::to_string(BF_CUDA_MAX_SPATIAL_DIMENSION));
101
+ }
102
+
103
+ filterFunction = &BilateralFilterCudaBackward;
104
+ } else {
105
+ filterFunction = &BilateralFilterCpuBackward;
106
+ }
107
+ #else
108
+ filterFunction = &BilateralFilterCpuBackward;
109
+ #endif
110
+
111
+ return filterFunction(
112
+ gradientInputTensor,
113
+ inputTensor,
114
+ outputTensor,
115
+ outputWeightsTensor,
116
+ dO_dx_ki,
117
+ sigma_x,
118
+ sigma_y,
119
+ sigma_z,
120
+ colorSigma);
121
+ }
source_code/SegMamba/monai/csrc/filtering/trainable_bilateral/trainable_bilateral.h ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+
13
+ =========================================================================
14
+ Adapted from https://github.com/faebstn96/trainable-bilateral-filter-source
15
+ which has the following license...
16
+ https://github.com/faebstn96/trainable-bilateral-filter-source/blob/main/LICENSE.md
17
+
18
+ Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
19
+ Licensed under the Apache License, Version 2.0 (the "License");
20
+ you may not use this file except in compliance with the License.
21
+ You may obtain a copy of the License at
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+ Unless required by applicable law or agreed to in writing, software
24
+ distributed under the License is distributed on an "AS IS" BASIS,
25
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ See the License for the specific language governing permissions and
27
+ limitations under the License.
28
+ */
29
+
30
#pragma once

#include <torch/extension.h>
#include <algorithm>
#include <iostream>
#include <vector>
#include "utils/common_utils.h"
//#include "utils/tensor_description.h"

// Compile-time limits for the CUDA path: the CUDA kernels are instantiated
// for every (channel count, spatial rank) pair up to these bounds, and the
// dispatchers reject inputs exceeding them at runtime.
#define BF_CUDA_MAX_CHANNELS 16
#define BF_CUDA_MAX_SPATIAL_DIMENSION 3

#ifdef WITH_CUDA
// CUDA backend. The forward pass returns a 7-tuple of tensors — presumably
// the filtered output plus intermediates consumed by the backward pass
// (weights, precomputed partial derivatives); verify against the .cu
// implementations before relying on a specific element order.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
BilateralFilterCudaForward(torch::Tensor inputTensor, float sigma_x, float sigma_y, float sigma_z, float colorSigma);
torch::Tensor BilateralFilterCudaBackward(
    torch::Tensor gradientInputTensor,
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    torch::Tensor outputWeightsTensor,
    torch::Tensor dO_dx_ki,
    float sigma_x,
    float sigma_y,
    float sigma_z,
    float colorSigma);
#endif

// CPU backend; same contract as the CUDA pair above.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
BilateralFilterCpuForward(torch::Tensor inputTensor, float sigma_x, float sigma_y, float sigma_z, float colorSigma);

torch::Tensor BilateralFilterCpuBackward(
    torch::Tensor gradientInputTensor,
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    torch::Tensor outputWeightsTensor,
    torch::Tensor dO_dx_ki,
    float sigma_x,
    float sigma_y,
    float sigma_z,
    float colorSigma);

// Public entry points exposed to Python: choose the CUDA or CPU backend
// based on build flags and the input tensor's device.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
TrainableBilateralFilterForward(
    torch::Tensor inputTensor,
    float sigma_x,
    float sigma_y,
    float sigma_z,
    float colorSigma);

torch::Tensor TrainableBilateralFilterBackward(
    torch::Tensor gradientInputTensor,
    torch::Tensor inputTensor,
    torch::Tensor outputTensor,
    torch::Tensor outputWeightsTensor,
    torch::Tensor dO_dx_ki,
    float sigma_x,
    float sigma_y,
    float sigma_z,
    float colorSigma);
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+
13
+ =========================================================================
14
+ Adapted from https://github.com/faebstn96/trainable-joint-bilateral-filter-source
15
+ which has the following license...
16
+ https://github.com/faebstn96/trainable-joint-bilateral-filter-source/blob/main/LICENSE
17
+
18
+ Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
19
+ Licensed under the Apache License, Version 2.0 (the "License");
20
+ you may not use this file except in compliance with the License.
21
+ You may obtain a copy of the License at
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+ Unless required by applicable law or agreed to in writing, software
24
+ distributed under the License is distributed on an "AS IS" BASIS,
25
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ See the License for the specific language governing permissions and
27
+ limitations under the License.
28
+ */
29
+
30
+ #include "trainable_joint_bilateral.h"
31
+ #include "utils/tensor_description.h"
32
+ #include "utils/tensor_indexing.h"
33
+
34
+ template <typename scalar_t>
35
+ void JointBilateralFilterCpuBackward_3d(
36
+ torch::Tensor gradientInputTensor,
37
+ torch::Tensor gradientGuidanceTensor,
38
+ torch::Tensor gradientOutputTensor,
39
+ torch::Tensor inputTensor,
40
+ torch::Tensor guidanceTensor,
41
+ torch::Tensor outputTensor,
42
+ torch::Tensor outputWeightsTensor,
43
+ torch::Tensor dO_dz_ki,
44
+ float sigma_x,
45
+ float sigma_y,
46
+ float sigma_z,
47
+ float colorSigma) {
48
+ // Getting tensor description.
49
+ TensorDescription desc = TensorDescription(gradientInputTensor);
50
+
51
+ // Raw tensor data pointers.
52
+ scalar_t* gradientInputTensorData = gradientInputTensor.data_ptr<scalar_t>();
53
+ scalar_t* gradientGuidanceTensorData = gradientGuidanceTensor.data_ptr<scalar_t>();
54
+ scalar_t* gradientOutputTensorData = gradientOutputTensor.data_ptr<scalar_t>();
55
+ scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
56
+ scalar_t* guidanceTensorData = guidanceTensor.data_ptr<scalar_t>();
57
+ scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();
58
+ scalar_t* outputWeightsTensorData = outputWeightsTensor.data_ptr<scalar_t>();
59
+ scalar_t* dO_dz_kiData = dO_dz_ki.data_ptr<scalar_t>();
60
+ // scalar_t* dw_dx_kiData = dw_dx_ki_Tensor.data_ptr<scalar_t>();
61
+ // scalar_t* dfilter_dx_kiData = dfilter_dx_ki_Tensor.data_ptr<scalar_t>();
62
+
63
+ // Pre-calculate common values
64
+ int windowSize_x = std::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size
65
+ int windowSize_y = std::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size
66
+ int windowSize_z = std::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size
67
+ int halfWindowSize_x = floor(0.5f * windowSize_x);
68
+ int halfWindowSize_y = floor(0.5f * windowSize_y);
69
+ int halfWindowSize_z = floor(0.5f * windowSize_z);
70
+ int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z};
71
+ scalar_t spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x);
72
+ scalar_t spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y);
73
+ scalar_t spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z);
74
+ scalar_t colorExpConstant = -1.0f / (2 * colorSigma * colorSigma);
75
+
76
+ // Set kernel sizes with respect to the defined spatial sigmas.
77
+ int* kernelSizes = new int[desc.dimensions];
78
+
79
+ kernelSizes[0] = windowSize_x;
80
+ kernelSizes[1] = windowSize_y;
81
+ kernelSizes[2] = windowSize_z;
82
+
83
+ // Pre-calculate gaussian kernel and distance map in 1D.
84
+ scalar_t* gaussianKernel_x = new scalar_t[windowSize_x];
85
+ scalar_t* gaussianKernel_y = new scalar_t[windowSize_y];
86
+ scalar_t* gaussianKernel_z = new scalar_t[windowSize_z];
87
+ scalar_t* xDistanceSquared = new scalar_t[windowSize_x];
88
+ scalar_t* yDistanceSquared = new scalar_t[windowSize_y];
89
+ scalar_t* zDistanceSquared = new scalar_t[windowSize_z];
90
+
91
+ for (int i = 0; i < windowSize_x; i++) {
92
+ int distance = i - halfWindowSize_x;
93
+ gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x);
94
+ xDistanceSquared[i] = distance * distance;
95
+ }
96
+ for (int i = 0; i < windowSize_y; i++) {
97
+ int distance = i - halfWindowSize_y;
98
+ gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y);
99
+ yDistanceSquared[i] = distance * distance;
100
+ }
101
+ for (int i = 0; i < windowSize_z; i++) {
102
+ int distance = i - halfWindowSize_z;
103
+ gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z);
104
+ zDistanceSquared[i] = distance * distance;
105
+ }
106
+
107
+ // Looping over the batches
108
+ for (int b = 0; b < desc.batchCount; b++) {
109
+ int batchOffset = b * desc.batchStride;
110
+
111
+ // Looping over all dimensions for the home element
112
+ for (int z = 0; z < desc.sizes[2]; z++)
113
+ #pragma omp parallel for
114
+ for (int y = 0; y < desc.sizes[1]; y++) {
115
+ for (int x = 0; x < desc.sizes[0]; x++) {
116
+ // Calculating indexing offset for the home element
117
+ int homeOffset = batchOffset;
118
+
119
+ int homeIndex[] = {x, y, z};
120
+ homeOffset += x * desc.strides[0];
121
+ homeOffset += y * desc.strides[1];
122
+ homeOffset += z * desc.strides[2];
123
+
124
+ // Zero kernel aggregates.
125
+ scalar_t filter_kernel_guidance = 0;
126
+ scalar_t valueSumGuidance = 0;
127
+ scalar_t valueSumInput = 0;
128
+
129
+ // Looping over all dimensions for the neighbour element
130
+ Indexer kernelIndex = Indexer(desc.dimensions, kernelSizes);
131
+ do // while(kernelIndex++)
132
+ {
133
+ // Calculating buffer offset for the neighbour element
134
+ // Index is clamped to the border in each dimension.
135
+ int neighbourOffset = batchOffset;
136
+ bool flagNotClamped = true;
137
+
138
+ for (int i = 0; i < desc.dimensions; i++) {
139
+ int neighbourIndex = homeIndex[i] + kernelIndex[i] - halfWindowSize_arr[i];
140
+ int neighbourIndexClamped = std::min(desc.sizes[i] - 1, std::max(0, neighbourIndex));
141
+ neighbourOffset += neighbourIndexClamped * desc.strides[i];
142
+ if (neighbourIndex != neighbourIndexClamped) {
143
+ flagNotClamped = false;
144
+ }
145
+ }
146
+
147
+ // Euclidean color distance.
148
+ scalar_t colorDistance = 0;
149
+ scalar_t colorDistanceSquared = 0;
150
+
151
+ for (int i = 0; i < desc.channelCount; i++) {
152
+ scalar_t diff = guidanceTensorData[neighbourOffset + i * desc.channelStride] -
153
+ guidanceTensorData[homeOffset + i * desc.channelStride]; // Be careful: Here it is (Z_k - Z_i) and not
154
+ // (Z_i - Z_q)
155
+ colorDistance += diff; // Do not take the absolute value here. Be careful with the signs.
156
+ colorDistanceSquared += diff * diff;
157
+ }
158
+
159
+ // Calculating and combining the spatial
160
+ // and color weights.
161
+ scalar_t spatialWeight = 1;
162
+
163
+ spatialWeight =
164
+ gaussianKernel_x[kernelIndex[0]] * gaussianKernel_y[kernelIndex[1]] * gaussianKernel_z[kernelIndex[2]];
165
+
166
+ scalar_t colorWeight = exp(colorDistanceSquared * colorExpConstant);
167
+ scalar_t totalWeight = spatialWeight * colorWeight;
168
+
169
+ // Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded.
170
+ if (flagNotClamped) {
171
+ for (int i = 0; i < desc.channelCount; i++) {
172
+ // Distinguish cases for k!=i (calculation is done here)
173
+ // and k==i (partial derivatives are precalculated).
174
+ // If statement replaces center element of neighborhood/kernel.
175
+ if (kernelIndex[0] != halfWindowSize_x || kernelIndex[1] != halfWindowSize_y ||
176
+ kernelIndex[2] != halfWindowSize_z) {
177
+ filter_kernel_guidance = -(1 / outputWeightsTensorData[neighbourOffset + i * desc.channelStride]) *
178
+ outputTensorData[neighbourOffset + i * desc.channelStride] * totalWeight * colorDistance /
179
+ (colorSigma * colorSigma) +
180
+ (1 / outputWeightsTensorData[neighbourOffset + i * desc.channelStride]) * totalWeight *
181
+ (inputTensorData[homeOffset + i * desc.channelStride] * colorDistance /
182
+ (colorSigma * colorSigma)); // inputTensorData[homeOffset] !!, no +1!!
183
+ } else {
184
+ filter_kernel_guidance = dO_dz_kiData[homeOffset + i * desc.channelStride];
185
+ }
186
+
187
+ valueSumGuidance +=
188
+ gradientInputTensorData[neighbourOffset + i * desc.channelStride] * filter_kernel_guidance;
189
+ valueSumInput += gradientInputTensorData[neighbourOffset + i * desc.channelStride] *
190
+ (1 / outputWeightsTensorData[neighbourOffset + i * desc.channelStride]) * totalWeight;
191
+ }
192
+ }
193
+ } while (kernelIndex++);
194
+
195
+ // Do the filtering and calculate the values for the backward pass.
196
+ for (int i = 0; i < desc.channelCount; i++) {
197
+ // Filtering:
198
+ gradientGuidanceTensorData[homeOffset + i * desc.channelStride] = valueSumGuidance;
199
+ gradientOutputTensorData[homeOffset + i * desc.channelStride] = valueSumInput;
200
+ }
201
+ }
202
+ }
203
+ }
204
+
205
+ delete[] kernelSizes;
206
+ delete[] gaussianKernel_x;
207
+ delete[] gaussianKernel_y;
208
+ delete[] gaussianKernel_z;
209
+ delete[] xDistanceSquared;
210
+ delete[] yDistanceSquared;
211
+ delete[] zDistanceSquared;
212
+ }
213
+
214
+ std::tuple<torch::Tensor, torch::Tensor> JointBilateralFilterCpuBackward(
215
+ torch::Tensor gradientInputTensor,
216
+ torch::Tensor inputTensor,
217
+ torch::Tensor guidanceTensor,
218
+ torch::Tensor outputTensor,
219
+ torch::Tensor outputWeightsTensor,
220
+ torch::Tensor dO_dz_ki,
221
+ float sigma_x,
222
+ float sigma_y,
223
+ float sigma_z,
224
+ float colorSigma) {
225
+ // Preparing output tensor.
226
+ torch::Tensor gradientOutputTensor = torch::zeros_like(gradientInputTensor);
227
+ torch::Tensor gradientGuidanceTensor = torch::zeros_like(gradientInputTensor);
228
+
229
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradientInputTensor.scalar_type(), "JointBilateralFilterCpuBackward_3d", ([&] {
230
+ JointBilateralFilterCpuBackward_3d<scalar_t>(
231
+ gradientInputTensor,
232
+ gradientGuidanceTensor,
233
+ gradientOutputTensor,
234
+ inputTensor,
235
+ guidanceTensor,
236
+ outputTensor,
237
+ outputWeightsTensor,
238
+ dO_dz_ki,
239
+ sigma_x,
240
+ sigma_y,
241
+ sigma_z,
242
+ colorSigma);
243
+ }));
244
+
245
+ return {gradientOutputTensor, gradientGuidanceTensor};
246
+ }
source_code/SegMamba/monai/csrc/filtering/trainable_joint_bilateral/jbf_layer_cpu_forward.cpp ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+
13
+ =========================================================================
14
+ Adapted from https://github.com/faebstn96/trainable-joint-bilateral-filter-source
15
+ which has the following license...
16
+ https://github.com/faebstn96/trainable-joint-bilateral-filter-source/blob/main/LICENSE
17
+
18
+ Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
19
+ Licensed under the Apache License, Version 2.0 (the "License");
20
+ you may not use this file except in compliance with the License.
21
+ You may obtain a copy of the License at
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+ Unless required by applicable law or agreed to in writing, software
24
+ distributed under the License is distributed on an "AS IS" BASIS,
25
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ See the License for the specific language governing permissions and
27
+ limitations under the License.
28
+ */
29
+
30
+ #include "trainable_joint_bilateral.h"
31
+ #include "utils/tensor_description.h"
32
+ #include "utils/tensor_indexing.h"
33
+
34
+ template <typename scalar_t>
35
+ void JointBilateralFilterCpuForward_3d(
36
+ torch::Tensor inputTensor,
37
+ torch::Tensor guidanceTensor,
38
+ torch::Tensor outputTensor,
39
+ torch::Tensor outputWeightsTensor,
40
+ torch::Tensor dO_dz_ki,
41
+ torch::Tensor dO_dsig_r,
42
+ torch::Tensor dO_dsig_x,
43
+ torch::Tensor dO_dsig_y,
44
+ torch::Tensor dO_dsig_z,
45
+ float sigma_x,
46
+ float sigma_y,
47
+ float sigma_z,
48
+ float colorSigma) {
49
+ // Getting tensor description.
50
+ TensorDescription desc = TensorDescription(inputTensor);
51
+
52
+ // Raw tensor data pointers.
53
+ scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
54
+ scalar_t* guidanceTensorData = guidanceTensor.data_ptr<scalar_t>();
55
+ scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();
56
+ scalar_t* outputWeightsTensorData = outputWeightsTensor.data_ptr<scalar_t>();
57
+ scalar_t* dO_dz_kiData = dO_dz_ki.data_ptr<scalar_t>();
58
+ scalar_t* dO_dsig_rData = dO_dsig_r.data_ptr<scalar_t>();
59
+ scalar_t* dO_dsig_xData = dO_dsig_x.data_ptr<scalar_t>();
60
+ scalar_t* dO_dsig_yData = dO_dsig_y.data_ptr<scalar_t>();
61
+ scalar_t* dO_dsig_zData = dO_dsig_z.data_ptr<scalar_t>();
62
+
63
+ // Pre-calculate common values
64
+ int windowSize_x = std::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size
65
+ int windowSize_y = std::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size
66
+ int windowSize_z = std::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size
67
+ int halfWindowSize_x = floor(0.5f * windowSize_x);
68
+ int halfWindowSize_y = floor(0.5f * windowSize_y);
69
+ int halfWindowSize_z = floor(0.5f * windowSize_z);
70
+ int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z};
71
+ scalar_t spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x);
72
+ scalar_t spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y);
73
+ scalar_t spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z);
74
+ scalar_t colorExpConstant = -1.0f / (2 * colorSigma * colorSigma);
75
+
76
+ // Set kernel sizes with respect to the defined spatial sigmas.
77
+ int* kernelSizes = new int[desc.dimensions];
78
+
79
+ kernelSizes[0] = windowSize_x;
80
+ kernelSizes[1] = windowSize_y;
81
+ kernelSizes[2] = windowSize_z;
82
+
83
+ // Pre-calculate gaussian kernel and distance map in 1D.
84
+ scalar_t* gaussianKernel_x = new scalar_t[windowSize_x];
85
+ scalar_t* gaussianKernel_y = new scalar_t[windowSize_y];
86
+ scalar_t* gaussianKernel_z = new scalar_t[windowSize_z];
87
+ scalar_t* xDistanceSquared = new scalar_t[windowSize_x];
88
+ scalar_t* yDistanceSquared = new scalar_t[windowSize_y];
89
+ scalar_t* zDistanceSquared = new scalar_t[windowSize_z];
90
+
91
+ for (int i = 0; i < windowSize_x; i++) {
92
+ int distance = i - halfWindowSize_x;
93
+ gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x);
94
+ xDistanceSquared[i] = distance * distance;
95
+ }
96
+ for (int i = 0; i < windowSize_y; i++) {
97
+ int distance = i - halfWindowSize_y;
98
+ gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y);
99
+ yDistanceSquared[i] = distance * distance;
100
+ }
101
+ for (int i = 0; i < windowSize_z; i++) {
102
+ int distance = i - halfWindowSize_z;
103
+ gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z);
104
+ zDistanceSquared[i] = distance * distance;
105
+ }
106
+
107
+ // Looping over the batches
108
+ for (int b = 0; b < desc.batchCount; b++) {
109
+ int batchOffset = b * desc.batchStride;
110
+
111
+ // Looping over all dimensions for the home element
112
+ for (int z = 0; z < desc.sizes[2]; z++)
113
+ #pragma omp parallel for
114
+ for (int y = 0; y < desc.sizes[1]; y++) {
115
+ for (int x = 0; x < desc.sizes[0]; x++) {
116
+ // Calculating indexing offset for the home element
117
+ int homeOffset = batchOffset;
118
+
119
+ int homeIndex[] = {x, y, z};
120
+ homeOffset += x * desc.strides[0];
121
+ homeOffset += y * desc.strides[1];
122
+ homeOffset += z * desc.strides[2];
123
+
124
+ // Zero kernel aggregates.
125
+ scalar_t valueSum = 0;
126
+ scalar_t dw_dz_ki = 0;
127
+ scalar_t dfilter_dz_ki = 0;
128
+ scalar_t colorSum_w = 0;
129
+ scalar_t colorSum_alpha = 0;
130
+ scalar_t xSum_w = 0;
131
+ scalar_t xSum_alpha = 0;
132
+ scalar_t ySum_w = 0;
133
+ scalar_t ySum_alpha = 0;
134
+ scalar_t zSum_w = 0;
135
+ scalar_t zSum_alpha = 0;
136
+
137
+ scalar_t weightSum = 0.0f;
138
+
139
+ // Looping over all dimensions for the neighbour element
140
+ Indexer kernelIndex = Indexer(desc.dimensions, kernelSizes);
141
+ do // while(kernelIndex++)
142
+ {
143
+ // Calculating buffer offset for the neighbour element
144
+ // Index is clamped to the border in each dimension.
145
+ int neighbourOffset = batchOffset;
146
+ bool flagNotClamped = true;
147
+
148
+ for (int i = 0; i < desc.dimensions; i++) {
149
+ int neighbourIndex = homeIndex[i] + kernelIndex[i] - halfWindowSize_arr[i];
150
+ int neighbourIndexClamped = std::min(desc.sizes[i] - 1, std::max(0, neighbourIndex));
151
+ neighbourOffset += neighbourIndexClamped * desc.strides[i];
152
+ if (neighbourIndex != neighbourIndexClamped) {
153
+ flagNotClamped = false;
154
+ }
155
+ }
156
+
157
+ // Euclidean color distance.
158
+ scalar_t colorDistance = 0;
159
+ scalar_t colorDistanceSquared = 0;
160
+
161
+ for (int i = 0; i < desc.channelCount; i++) {
162
+ scalar_t diff = guidanceTensorData[homeOffset + i * desc.channelStride] -
163
+ guidanceTensorData[neighbourOffset + i * desc.channelStride];
164
+ colorDistance += diff; // Do not take the absolute value here. Be careful with the signs.
165
+ colorDistanceSquared += diff * diff;
166
+ }
167
+
168
+ // Calculating and combining the spatial
169
+ // and color weights.
170
+ scalar_t spatialWeight = 1;
171
+
172
+ spatialWeight =
173
+ gaussianKernel_x[kernelIndex[0]] * gaussianKernel_y[kernelIndex[1]] * gaussianKernel_z[kernelIndex[2]];
174
+
175
+ scalar_t colorWeight = exp(colorDistanceSquared * colorExpConstant);
176
+ scalar_t totalWeight = spatialWeight * colorWeight;
177
+
178
+ // Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded.
179
+ if (flagNotClamped) {
180
+ for (int i = 0; i < desc.channelCount; i++) {
181
+ valueSum += inputTensorData[neighbourOffset + i * desc.channelStride] * totalWeight;
182
+
183
+ // Derivative of weights with respect to X_i while i=k.
184
+ dw_dz_ki += (-1) * totalWeight * colorDistance / (colorSigma * colorSigma);
185
+ // Derivative of convolved image with respect to X_i while i=k.
186
+ dfilter_dz_ki += (-1) * totalWeight * inputTensorData[neighbourOffset + i * desc.channelStride] *
187
+ colorDistance /
188
+ (colorSigma *
189
+ colorSigma); // Be careful, the +1 is missing here -> Added before filling dfilter_dx_kiData
190
+
191
+ colorSum_w += totalWeight * colorDistanceSquared / std::abs(colorSigma * colorSigma * colorSigma);
192
+ colorSum_alpha += totalWeight * inputTensorData[neighbourOffset + i * desc.channelStride] *
193
+ colorDistanceSquared / std::abs(colorSigma * colorSigma * colorSigma);
194
+
195
+ xSum_w += totalWeight * xDistanceSquared[kernelIndex[0]] / std::abs(sigma_x * sigma_x * sigma_x);
196
+ xSum_alpha += totalWeight * inputTensorData[neighbourOffset + i * desc.channelStride] *
197
+ xDistanceSquared[kernelIndex[0]] / std::abs(sigma_x * sigma_x * sigma_x);
198
+
199
+ ySum_w += totalWeight * yDistanceSquared[kernelIndex[1]] / std::abs(sigma_y * sigma_y * sigma_y);
200
+ ySum_alpha += totalWeight * inputTensorData[neighbourOffset + i * desc.channelStride] *
201
+ yDistanceSquared[kernelIndex[1]] / std::abs(sigma_y * sigma_y * sigma_y);
202
+
203
+ zSum_w += totalWeight * zDistanceSquared[kernelIndex[2]] / std::abs(sigma_z * sigma_z * sigma_z);
204
+ zSum_alpha += totalWeight * inputTensorData[neighbourOffset + i * desc.channelStride] *
205
+ zDistanceSquared[kernelIndex[2]] / std::abs(sigma_z * sigma_z * sigma_z);
206
+ }
207
+
208
+ weightSum += totalWeight;
209
+ }
210
+ } while (kernelIndex++);
211
+
212
+ // Do the filtering and calculate the values for the backward pass.
213
+ for (int i = 0; i < desc.channelCount; i++) {
214
+ // Filtering:
215
+ outputTensorData[homeOffset + i * desc.channelStride] = valueSum / weightSum;
216
+
217
+ // Pre-computations for the backward pass:
218
+ outputWeightsTensorData[homeOffset + i * desc.channelStride] = weightSum;
219
+ dO_dz_kiData[homeOffset + i * desc.channelStride] = -(1 / weightSum) * (valueSum / weightSum) * dw_dz_ki +
220
+ (1 / weightSum) * (dfilter_dz_ki); // no +1 for dfilter_dz_ki for JBF added here!
221
+ dO_dsig_rData[homeOffset + i * desc.channelStride] =
222
+ -(1 / weightSum) * (valueSum / weightSum) * colorSum_w + (1 / weightSum) * colorSum_alpha;
223
+ dO_dsig_xData[homeOffset + i * desc.channelStride] =
224
+ -(1 / weightSum) * (valueSum / weightSum) * xSum_w + (1 / weightSum) * xSum_alpha;
225
+ dO_dsig_yData[homeOffset + i * desc.channelStride] =
226
+ -(1 / weightSum) * (valueSum / weightSum) * ySum_w + (1 / weightSum) * ySum_alpha;
227
+ dO_dsig_zData[homeOffset + i * desc.channelStride] =
228
+ -(1 / weightSum) * (valueSum / weightSum) * zSum_w + (1 / weightSum) * zSum_alpha;
229
+ }
230
+ }
231
+ }
232
+ }
233
+
234
+ delete[] kernelSizes;
235
+ delete[] gaussianKernel_x;
236
+ delete[] gaussianKernel_y;
237
+ delete[] gaussianKernel_z;
238
+ delete[] xDistanceSquared;
239
+ delete[] yDistanceSquared;
240
+ delete[] zDistanceSquared;
241
+ }
242
+
243
+ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
244
+ JointBilateralFilterCpuForward(
245
+ torch::Tensor inputTensor,
246
+ torch::Tensor guidanceTensor,
247
+ float sigma_x,
248
+ float sigma_y,
249
+ float sigma_z,
250
+ float colorSigma) {
251
+ // Preparing output tensor.
252
+ torch::Tensor outputTensor = torch::zeros_like(inputTensor);
253
+ torch::Tensor outputWeightsTensor = torch::zeros_like(inputTensor);
254
+ torch::Tensor dO_dz_ki = torch::zeros_like(inputTensor);
255
+ torch::Tensor dO_dsig_r = torch::zeros_like(inputTensor);
256
+ torch::Tensor dO_dsig_x = torch::zeros_like(inputTensor);
257
+ torch::Tensor dO_dsig_y = torch::zeros_like(inputTensor);
258
+ torch::Tensor dO_dsig_z = torch::zeros_like(inputTensor);
259
+
260
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(inputTensor.scalar_type(), "JointBilateralFilterCpuForward_3d", ([&] {
261
+ JointBilateralFilterCpuForward_3d<scalar_t>(
262
+ inputTensor,
263
+ guidanceTensor,
264
+ outputTensor,
265
+ outputWeightsTensor,
266
+ dO_dz_ki,
267
+ dO_dsig_r,
268
+ dO_dsig_x,
269
+ dO_dsig_y,
270
+ dO_dsig_z,
271
+ sigma_x,
272
+ sigma_y,
273
+ sigma_z,
274
+ colorSigma);
275
+ }));
276
+
277
+ return {outputTensor, outputWeightsTensor, dO_dz_ki, dO_dsig_r, dO_dsig_x, dO_dsig_y, dO_dsig_z};
278
+ }
source_code/SegMamba/monai/csrc/filtering/trainable_joint_bilateral/jbf_layer_gpu_backward.cu ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+
13
+ =========================================================================
14
+ Adapted from https://github.com/faebstn96/trainable-joint-bilateral-filter-source
15
+ which has the following license...
16
+ https://github.com/faebstn96/trainable-joint-bilateral-filter-source/blob/main/LICENSE
17
+
18
+ Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
19
+ Licensed under the Apache License, Version 2.0 (the "License");
20
+ you may not use this file except in compliance with the License.
21
+ You may obtain a copy of the License at
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+ Unless required by applicable law or agreed to in writing, software
24
+ distributed under the License is distributed on an "AS IS" BASIS,
25
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ See the License for the specific language governing permissions and
27
+ limitations under the License.
28
+ */
29
+
30
#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>

#include "trainable_joint_bilateral.h"
// #include "../utils/cuda_error_check.h"
#include "utils/meta_macros.h"
#include "utils/tensor_description.h"
37
+
38
// Device __constant__ memory for the backward CUDA kernel. All symbols are
// written on the host via cudaMemcpyToSymbol in
// JointBilateralFilterCudaBackwardFunction before each launch.
__constant__ int cBatchStrideBack; // elements between consecutive batch items
__constant__ int cColorStrideBack; // elements between consecutive channels

__constant__ int cSizesBack[3]; // spatial sizes per axis
__constant__ int cStridesBack[3]; // spatial strides per axis

__constant__ int cKernelSizesBack[3]; // filter window size per axis (odd)
__constant__ int cHalfWindowSize_arrBack[3]; // half window size per axis
// 1-D spatial Gaussian lookup tables; 256 entries bounds the window size.
__constant__ float cGaussianKernel_xBack[256];
__constant__ float cGaussianKernel_yBack[256];
__constant__ float cGaussianKernel_zBack[256];
// Squared kernel-offset distances per axis (used for the sigma gradients).
__constant__ float cXDistanceSquaredBack[256];
__constant__ float cYDistanceSquaredBack[256];
__constant__ float cZDistanceSquaredBack[256];
__constant__ float cColorExponentConstantBack; // -1 / (2 * colorSigma^2)
__constant__ float cSigma_xBack;
__constant__ float cSigma_yBack;
__constant__ float cSigma_zBack;
__constant__ float cColorSigmaBack;
+
58
// Backward CUDA kernel of the trainable joint bilateral filter (3 spatial
// dims, C channels). Each thread handles one spatial location ("home" voxel)
// and accumulates, over its filter window, the gradient of the loss w.r.t.
// the guidance image (gradientGuidanceTensor) and w.r.t. the filter input
// (gradientOutputTensor), given the incoming gradient gradientInputTensor
// and the quantities precomputed by the forward pass
// (outputWeightsTensor, dO_dz_ki).
template <typename scalar_t, int C>
__global__ void JointBilateralFilterCudaKernel3DBackward(
    scalar_t* gradientInputTensor,
    scalar_t* gradientGuidanceTensor,
    scalar_t* gradientOutputTensor,
    scalar_t* inputTensor,
    scalar_t* guidanceTensor,
    scalar_t* outputTensor,
    scalar_t* outputWeightsTensor,
    scalar_t* dO_dz_ki) {
  // One thread per spatial element; grid y indexes the batch.
  int homeOffset = blockIdx.x * blockDim.x + threadIdx.x;
  int batchOffset = blockIdx.y * cBatchStrideBack;

  // Guard against the partial last block.
  if (homeOffset >= cColorStrideBack)
    return;

  // Recover the (x, y, z) index from the flat spatial offset.
  int homeX = homeOffset / cStridesBack[0];
  int homeY = (homeOffset - homeX * cStridesBack[0]) / cStridesBack[1];
  int homeZ = (homeOffset - homeX * cStridesBack[0] - homeY * cStridesBack[1]) / cStridesBack[2];
  int homeIndex[] = {homeX, homeY, homeZ};

  // Zero kernel aggregates.
  scalar_t valueSumGuidance = 0;
  scalar_t valueSumInput = 0;

  for (int kernelX = 0; kernelX < cKernelSizesBack[0]; kernelX++) {
    int neighbourX = max(0, min(homeX + (kernelX - cHalfWindowSize_arrBack[0]), cSizesBack[0] - 1));
    scalar_t gaussianX = cGaussianKernel_xBack[kernelX];

    for (int kernelY = 0; kernelY < cKernelSizesBack[1]; kernelY++) {
      int neighbourY = max(0, min(homeY + (kernelY - cHalfWindowSize_arrBack[1]), cSizesBack[1] - 1));
      scalar_t gaussianY = cGaussianKernel_yBack[kernelY];

      for (int kernelZ = 0; kernelZ < cKernelSizesBack[2]; kernelZ++) {
        int neighbourZ = max(0, min(homeZ + (kernelZ - cHalfWindowSize_arrBack[2]), cSizesBack[2] - 1));
        scalar_t gaussianZ = cGaussianKernel_zBack[kernelZ];

        // NOTE(review): neighbourZ is added without multiplying by
        // cStridesBack[2], i.e. this assumes the innermost stride is 1
        // (contiguous z axis) — confirm against TensorDescription's layout.
        int neighbourOffset = neighbourX * cStridesBack[0] + neighbourY * cStridesBack[1] + neighbourZ;

        bool flagNotClamped = true;
        int kernelIndex[] = {kernelX, kernelY, kernelZ};
        int dimensions = 3; // Must equal the number of spatial dimensions.

        for (int i = 0; i < dimensions; i++) {
          int HalfWindowSizeBack = cHalfWindowSize_arrBack[i]; // Define constant memory as new variable here (!!),
                                                               // otherwise: cudaErrorMisalignedAddress
          int neighbourIndex = homeIndex[i] + kernelIndex[i] - HalfWindowSizeBack;
          int neighbourIndexClamped = min(cSizesBack[i] - 1, max(0, neighbourIndex));
          if (neighbourIndex != neighbourIndexClamped) {
            flagNotClamped = false;
          }
        }

        // Signed and squared guidance-value differences over channels.
        scalar_t colorDistance = 0;
        scalar_t colorDistanceSquared = 0;

#pragma unroll
        for (int c = 0; c < C; c++) {
          scalar_t a = guidanceTensor[batchOffset + neighbourOffset + c * cColorStrideBack];
          scalar_t b = guidanceTensor[batchOffset + homeOffset + c * cColorStrideBack]; // Be careful: Here it is (Z_k -
                                                                                       // Z_i) and not (Z_i - Z_q)
          scalar_t diff = a - b;
          colorDistance += diff; // Do not take the absolute value here. Be careful with the signs.
          colorDistanceSquared += diff * diff;
        }

        // Combined spatial and range weight for this neighbour.
        scalar_t spatialWeight = gaussianX * gaussianY * gaussianZ;
        scalar_t colorWeight = exp(cColorExponentConstantBack * colorDistanceSquared);
        scalar_t totalWeight = spatialWeight * colorWeight;

        // Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded.
        if (flagNotClamped) {
          scalar_t filter_kernel_guidance_back;

#pragma unroll
          for (int c = 0; c < C; c++) {
            // Distinguish cases for k!=i (calculation is done here)
            // and k==i (partial derivatives are precalculated).
            // If statement replaces center element of neighborhood/kernel.
            if (kernelX != cHalfWindowSize_arrBack[0] || kernelY != cHalfWindowSize_arrBack[1] ||
                kernelZ != cHalfWindowSize_arrBack[2]) {
              filter_kernel_guidance_back =
                  -(1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) *
                      outputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * totalWeight * colorDistance /
                      (cColorSigmaBack * cColorSigmaBack) +
                  (1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * totalWeight *
                      (inputTensor[batchOffset + homeOffset + c * cColorStrideBack] * colorDistance /
                       (cColorSigmaBack * cColorSigmaBack)); // inputTensorData[homeOffset] !!, no +1!!
            } else {
              filter_kernel_guidance_back = dO_dz_ki[batchOffset + homeOffset + c * cColorStrideBack];
            }

            valueSumGuidance +=
                gradientInputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * filter_kernel_guidance_back;
            valueSumInput += gradientInputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] *
                (1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * totalWeight;
          }
        }
      }
    }
  }

  // Write the accumulated gradients for every channel of the home voxel.
#pragma unroll
  for (int c = 0; c < C; c++) {
    gradientGuidanceTensor[batchOffset + homeOffset + c * cColorStrideBack] = valueSumGuidance;
    gradientOutputTensor[batchOffset + homeOffset + c * cColorStrideBack] = valueSumInput;
  }
}
166
+
167
/*
 * Host-side launcher for the 3-d CUDA backward kernel, templated on the
 * channel count C (D is the spatial dimension chosen by the dispatcher).
 * Precomputes the 1-d Gaussian kernels and squared-distance tables on the
 * host, uploads all kernel parameters into device __constant__ memory, and
 * launches JointBilateralFilterCudaKernel3DBackward, which fills
 * gradientGuidanceTensor and gradientOutputTensor in-place.
 */
template <int C, int D>
void JointBilateralFilterCudaBackwardFunction(
    torch::Tensor gradientInputTensor,
    torch::Tensor gradientGuidanceTensor,
    torch::Tensor gradientOutputTensor,
    torch::Tensor inputTensor,
    torch::Tensor guidanceTensor,
    torch::Tensor outputTensor,
    torch::Tensor outputWeightsTensor,
    torch::Tensor dO_dz_ki,
    float sigma_x,
    float sigma_y,
    float sigma_z,
    float colorSigma) {
  // Getting tensor description.
  TensorDescription desc = TensorDescription(inputTensor);

  // Pre-calculating gaussian kernel.
  int windowSize_x = std::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size
  int windowSize_y = std::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size
  int windowSize_z = std::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size
  int halfWindowSize_x = floor(0.5f * windowSize_x);
  int halfWindowSize_y = floor(0.5f * windowSize_y);
  int halfWindowSize_z = floor(0.5f * windowSize_z);
  int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z};
  float spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x);
  float spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y);
  float spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z);
  float colorExpConstant = -1.0f / (2 * colorSigma * colorSigma);

  // FIX: std::vector instead of raw new[]/delete[] so the host buffers are
  // released on every exit path (the raw pointers leaked if anything threw).
  // NOTE(review): the device __constant__ arrays hold 256 floats, so
  // windowSize_* must stay <= 256 (sigma_* up to ~51) or the memcpys below
  // overflow the symbols — confirm the expected sigma range with callers.
  std::vector<int> kernelSizes(desc.dimensions);
  kernelSizes[0] = windowSize_x;
  kernelSizes[1] = windowSize_y;
  kernelSizes[2] = windowSize_z;

  std::vector<float> gaussianKernel_x(windowSize_x);
  std::vector<float> gaussianKernel_y(windowSize_y);
  std::vector<float> gaussianKernel_z(windowSize_z);
  std::vector<float> xDistanceSquared(windowSize_x);
  std::vector<float> yDistanceSquared(windowSize_y);
  std::vector<float> zDistanceSquared(windowSize_z);

  for (int i = 0; i < windowSize_x; i++) {
    int distance = i - halfWindowSize_x;
    gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x);
    xDistanceSquared[i] = distance * distance;
  }
  for (int i = 0; i < windowSize_y; i++) {
    int distance = i - halfWindowSize_y;
    gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y);
    yDistanceSquared[i] = distance * distance;
  }
  for (int i = 0; i < windowSize_z; i++) {
    int distance = i - halfWindowSize_z;
    gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z);
    zDistanceSquared[i] = distance * distance;
  }

  // Writing constant memory.
  cudaMemcpyToSymbol(cBatchStrideBack, &desc.batchStride, sizeof(int));
  cudaMemcpyToSymbol(cColorStrideBack, &desc.channelStride, sizeof(int));
  cudaMemcpyToSymbol(cSizesBack, desc.sizes, sizeof(int) * 3);
  cudaMemcpyToSymbol(cStridesBack, desc.strides, sizeof(int) * 3);
  cudaMemcpyToSymbol(cKernelSizesBack, kernelSizes.data(), sizeof(int) * desc.dimensions);
  cudaMemcpyToSymbol(cHalfWindowSize_arrBack, halfWindowSize_arr, sizeof(int) * desc.dimensions);
  cudaMemcpyToSymbol(cGaussianKernel_xBack, gaussianKernel_x.data(), sizeof(float) * windowSize_x);
  cudaMemcpyToSymbol(cGaussianKernel_yBack, gaussianKernel_y.data(), sizeof(float) * windowSize_y);
  cudaMemcpyToSymbol(cGaussianKernel_zBack, gaussianKernel_z.data(), sizeof(float) * windowSize_z);
  cudaMemcpyToSymbol(cXDistanceSquaredBack, xDistanceSquared.data(), sizeof(float) * windowSize_x);
  cudaMemcpyToSymbol(cYDistanceSquaredBack, yDistanceSquared.data(), sizeof(float) * windowSize_y);
  cudaMemcpyToSymbol(cZDistanceSquaredBack, zDistanceSquared.data(), sizeof(float) * windowSize_z);
  cudaMemcpyToSymbol(cColorExponentConstantBack, &colorExpConstant, sizeof(float));
  cudaMemcpyToSymbol(cSigma_xBack, &sigma_x, sizeof(float));
  cudaMemcpyToSymbol(cSigma_yBack, &sigma_y, sizeof(float));
  cudaMemcpyToSymbol(cSigma_zBack, &sigma_z, sizeof(float));
  cudaMemcpyToSymbol(cColorSigmaBack, &colorSigma, sizeof(float));

  // cuda_error_check("Cuda check before kernel call.");

#define BLOCK_SIZE 32

  // One thread per spatial element; grid y covers the batch dimension.
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      inputTensor.scalar_type(), "JointBilateralFilterCudaKernel3DBackward", ([&] {
        JointBilateralFilterCudaKernel3DBackward<scalar_t, C>
            <<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
                gradientInputTensor.data_ptr<scalar_t>(),
                gradientGuidanceTensor.data_ptr<scalar_t>(),
                gradientOutputTensor.data_ptr<scalar_t>(),
                inputTensor.data_ptr<scalar_t>(),
                guidanceTensor.data_ptr<scalar_t>(),
                outputTensor.data_ptr<scalar_t>(),
                outputWeightsTensor.data_ptr<scalar_t>(),
                dO_dz_ki.data_ptr<scalar_t>());
      }));

  // cuda_error_check("Cuda check after kernel call.");
}
272
+
273
// Function to choose template implementation based on dynamic, channels and dimensions.
// Host entry point for the CUDA backward pass: allocates the two gradient
// buffers and uses the SWITCH_AB meta-macro to dispatch on the runtime
// (channel count, spatial dimension) pair to the statically templated
// JointBilateralFilterCudaBackwardFunction<C, D>.
// Returns {gradient w.r.t. the filter input, gradient w.r.t. the guidance}.
std::tuple<torch::Tensor, torch::Tensor> JointBilateralFilterCudaBackward(
    torch::Tensor gradientInputTensor,
    torch::Tensor inputTensor,
    torch::Tensor guidanceTensor,
    torch::Tensor outputTensor,
    torch::Tensor outputWeightsTensor,
    torch::Tensor dO_dz_ki,
    float sigma_x,
    float sigma_y,
    float sigma_z,
    float colorSigma) {
  torch::Tensor gradientOutputTensor = torch::zeros_like(gradientInputTensor);
  torch::Tensor gradientGuidanceTensor = torch::zeros_like(gradientInputTensor);
  // cuda_error_check("beginning");

// One CASE per (channel, dimension) combination instantiated by SWITCH_AB.
#define CASE(c, d)                                \
  JointBilateralFilterCudaBackwardFunction<c, d>( \
      gradientInputTensor,                        \
      gradientGuidanceTensor,                     \
      gradientOutputTensor,                       \
      inputTensor,                                \
      guidanceTensor,                             \
      outputTensor,                               \
      outputWeightsTensor,                        \
      dO_dz_ki,                                   \
      sigma_x,                                    \
      sigma_y,                                    \
      sigma_z,                                    \
      colorSigma);
  SWITCH_AB(
      CASE,
      BF_CUDA_MAX_CHANNELS,
      BF_CUDA_MAX_SPATIAL_DIMENSION,
      gradientInputTensor.size(1),
      gradientInputTensor.dim() - 2);

  return {gradientOutputTensor, gradientGuidanceTensor};
}
source_code/SegMamba/monai/csrc/filtering/trainable_joint_bilateral/trainable_joint_bilateral.cpp ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+
13
+ =========================================================================
14
+ Adapted from https://github.com/faebstn96/trainable-joint-bilateral-filter-source
15
+ which has the following license...
16
+ https://github.com/faebstn96/trainable-joint-bilateral-filter-source/blob/main/LICENSE
17
+
18
+ Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
19
+ Licensed under the Apache License, Version 2.0 (the "License");
20
+ you may not use this file except in compliance with the License.
21
+ You may obtain a copy of the License at
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+ Unless required by applicable law or agreed to in writing, software
24
+ distributed under the License is distributed on an "AS IS" BASIS,
25
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ See the License for the specific language governing permissions and
27
+ limitations under the License.
28
+ */
29
+
30
+ #include <torch/extension.h>
31
+ #include <stdexcept>
32
+ #include <string>
33
+
34
+ #include "trainable_joint_bilateral.h"
35
+ #include "utils/common_utils.h"
36
+
37
+ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
38
+ TrainableJointBilateralFilterForward(
39
+ torch::Tensor inputTensor,
40
+ torch::Tensor guidanceTensor,
41
+ float sigma_x,
42
+ float sigma_y,
43
+ float sigma_z,
44
+ float colorSigma) {
45
+ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> (
46
+ *filterFunction)(torch::Tensor, torch::Tensor, float, float, float, float);
47
+
48
+ #ifdef WITH_CUDA
49
+
50
+ if (torch::cuda::is_available() && inputTensor.is_cuda()) {
51
+ CHECK_CONTIGUOUS_CUDA(inputTensor);
52
+
53
+ if (inputTensor.size(1) > BF_CUDA_MAX_CHANNELS) {
54
+ throw std::runtime_error(
55
+ "Bilateral filtering not implemented for channel count > " + std::to_string(BF_CUDA_MAX_CHANNELS));
56
+ }
57
+
58
+ if (inputTensor.dim() - 2 > BF_CUDA_MAX_SPATIAL_DIMENSION) {
59
+ throw std::runtime_error(
60
+ "Bilateral filtering not implemented for spatial dimension > " +
61
+ std::to_string(BF_CUDA_MAX_SPATIAL_DIMENSION));
62
+ }
63
+
64
+ filterFunction = &JointBilateralFilterCudaForward;
65
+ } else {
66
+ filterFunction = &JointBilateralFilterCpuForward;
67
+ }
68
+ #else
69
+ filterFunction = &JointBilateralFilterCpuForward;
70
+ #endif
71
+
72
+ return filterFunction(inputTensor, guidanceTensor, sigma_x, sigma_y, sigma_z, colorSigma);
73
+ }
74
+
75
+ std::tuple<torch::Tensor, torch::Tensor> TrainableJointBilateralFilterBackward(
76
+ torch::Tensor gradientInputTensor,
77
+ torch::Tensor inputTensor,
78
+ torch::Tensor guidanceTensor,
79
+ torch::Tensor outputTensor,
80
+ torch::Tensor outputWeightsTensor,
81
+ torch::Tensor dO_dx_ki,
82
+ float sigma_x,
83
+ float sigma_y,
84
+ float sigma_z,
85
+ float colorSigma) {
86
+ std::tuple<torch::Tensor, torch::Tensor> (*filterFunction)(
87
+ torch::Tensor,
88
+ torch::Tensor,
89
+ torch::Tensor,
90
+ torch::Tensor,
91
+ torch::Tensor,
92
+ torch::Tensor,
93
+ float,
94
+ float,
95
+ float,
96
+ float);
97
+
98
+ #ifdef WITH_CUDA
99
+
100
+ if (torch::cuda::is_available() && gradientInputTensor.is_cuda()) {
101
+ CHECK_CONTIGUOUS_CUDA(gradientInputTensor);
102
+
103
+ if (gradientInputTensor.size(1) > BF_CUDA_MAX_CHANNELS) {
104
+ throw std::runtime_error(
105
+ "Bilateral filtering not implemented for channel count > " + std::to_string(BF_CUDA_MAX_CHANNELS));
106
+ }
107
+
108
+ if (gradientInputTensor.dim() - 2 > BF_CUDA_MAX_SPATIAL_DIMENSION) {
109
+ throw std::runtime_error(
110
+ "Bilateral filtering not implemented for spatial dimension > " +
111
+ std::to_string(BF_CUDA_MAX_SPATIAL_DIMENSION));
112
+ }
113
+
114
+ filterFunction = &JointBilateralFilterCudaBackward;
115
+ } else {
116
+ filterFunction = &JointBilateralFilterCpuBackward;
117
+ }
118
+ #else
119
+ filterFunction = &JointBilateralFilterCpuBackward;
120
+ #endif
121
+
122
+ return filterFunction(
123
+ gradientInputTensor,
124
+ inputTensor,
125
+ guidanceTensor,
126
+ outputTensor,
127
+ outputWeightsTensor,
128
+ dO_dx_ki,
129
+ sigma_x,
130
+ sigma_y,
131
+ sigma_z,
132
+ colorSigma);
133
+ }
source_code/SegMamba/monai/csrc/lltm/lltm_cpu.cpp ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ #include <torch/extension.h>
15
+ #include <vector>
16
+
17
+ // s'(z) = (1 - s(z)) * s(z)
18
+ torch::Tensor d_sigmoid(torch::Tensor z) {
19
+ auto s = torch::sigmoid(z);
20
+ return (1 - s) * s;
21
+ }
22
+
23
+ // tanh'(z) = 1 - tanh^2(z)
24
+ torch::Tensor d_tanh(torch::Tensor z) {
25
+ return 1 - z.tanh().pow(2);
26
+ }
27
+
28
+ // elu'(z) = relu'(z) + { alpha * exp(z) if (alpha * (exp(z) - 1)) < 0, else 0}
29
+ torch::Tensor d_elu(torch::Tensor z, torch::Scalar alpha = 1.0) {
30
+ auto e = z.exp();
31
+ auto mask = (alpha * (e - 1)) < 0;
32
+ return (z > 0).type_as(z) + mask.type_as(z) * (alpha * e);
33
+ }
34
+
35
+ std::vector<torch::Tensor> lltm_cpu_forward(
36
+ torch::Tensor input,
37
+ torch::Tensor weights,
38
+ torch::Tensor bias,
39
+ torch::Tensor old_h,
40
+ torch::Tensor old_cell) {
41
+ auto X = torch::cat({old_h, input}, /*dim=*/1);
42
+
43
+ auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
44
+ auto gates = gate_weights.chunk(3, /*dim=*/1);
45
+
46
+ auto input_gate = torch::sigmoid(gates[0]);
47
+ auto output_gate = torch::sigmoid(gates[1]);
48
+ auto candidate_cell = torch::elu(gates[2], /*alpha=*/1.0);
49
+
50
+ auto new_cell = old_cell + candidate_cell * input_gate;
51
+ auto new_h = torch::tanh(new_cell) * output_gate;
52
+
53
+ return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gate_weights};
54
+ }
55
+
56
+ std::vector<torch::Tensor> lltm_cpu_backward(
57
+ torch::Tensor grad_h,
58
+ torch::Tensor grad_cell,
59
+ torch::Tensor new_cell,
60
+ torch::Tensor input_gate,
61
+ torch::Tensor output_gate,
62
+ torch::Tensor candidate_cell,
63
+ torch::Tensor X,
64
+ torch::Tensor gate_weights,
65
+ torch::Tensor weights) {
66
+ auto d_output_gate = torch::tanh(new_cell) * grad_h;
67
+ auto d_tanh_new_cell = output_gate * grad_h;
68
+ auto d_new_cell = d_tanh(new_cell) * d_tanh_new_cell + grad_cell;
69
+
70
+ auto d_old_cell = d_new_cell;
71
+ auto d_candidate_cell = input_gate * d_new_cell;
72
+ auto d_input_gate = candidate_cell * d_new_cell;
73
+
74
+ auto gates = gate_weights.chunk(3, /*dim=*/1);
75
+ d_input_gate *= d_sigmoid(gates[0]);
76
+ d_output_gate *= d_sigmoid(gates[1]);
77
+ d_candidate_cell *= d_elu(gates[2]);
78
+
79
+ auto d_gates = torch::cat({d_input_gate, d_output_gate, d_candidate_cell}, /*dim=*/1);
80
+
81
+ auto d_weights = d_gates.t().mm(X);
82
+ auto d_bias = d_gates.sum(/*dim=*/0, /*keepdim=*/true);
83
+
84
+ auto d_X = d_gates.mm(weights);
85
+ const auto state_size = grad_h.size(1);
86
+ auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
87
+ auto d_input = d_X.slice(/*dim=*/1, state_size);
88
+
89
+ return {d_old_h, d_input, d_weights, d_bias, d_old_cell};
90
+ }
source_code/SegMamba/monai/csrc/resample/pushpull_cpu.cpp ADDED
@@ -0,0 +1,2270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ // adapted from https://github.com/balbasty/nitorch
15
+
16
+ // This file implements spline interpolation / sampling and its adjoint
17
+ // operations. It corresponds loosely to torch's `GridSampler`.
18
+ // It handles boundary conditions and interpolation orders defined in
19
+ // `utils/resample_utils.h`.
20
+ // These parameters can be specified per dimension.
21
+ // Isotropic 0-th and 1-st order interpolation have their own (faster)
22
+ // implementations. Sliding boundary conditions are also implemented
23
+ // separately.
24
+
25
+ // TODO:
26
+ // . [DONE] generic 3d
27
+ // . [DONE] generic 2d
28
+ // . [DONE] generic 1d
29
+ // . sliding nearest 3d
30
+ // . sliding nearest 2d
31
+ // . sliding linear 3d
32
+ // . sliding linear 2d
33
+ // . sliding generic 3d
34
+ // . sliding generic 2d
35
+ // . [DONE] spatial gradient mode (without multiplication with output gradient)
36
+ // . [DONE] second order gradients (backward pass for spatial gradients)
37
+ // . performance tests
38
+ // . input bound/inter are always vectors -> clean unused constructors
39
+
40
+ #include <ATen/ATen.h>
41
+ #include <limits>
42
+ #include <tuple>
43
+ #include "bounds_common.h"
44
+ #include "interpolation_common.h"
45
+ #include "utils/resample_utils.h"
46
+ //#include <cstdio>
47
+
48
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
49
+ // CPU-specific parameters
50
+ #include <ATen/Parallel.h>
51
namespace {
// This parameter specifies the minimum number of voxels that should be
// processed on a single processor in the parallel for loop.
// NOTE(review): mutable at namespace scope; presumably treated as a constant
// after initialization — confirm nothing later in the file reassigns it.
int64_t GRAIN_SIZE = static_cast<int64_t>(at::internal::GRAIN_SIZE);
} // namespace
56
+
57
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
58
+
59
+ // maximum number of channels
60
+ // > not used in mode isotropic nearest/linear
61
+ #ifndef MONAI_MAX_NUM_CHANNELS
62
+ #define MONAI_MAX_NUM_CHANNELS 1024
63
+ #endif
64
+
65
+ // This parameter allows for a little bit of tolerance when considering
66
+ // a coordinate as "out-of-bound" (if !extrapolate)
67
+ #define TINY 5e-2
68
+
69
+ using at::Tensor;
70
+ using at::TensorOptions;
71
+ using c10::IntArrayRef;
72
+
73
+ namespace monai {
74
+ MONAI_NAMESPACE_DEVICE { // cpu
75
+
76
+ namespace { // anonymous namespace > everything inside has internal linkage
77
+
78
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
79
+ // INDEXING UTILS
80
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
81
+
82
+ // This class reads and sets all the parameters that will later be used
83
+ // by the algorithm in PushPullImpl. All of this is done outside of the
84
+ // implementation class so that we do not depend on generic types. The
85
+ // point is to pre-allocate all necessary tensors so that we can check
86
+ // if they're all compatible with 32 bit math. If it's the case, we can
87
+ // dispatch to a 32b cuda implementation, which might increase
88
+ // performance. Else, we use 64 bit math to compute offsets.
89
+ // (On CPU, we always use 64 bit offsets because it doesn't make a huge
90
+ // difference. It would be different if we had a vectorized
91
+ // implementation as in PyTorch).
92
class PushPullAllocator {
 public:
  // Largest offset addressable with signed 32-bit index math.
  static constexpr int64_t max_int32 = std::numeric_limits<int32_t>::max();

  // ~~~ CONSTRUCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  // Per-axis bound/interpolation settings fall back to the last value
  // provided, or to Replicate/Linear when the vectors are empty.
  MONAI_HOST
  PushPullAllocator(
      int dim,
      BoundVectorRef bound,
      InterpolationVectorRef interpolation,
      bool extrapolate,
      bool do_pull,
      bool do_push,
      bool do_count,
      bool do_grad,
      bool do_sgrad)
      : dim(dim),
        bound0(bound.size() > 0 ? bound[0] : BoundType::Replicate),
        bound1(
            bound.size() > 1 ? bound[1]
                : bound.size() > 0 ? bound[0]
                                   : BoundType::Replicate),
        bound2(
            bound.size() > 2 ? bound[2]
                : bound.size() > 1 ? bound[1]
                : bound.size() > 0 ? bound[0]
                                   : BoundType::Replicate),
        interpolation0(interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear),
        interpolation1(
            interpolation.size() > 1 ? interpolation[1]
                : interpolation.size() > 0 ? interpolation[0]
                                           : InterpolationType::Linear),
        interpolation2(
            interpolation.size() > 2 ? interpolation[2]
                : interpolation.size() > 1 ? interpolation[1]
                : interpolation.size() > 0 ? interpolation[0]
                                           : InterpolationType::Linear),
        extrapolate(extrapolate),
        do_pull(do_pull),
        do_push(do_push),
        do_count(do_count),
        do_grad(do_grad),
        do_sgrad(do_sgrad) {
    // "iso" = the same interpolation order on every spatial axis; the
    // implementation has faster specializations for that case.
    iso = interpolation0 == interpolation1 && interpolation0 == interpolation2;
  }

  // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  // Usually used for pull:
  // - do_pull  -> return source[grid]
  // - do_push  -> fails
  // - do_grad  -> return J(source)[grid]
  // - do_sgrad -> return H(source)[grid]
  MONAI_HOST void ioset(const Tensor& source, const Tensor& grid) {
    init_all();
    init_source(source);
    init_grid(grid);
    init_output();
  }

  // Usually used for pull_backward:
  // - do_pull  -> return source[grid]
  // - do_push  -> return push(target, grid, source.shape)
  // - do_grad  -> return J(source)[grid]
  // - do_sgrad -> return H(source)[grid]
  MONAI_HOST void ioset(const Tensor& source, const Tensor& grid, const Tensor& target) {
    init_all();
    init_source(source);
    init_grid(grid);
    init_target(target);
    init_output();
  }

  // Usually used for push:
  // - do_pull  -> fails
  // - do_push  -> return push(target, grid, source_size)
  // - do_grad  -> fails
  // - do_sgrad -> fails
  MONAI_HOST void ioset(IntArrayRef source_size, const Tensor& grid, const Tensor& target) {
    init_all();
    init_source(source_size);
    init_grid(grid);
    init_target(target);
    init_output();
  }

  // Usually used for count:
  // - do_pull  -> fails
  // - do_push  -> return push(ones, grid, source_size)
  // - do_grad  -> fails
  // - do_sgrad -> fails
  MONAI_HOST void ioset(IntArrayRef source_size, const Tensor& grid) {
    init_all();
    init_source(source_size);
    init_grid(grid);
    init_output();
  }

  // We just check that all tensors that we own are compatible with 32b math.
  // NOTE(review): the max_elem parameter is unused — the cached *_32b_ok flags
  // were computed against the default max_int32; confirm no caller passes a
  // custom limit and expects it to be honored.
  bool canUse32BitIndexMath(int64_t max_elem = max_int32) const {
    return src_32b_ok && trgt_32b_ok && grid_32b_ok && grad_32b_ok && out_32b_ok;
  }

 private:
  // Copied from aten/src/ATen/native/IndexingUtils.cpp in PyTorch 1.6.
  // It is used to decide to which pointer type we should dispatch to.
  // Basically, we need to make sure that the "furthest" element we need
  // to reach is less than max_elem away.
  static bool tensorCanUse32BitIndexMath(const Tensor& t, int64_t max_elem = max_int32) {
    int64_t elements = t.numel();
    if (elements >= max_elem) {
      return false;
    }
    if (elements == 0) {
      return max_elem > 0;
    }

    int64_t offset = 0;
    int64_t linearId = elements - 1;

    // NOTE: Assumes all strides are positive, which is true for now
    for (int i = t.dim() - 1; i >= 0; --i) {
      int64_t curDimIndex = linearId % t.size(i);
      int64_t curDimOffset = curDimIndex * t.stride(i);
      offset += curDimOffset;
      linearId /= t.size(i);
    }

    if (offset >= max_elem) {
      return false;
    }

    return true;
  }

  // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Field initializers invoked by the ioset() overloads above; defined
  // out-of-line below the class.
  MONAI_HOST void init_all();
  MONAI_HOST void init_source(const Tensor& source);
  MONAI_HOST void init_source(IntArrayRef source_size);
  MONAI_HOST void init_grid(const Tensor& grid);
  MONAI_HOST void init_target(const Tensor& target);
  MONAI_HOST void init_output();

  // ~~~ OPTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  int dim; // dimensionality (2 or 3)
  BoundType bound0; // boundary condition // x|W
  BoundType bound1; // boundary condition // y|H
  BoundType bound2; // boundary condition // z|D
  InterpolationType interpolation0; // interpolation order // x|W
  InterpolationType interpolation1; // interpolation order // y|H
  InterpolationType interpolation2; // interpolation order // z|D
  bool iso; // isotropic interpolation?
  bool extrapolate; // compute out-of-bound values
  bool do_pull; // sample a volume
  bool do_push; // splat a volume
  bool do_count; // splatting weights (= jacobian determinant)
  bool do_grad; // backprop: gradient of grid // pull
  bool do_sgrad; // sample spatial gradients

  // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Pre-allocated output tensors plus the raw sizes/strides/pointers of
  // every tensor involved, cached so PushPullImpl can read them directly.
  std::deque<Tensor> output;
  TensorOptions src_opt;
  TensorOptions grid_opt;
  TensorOptions trgt_opt;
  int64_t N;
  int64_t C;
  int64_t src_X;
  int64_t src_Y;
  int64_t src_Z;
  int64_t trgt_X;
  int64_t trgt_Y;
  int64_t trgt_Z;
  int64_t trgt_K;
  int64_t src_sN;
  int64_t src_sC;
  int64_t src_sX;
  int64_t src_sY;
  int64_t src_sZ;
  bool src_32b_ok;
  void* src_ptr;
  int64_t trgt_sN;
  int64_t trgt_sC;
  int64_t trgt_sX;
  int64_t trgt_sY;
  int64_t trgt_sZ;
  int64_t trgt_sK;
  bool trgt_32b_ok;
  void* trgt_ptr;
  int64_t grid_sN;
  int64_t grid_sC;
  int64_t grid_sX;
  int64_t grid_sY;
  int64_t grid_sZ;
  bool grid_32b_ok;
  void* grid_ptr;
  int64_t out_sN;
  int64_t out_sC;
  int64_t out_sX;
  int64_t out_sY;
  int64_t out_sZ;
  int64_t out_sK; // gradient dimension
  bool out_32b_ok;
  void* out_ptr;
  int64_t grad_sN;
  int64_t grad_sC;
  int64_t grad_sX;
  int64_t grad_sY;
  int64_t grad_sZ;
  bool grad_32b_ok;
  void* grad_ptr;

  // Allow PushPullImpl's constructor to access PushPullAllocator's
  // private members.
  template <typename scalar_t, typename offset_t>
  friend class PushPullImpl;
};
309
+
310
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
311
+ // INITIALISATION
312
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
313
+
314
+ MONAI_HOST
315
+ void PushPullAllocator::init_all() {
316
+ src_opt = grid_opt = trgt_opt = TensorOptions();
317
+ N = C = 1L;
318
+ src_X = src_Y = src_Z = 1L;
319
+ trgt_X = trgt_Y = trgt_Z = 1L;
320
+ trgt_K = 0L;
321
+ src_sN = src_sC = src_sX = src_sY = src_sZ = 0L;
322
+ grid_sN = grid_sC = grid_sX = grid_sY = grid_sZ = 0L;
323
+ grad_sN = grad_sC = grad_sX = grad_sY = grad_sZ = 0L;
324
+ trgt_sN = trgt_sC = trgt_sX = trgt_sY = trgt_sZ = trgt_sK = 0L;
325
+ out_sN = out_sC = out_sX = out_sY = out_sZ = out_sK = 0L;
326
+ src_ptr = trgt_ptr = grid_ptr = out_ptr = grad_ptr = static_cast<float*>(0);
327
+ src_32b_ok = trgt_32b_ok = grid_32b_ok = out_32b_ok = grad_32b_ok = true;
328
+ }
329
+
330
// Cache sizes, strides, data pointer and options of the source volume.
// Layout is (N, C, X[, Y[, Z]]); missing trailing axes get size 1 / stride 0.
MONAI_HOST
void PushPullAllocator::init_source(const Tensor& source) {
  N = source.size(0);
  C = source.size(1);
  src_X = source.size(2);
  src_Y = dim < 2 ? 1L : source.size(3);
  src_Z = dim < 3 ? 1L : source.size(4);
  src_sN = source.stride(0);
  src_sC = source.stride(1);
  src_sX = source.stride(2);
  src_sY = dim < 2 ? 0L : source.stride(3);
  src_sZ = dim < 3 ? 0L : source.stride(4);
  src_ptr = source.data_ptr();
  src_opt = source.options();
  src_32b_ok = tensorCanUse32BitIndexMath(source);
}
346
+
347
// Cache only the spatial extent of the (virtual) source volume, for the
// push/count paths where no source tensor exists — strides, pointer and
// options keep the defaults set by init_all().
MONAI_HOST
void PushPullAllocator::init_source(IntArrayRef source_size) {
  src_X = source_size[0];
  src_Y = dim < 2 ? 1L : source_size[1];
  src_Z = dim < 3 ? 1L : source_size[2];
}
353
+
354
// Cache sizes, strides, data pointer and options of the sampling grid.
// The grid's spatial extent defines the target extent; its coordinate
// channel is the LAST axis (hence grid_sC is the stride of axis dim+1).
MONAI_HOST
void PushPullAllocator::init_grid(const Tensor& grid) {
  N = grid.size(0);
  trgt_X = grid.size(1);
  trgt_Y = dim < 2 ? 1L : grid.size(2);
  trgt_Z = dim < 3 ? 1L : grid.size(3);
  grid_sN = grid.stride(0);
  grid_sX = grid.stride(1);
  grid_sY = dim < 2 ? 0L : grid.stride(2);
  grid_sZ = dim < 3 ? 0L : grid.stride(3);
  grid_sC = grid.stride(dim == 1 ? 2 : dim == 2 ? 3 : 4);
  grid_ptr = grid.data_ptr();
  grid_opt = grid.options();
  grid_32b_ok = tensorCanUse32BitIndexMath(grid);
}
369
+
370
// Cache sizes, strides, data pointer and options of the target tensor.
// If target has one extra trailing axis (target.dim() == dim + 3), that
// axis is treated as a gradient/K dimension; otherwise trgt_K/trgt_sK
// stay 0.
MONAI_HOST
void PushPullAllocator::init_target(const Tensor& target) {
  N = target.size(0);
  C = target.size(1);
  trgt_X = target.size(2);
  trgt_Y = dim < 2 ? 1L : target.size(3);
  trgt_Z = dim < 3 ? 1L : target.size(4);
  trgt_K = target.dim() == dim + 3 ? target.size(dim == 1 ? 3 : dim == 2 ? 4 : 5) : 0L;
  trgt_sN = target.stride(0);
  trgt_sC = target.stride(1);
  trgt_sX = target.stride(2);
  trgt_sY = dim < 2 ? 0L : target.stride(3);
  trgt_sZ = dim < 3 ? 0L : target.stride(4);
  trgt_sK = target.dim() == dim + 3 ? target.stride(dim == 1 ? 3 : dim == 2 ? 4 : 5) : 0L;
  trgt_ptr = target.data_ptr();
  trgt_opt = target.options();
  trgt_32b_ok = tensorCanUse32BitIndexMath(target);
}
388
+
389
+ MONAI_HOST
390
+ void PushPullAllocator::init_output() {
391
+ output.clear();
392
+ if (do_pull) {
393
+ if (dim == 1)
394
+ output.push_back(at::empty({N, C, trgt_X}, src_opt));
395
+ else if (dim == 2)
396
+ output.push_back(at::empty({N, C, trgt_X, trgt_Y}, src_opt));
397
+ else
398
+ output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z}, src_opt));
399
+ auto pull = output.back();
400
+ out_sN = pull.stride(0);
401
+ out_sC = pull.stride(1);
402
+ out_sX = pull.stride(2);
403
+ out_sY = dim < 2 ? 0L : pull.stride(3);
404
+ out_sZ = dim < 3 ? 0L : pull.stride(4);
405
+ out_sK = 0L;
406
+ out_ptr = pull.data_ptr();
407
+ out_32b_ok = tensorCanUse32BitIndexMath(pull);
408
+ } else if (do_sgrad) {
409
+ if (dim == 1)
410
+ output.push_back(at::empty({N, C, trgt_X, 1}, src_opt));
411
+ else if (dim == 2)
412
+ output.push_back(at::empty({N, C, trgt_X, trgt_Y, 2}, src_opt));
413
+ else
414
+ output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z, 3}, src_opt));
415
+ auto sgrad = output.back();
416
+ out_sN = sgrad.stride(0);
417
+ out_sC = sgrad.stride(1);
418
+ out_sX = sgrad.stride(2);
419
+ out_sY = dim < 2 ? 0L : sgrad.stride(3);
420
+ out_sZ = dim < 3 ? 0L : sgrad.stride(4);
421
+ out_sK = sgrad.stride(dim == 1 ? 3 : dim == 2 ? 4 : 5);
422
+ out_ptr = sgrad.data_ptr();
423
+ out_32b_ok = tensorCanUse32BitIndexMath(sgrad);
424
+
425
+ if (iso && interpolation0 == InterpolationType::Nearest)
426
+ sgrad.zero_();
427
+ if (iso && interpolation0 == InterpolationType::Linear && dim == 1)
428
+ sgrad.zero_();
429
+ } else if (do_push) {
430
+ if (dim == 1)
431
+ output.push_back(at::zeros({N, C, src_X}, trgt_opt));
432
+ else if (dim == 2)
433
+ output.push_back(at::zeros({N, C, src_X, src_Y}, trgt_opt));
434
+ else
435
+ output.push_back(at::zeros({N, C, src_X, src_Y, src_Z}, trgt_opt));
436
+ auto push = output.back();
437
+ out_sN = push.stride(0);
438
+ out_sC = push.stride(1);
439
+ out_sX = push.stride(2);
440
+ out_sY = dim < 2 ? 0L : push.stride(3);
441
+ out_sZ = dim < 3 ? 0L : push.stride(4);
442
+ out_sK = 0L;
443
+ out_ptr = push.data_ptr();
444
+ out_32b_ok = tensorCanUse32BitIndexMath(push);
445
+ } else if (do_count) {
446
+ if (dim == 1)
447
+ output.push_back(at::zeros({N, 1, src_X}, grid_opt));
448
+ else if (dim == 2)
449
+ output.push_back(at::zeros({N, 1, src_X, src_Y}, grid_opt));
450
+ else
451
+ output.push_back(at::zeros({N, 1, src_X, src_Y, src_Z}, grid_opt));
452
+ auto count = output.back();
453
+ out_sN = count.stride(0);
454
+ out_sC = count.stride(1);
455
+ out_sX = count.stride(2);
456
+ out_sY = dim < 2 ? 0L : count.stride(3);
457
+ out_sZ = dim < 3 ? 0L : count.stride(4);
458
+ out_sK = 0L;
459
+ out_ptr = count.data_ptr();
460
+ out_32b_ok = tensorCanUse32BitIndexMath(count);
461
+ }
462
+ if (do_grad) {
463
+ if (dim == 1)
464
+ output.push_back(at::zeros({N, trgt_X, 1}, grid_opt));
465
+ else if (dim == 2)
466
+ output.push_back(at::zeros({N, trgt_X, trgt_Y, 2}, grid_opt));
467
+ else
468
+ output.push_back(at::zeros({N, trgt_X, trgt_Y, trgt_Z, 3}, grid_opt));
469
+ auto grad = output.back();
470
+ grad_sN = grad.stride(0);
471
+ grad_sX = grad.stride(1);
472
+ grad_sY = dim < 2 ? 0L : grad.stride(2);
473
+ grad_sZ = dim < 3 ? 0L : grad.stride(3);
474
+ grad_sC = grad.stride(dim == 1 ? 2 : dim == 2 ? 3 : 4);
475
+ grad_ptr = grad.data_ptr();
476
+ out_32b_ok = tensorCanUse32BitIndexMath(grad);
477
+
478
+ if (iso && interpolation0 == InterpolationType::Nearest)
479
+ grad.zero_();
480
+ }
481
+ }
482
+
483
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
484
+ // GENERIC PUSHPULL CLASS
485
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
486
+ // This class implements the bulk of the code.
487
+ // /!\ No type and shape checking is performed here.
488
+
489
+ template <typename scalar_t, typename offset_t>
490
+ class PushPullImpl {
491
+ public:
492
+ // ~~~ CONSTRUCTOR ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
493
+ PushPullImpl(const PushPullAllocator& info)
494
+ : output(info.output),
495
+ dim(info.dim),
496
+ bound0(info.bound0),
497
+ bound1(info.bound1),
498
+ bound2(info.bound2),
499
+ interpolation0(info.interpolation0),
500
+ interpolation1(info.interpolation1),
501
+ interpolation2(info.interpolation1),
502
+ iso(info.iso),
503
+ extrapolate(info.extrapolate),
504
+ do_pull(info.do_pull),
505
+ do_push(info.do_push),
506
+ do_count(info.do_count),
507
+ do_grad(info.do_grad),
508
+ do_sgrad(info.do_sgrad),
509
+ N(static_cast<offset_t>(info.N)),
510
+ C(static_cast<offset_t>(info.C)),
511
+ src_X(static_cast<offset_t>(info.src_X)),
512
+ src_Y(static_cast<offset_t>(info.src_Y)),
513
+ src_Z(static_cast<offset_t>(info.src_Z)),
514
+ trgt_X(static_cast<offset_t>(info.trgt_X)),
515
+ trgt_Y(static_cast<offset_t>(info.trgt_Y)),
516
+ trgt_Z(static_cast<offset_t>(info.trgt_Z)),
517
+ trgt_K(static_cast<offset_t>(info.trgt_K)),
518
+ src_sN(static_cast<offset_t>(info.src_sN)),
519
+ src_sC(static_cast<offset_t>(info.src_sC)),
520
+ src_sX(static_cast<offset_t>(info.src_sX)),
521
+ src_sY(static_cast<offset_t>(info.src_sY)),
522
+ src_sZ(static_cast<offset_t>(info.src_sZ)),
523
+ src_ptr(static_cast<scalar_t*>(info.src_ptr)),
524
+ trgt_sN(static_cast<offset_t>(info.trgt_sN)),
525
+ trgt_sC(static_cast<offset_t>(info.trgt_sC)),
526
+ trgt_sX(static_cast<offset_t>(info.trgt_sX)),
527
+ trgt_sY(static_cast<offset_t>(info.trgt_sY)),
528
+ trgt_sZ(static_cast<offset_t>(info.trgt_sZ)),
529
+ trgt_sK(static_cast<offset_t>(info.trgt_sK)),
530
+ trgt_ptr(static_cast<scalar_t*>(info.trgt_ptr)),
531
+ grid_sN(static_cast<offset_t>(info.grid_sN)),
532
+ grid_sC(static_cast<offset_t>(info.grid_sC)),
533
+ grid_sX(static_cast<offset_t>(info.grid_sX)),
534
+ grid_sY(static_cast<offset_t>(info.grid_sY)),
535
+ grid_sZ(static_cast<offset_t>(info.grid_sZ)),
536
+ grid_ptr(static_cast<scalar_t*>(info.grid_ptr)),
537
+ out_sN(static_cast<offset_t>(info.out_sN)),
538
+ out_sC(static_cast<offset_t>(info.out_sC)),
539
+ out_sX(static_cast<offset_t>(info.out_sX)),
540
+ out_sY(static_cast<offset_t>(info.out_sY)),
541
+ out_sZ(static_cast<offset_t>(info.out_sZ)),
542
+ out_sK(static_cast<offset_t>(info.out_sK)),
543
+ out_ptr(static_cast<scalar_t*>(info.out_ptr)),
544
+ grad_sN(static_cast<offset_t>(info.grad_sN)),
545
+ grad_sC(static_cast<offset_t>(info.grad_sC)),
546
+ grad_sX(static_cast<offset_t>(info.grad_sX)),
547
+ grad_sY(static_cast<offset_t>(info.grad_sY)),
548
+ grad_sZ(static_cast<offset_t>(info.grad_sZ)),
549
+ grad_ptr(static_cast<scalar_t*>(info.grad_ptr)) {}
550
+
551
+ // ~~~ PUBLIC VALUE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
552
+
553
+ std::deque<Tensor> output;
554
+
555
+ // MONAI_HOST MONAI_DEVICE void printInfo() const {
556
+ // printf("dim: %d\n", dim);
557
+ // printf("do_pull: %d\n", do_pull);
558
+ // printf("do_push: %d\n", do_push);
559
+ // printf("do_count: %d\n", do_count);
560
+ // printf("do_sgrad: %d\n", do_sgrad);
561
+ // printf("do_grad: %d\n", do_grad);
562
+ // printf("bound: [%d %d %d]\n", static_cast<int>(bound0),
563
+ // static_cast<int>(bound1), static_cast<int>(bound2));
564
+ // printf("interpolation: [%d %d %d]\n", static_cast<int>(interpolation0),
565
+ // static_cast<int>(interpolation1), static_cast<int>(interpolation2));
566
+ // printf("src: [%d %d %d]\n", src_Z, src_Y, src_X);
567
+ // printf("trgt: [%d %d %d (%d)]\n", trgt_Z, trgt_Y, trgt_X, trgt_K);
568
+ // printf("N: %d\n", N);
569
+ // printf("C: %d\n", C);
570
+ // printf("src -> %lu\n", reinterpret_cast<std::uintptr_t>(src_ptr));
571
+ // printf("trgt -> %lu\n", reinterpret_cast<std::uintptr_t>(trgt_ptr));
572
+ // printf("grid -> %lu\n", reinterpret_cast<std::uintptr_t>(grid_ptr));
573
+ // printf("out -> %lu\n", reinterpret_cast<std::uintptr_t>(out_ptr));
574
+ // printf("grad -> %lu\n", reinterpret_cast<std::uintptr_t>(grad_ptr));
575
+ // }
576
+
577
+ // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
578
+
579
+ // Loop over all voxels
580
+ void loop() const;
581
+
582
+ MONAI_HOST MONAI_DEVICE int64_t voxcount() const {
583
+ return N * trgt_X * trgt_Y * trgt_Z;
584
+ }
585
+
586
+ private:
587
+ // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
588
+ MONAI_DEVICE void check1d(offset_t w, offset_t n) const;
589
+ MONAI_DEVICE void check2d(offset_t w, offset_t h, offset_t n) const;
590
+ MONAI_DEVICE void check3d(offset_t w, offset_t h, offset_t d, offset_t n) const;
591
+ MONAI_DEVICE void interpolate1d(scalar_t x, offset_t w, offset_t n) const;
592
+ MONAI_DEVICE void interpolate1d_nearest(scalar_t x, offset_t w, offset_t n) const;
593
+ MONAI_DEVICE void interpolate1d_linear(scalar_t x, offset_t w, offset_t n) const;
594
+ MONAI_DEVICE void interpolate1d_sliding(scalar_t x, offset_t w, offset_t n) const { /*TODO*/
595
+ }
596
+ MONAI_DEVICE void interpolate1d_sliding_nearest(scalar_t x, offset_t w, offset_t n) const { /*TODO*/
597
+ }
598
+ MONAI_DEVICE void interpolate1d_sliding_linear(scalar_t x, offset_t w, offset_t n) const { /*TODO*/
599
+ }
600
+ MONAI_DEVICE void interpolate2d(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const;
601
+ MONAI_DEVICE void interpolate2d_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const;
602
+ MONAI_DEVICE void interpolate2d_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const;
603
+ MONAI_DEVICE void interpolate2d_sliding(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { /*TODO*/
604
+ }
605
+ MONAI_DEVICE void interpolate2d_sliding_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n)
606
+ const { /*TODO*/
607
+ }
608
+ MONAI_DEVICE void interpolate2d_sliding_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n)
609
+ const { /*TODO*/
610
+ }
611
+ MONAI_DEVICE void interpolate3d(scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n)
612
+ const;
613
+ MONAI_DEVICE void interpolate3d_nearest(
614
+ scalar_t x,
615
+ scalar_t y,
616
+ scalar_t z,
617
+ offset_t w,
618
+ offset_t h,
619
+ offset_t d,
620
+ offset_t n) const;
621
+ MONAI_DEVICE void interpolate3d_trilinear(
622
+ scalar_t x,
623
+ scalar_t y,
624
+ scalar_t z,
625
+ offset_t w,
626
+ offset_t h,
627
+ offset_t d,
628
+ offset_t n) const;
629
+ MONAI_DEVICE void interpolate3d_sliding(
630
+ scalar_t x,
631
+ scalar_t y,
632
+ scalar_t z,
633
+ offset_t w,
634
+ offset_t h,
635
+ offset_t d,
636
+ offset_t n) const { /*TODO*/
637
+ }
638
+ MONAI_DEVICE void interpolate3d_sliding_nearest(
639
+ scalar_t x,
640
+ scalar_t y,
641
+ scalar_t z,
642
+ offset_t w,
643
+ offset_t h,
644
+ offset_t d,
645
+ offset_t n) const { /*TODO*/
646
+ }
647
+ MONAI_DEVICE void interpolate3d_sliding_trilinear(
648
+ scalar_t x,
649
+ scalar_t y,
650
+ scalar_t z,
651
+ offset_t w,
652
+ offset_t h,
653
+ offset_t d,
654
+ offset_t n) const { /*TODO*/
655
+ }
656
+
657
+ // ~~~ OPTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
658
+ int dim; // dimensionality (2 or 3)
659
+ BoundType bound0; // boundary condition // x|W
660
+ BoundType bound1; // boundary condition // y|H
661
+ BoundType bound2; // boundary condition // z|D
662
+ InterpolationType interpolation0; // interpolation order // x|W
663
+ InterpolationType interpolation1; // interpolation order // y|H
664
+ InterpolationType interpolation2; // interpolation order // z|D
665
+ bool iso; // isotropic interpolation?
666
+ bool extrapolate; // compute out-of-bound values
667
+ bool do_pull; // sample a volume
668
+ bool do_push; // splat a volume
669
+ bool do_count; // splatting weights (= jacobian determinant)
670
+ bool do_grad; // backprop: gradient of grid // pull
671
+ bool do_sgrad; // sample spatial gradients
672
+
673
+ // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
674
+ offset_t N;
675
+ offset_t C;
676
+ offset_t src_X;
677
+ offset_t src_Y;
678
+ offset_t src_Z;
679
+ offset_t trgt_X;
680
+ offset_t trgt_Y;
681
+ offset_t trgt_Z;
682
+ offset_t trgt_K;
683
+ offset_t src_sN;
684
+ offset_t src_sC;
685
+ offset_t src_sX;
686
+ offset_t src_sY;
687
+ offset_t src_sZ;
688
+ scalar_t* src_ptr;
689
+ offset_t trgt_sN;
690
+ offset_t trgt_sC;
691
+ offset_t trgt_sX;
692
+ offset_t trgt_sY;
693
+ offset_t trgt_sZ;
694
+ offset_t trgt_sK;
695
+ scalar_t* trgt_ptr;
696
+ offset_t grid_sN;
697
+ offset_t grid_sC;
698
+ offset_t grid_sX;
699
+ offset_t grid_sY;
700
+ offset_t grid_sZ;
701
+ scalar_t* grid_ptr;
702
+ offset_t out_sN;
703
+ offset_t out_sC;
704
+ offset_t out_sX;
705
+ offset_t out_sY;
706
+ offset_t out_sZ;
707
+ offset_t out_sK; // gradient dimension
708
+ scalar_t* out_ptr;
709
+ offset_t grad_sN;
710
+ offset_t grad_sC;
711
+ offset_t grad_sX;
712
+ offset_t grad_sY;
713
+ offset_t grad_sZ;
714
+ scalar_t* grad_ptr;
715
+ };
716
+
717
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
718
+ // LOOP
719
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
720
+
721
+ // This bit loops over all target voxels. We therefore need to
722
+ // convert linear indices to multivariate indices. The way I do it
723
+ // might not be optimal.
724
+ // Note that I parallelize across all voxels (whereas ATen's grid
725
+ // sampler is only parallelized across batches).
726
+ //
727
+ // TODO: check that the default grain size is optimal. We do quite a lot
728
+ // of compute per voxel, so a smaller value might be better suited.
729
// Host-side driver: visit every target voxel once, dispatching to
// check1d/2d/3d according to `dim`.
template <typename scalar_t, typename offset_t>
MONAI_HOST void PushPullImpl<scalar_t, offset_t>::loop() const {
#if !(AT_PARALLEL_OPENMP)
  if (do_push) {
    // I do not have access to atomic operations so I cannot
    // parallelize across voxels: splatting (push) may write the same
    // output voxel from several target voxels, so only the batch
    // dimension is parallelized here.
    at::parallel_for(0, N, 0, [&](offset_t start, offset_t end) {
      for (offset_t n = start; n < end; ++n) {
        if (dim == 1) {
          for (offset_t w = 0; w < trgt_X; ++w)
            check1d(w, n);
        } else if (dim == 2) {
          for (offset_t h = 0; h < trgt_Y; ++h)
            for (offset_t w = 0; w < trgt_X; ++w)
              check2d(w, h, n);
        } else {
          for (offset_t d = 0; d < trgt_Z; ++d)
            for (offset_t h = 0; h < trgt_Y; ++h)
              for (offset_t w = 0; w < trgt_X; ++w)
                check3d(w, h, d, n);
        }
      }
    });
    return;
  }

#endif
  // Parallelize across voxels
  offset_t trgt_NXYZ = trgt_Z * trgt_Y * trgt_X * N;
  offset_t trgt_XYZ = trgt_Z * trgt_Y * trgt_X;
  offset_t trgt_YZ = trgt_Z * trgt_Y;
  at::parallel_for(0, trgt_NXYZ, GRAIN_SIZE, [&](offset_t start, offset_t end) {
    offset_t n, w, h, d;
    for (offset_t i = start; i < end; ++i) {
      // Convert index: linear to sub.
      // Layout is (n, w, h, d) fastest-last; unused trailing dims are 1,
      // so the decomposition stays valid for dim < 3.
      n = (i / trgt_XYZ);
      w = (i / trgt_YZ) % trgt_X;
      h = (i / trgt_Z) % trgt_Y;
      d = i % trgt_Z;

      if (dim == 1)
        check1d(w, n);
      else if (dim == 2)
        check2d(w, h, n);
      else
        check3d(w, h, d, n);
    }
  });
}
778
+
779
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
780
+ // CHECK OUT-OF-BOUND
781
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
782
+
783
+ // Here, we:
784
+ // 1) read the [x,y,z] source coordinate for the current target voxel
785
+ // 2) check if the source coordinate is in bounds
786
+
787
// Per-voxel entry point (3D): read the sampled (x, y, z) coordinate from
// the grid, zero all requested outputs and return if it falls out of
// bounds (unless `extrapolate` is set), otherwise dispatch to the
// matching interpolation routine.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check3d(offset_t w, offset_t h, offset_t d, offset_t n) const {
  // get the corresponding input x, y, z co-ordinates from grid
  // (the 3 coordinate components are laid out along the grid channel stride)
  scalar_t* grid_ptr_NXYZ = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY + d * grid_sZ;
  scalar_t x = *grid_ptr_NXYZ;
  scalar_t y = grid_ptr_NXYZ[grid_sC];
  scalar_t z = grid_ptr_NXYZ[grid_sC * 2];

  // Check if out-of-bound (TINY gives a small tolerance at the border)
  if (!(extrapolate ||
        (inbounds(x, src_X, static_cast<scalar_t>(TINY)) && inbounds(y, src_Y, static_cast<scalar_t>(TINY)) &&
         inbounds(z, src_Z, static_cast<scalar_t>(TINY))))) {
    // Out of bounds: write zeros into every output this voxel owns.
    // (push/count need no zeroing here: their outputs are pre-zeroed
    // and this voxel simply contributes nothing.)
    if (do_pull || do_sgrad) {
      scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
      for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) {
        *out_ptr_NCXYZ = static_cast<scalar_t>(0);
        if (do_sgrad) {
          // sgrad stores d/dx, d/dy, d/dz along the out_sK stride
          out_ptr_NCXYZ[out_sK] = static_cast<scalar_t>(0);
          out_ptr_NCXYZ[out_sK * 2] = static_cast<scalar_t>(0);
        }
      }
    }
    if (do_grad) {
      scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ;
      (*grad_ptr_NXYZ) = static_cast<scalar_t>(0);
      grad_ptr_NXYZ[grad_sC] = static_cast<scalar_t>(0);
      grad_ptr_NXYZ[grad_sC * 2] = static_cast<scalar_t>(0);
    }
    return;
  }

  // Next step: dispatch. Fast paths for isotropic nearest/linear;
  // generic interpolate3d otherwise. Sliding boundaries are dispatched
  // to the (still unimplemented) *_sliding variants.
  if (bound0 == BoundType::Sliding) {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate3d_sliding_nearest(x, y, z, w, h, d, n);
        case 1:
          return interpolate3d_sliding_trilinear(x, y, z, w, h, d, n);
      }
    return interpolate3d_sliding(x, y, z, w, h, d, n);
  } else {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate3d_nearest(x, y, z, w, h, d, n);
        case 1:
          return interpolate3d_trilinear(x, y, z, w, h, d, n);
      }
    return interpolate3d(x, y, z, w, h, d, n);
  }
}
839
+
840
// Per-voxel entry point (2D): same logic as check3d with the z axis
// removed — read (x, y) from the grid, zero outputs if out of bounds,
// otherwise dispatch to the matching 2D interpolation routine.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check2d(offset_t w, offset_t h, offset_t n) const {
  // get the corresponding input x, y co-ordinates from grid
  scalar_t* grid_ptr_NXY = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY;
  scalar_t x = *grid_ptr_NXY;
  scalar_t y = grid_ptr_NXY[grid_sC];

  // Check if out-of-bound
  if (!(extrapolate ||
        (inbounds(x, src_X, static_cast<scalar_t>(TINY)) && inbounds(y, src_Y, static_cast<scalar_t>(TINY))))) {
    if (do_pull || do_sgrad) {
      scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY;
      for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) {
        *out_ptr_NCXY = static_cast<scalar_t>(0);
        if (do_sgrad)
          // 2D sgrad has one extra component (d/dy) along out_sK
          out_ptr_NCXY[out_sK] = static_cast<scalar_t>(0);
      }
    }
    if (do_grad) {
      scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY;
      (*grad_ptr_NXY) = static_cast<scalar_t>(0);
      grad_ptr_NXY[grad_sC] = static_cast<scalar_t>(0);
    }
    return;
  }

  // Next step: dispatch (fast isotropic nearest/bilinear paths first)
  if (bound0 == BoundType::Sliding) {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate2d_sliding_nearest(x, y, w, h, n);
        case 1:
          return interpolate2d_sliding_bilinear(x, y, w, h, n);
      }
    return interpolate2d_sliding(x, y, w, h, n);
  } else {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate2d_nearest(x, y, w, h, n);
        case 1:
          return interpolate2d_bilinear(x, y, w, h, n);
      }
    return interpolate2d(x, y, w, h, n);
  }
}
887
+
888
// Per-voxel entry point (1D): read x from the grid, zero outputs if out
// of bounds, otherwise dispatch to the matching 1D interpolation routine.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check1d(offset_t w, offset_t n) const {
  // get the corresponding input x co-ordinate from grid
  scalar_t* grid_ptr_NX = grid_ptr + n * grid_sN + w * grid_sX;
  scalar_t x = *grid_ptr_NX;

  // Check if out-of-bound
  if (!(extrapolate || inbounds(x, src_X, static_cast<scalar_t>(TINY)))) {
    if (do_pull || do_sgrad) {
      scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX;
      for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC) {
        *out_ptr_NCX = static_cast<scalar_t>(0);
        if (do_sgrad)
          out_ptr_NCX[out_sK] = static_cast<scalar_t>(0);
      }
    }
    if (do_grad) {
      scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX;
      (*grad_ptr_NX) = static_cast<scalar_t>(0);
      // NOTE(review): this also zeroes grad[grad_sC]; in 1D the grid has a
      // single channel, so this write presumes the gradient tensor still has
      // a second channel slot — confirm against the allocator.
      grad_ptr_NX[grad_sC] = static_cast<scalar_t>(0);
    }
    return;
  }

  // Next step: dispatch (fast isotropic nearest/linear paths first)
  if (bound0 == BoundType::Sliding) {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate1d_sliding_nearest(x, w, n);
        case 1:
          return interpolate1d_sliding_linear(x, w, n);
      }
    return interpolate1d_sliding(x, w, n);
  } else {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate1d_nearest(x, w, n);
        case 1:
          return interpolate1d_linear(x, w, n);
      }
    return interpolate1d(x, w, n);
  }
}
933
+
934
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
935
+ // GENERIC INTERPOLATION 3D
936
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
937
+
938
// Generic-order 3D interpolation at one target voxel (w, h, d, n) whose
// sampled source coordinate is (x, y, z). A single pass over the support
// of the separable B-spline basis performs whichever of pull / sgrad /
// push / count / grad was requested. The basis weights (w*), first
// derivatives (g*) and second derivatives (h*) are precomputed per axis,
// then combined in the triple loop below.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d(
    scalar_t x,
    scalar_t y,
    scalar_t z,
    offset_t w,
    offset_t h,
    offset_t d,
    offset_t n) const {
  // Get corner pixel values from (x, y, z): [b?0, b?1] is the (inclusive)
  // integer support of the basis along each axis for the given order.
  offset_t bx0, bx1, by0, by1, bz0, bz1;
  interpolation::bounds(interpolation0, x, bx0, bx1);
  interpolation::bounds(interpolation1, y, by0, by1);
  interpolation::bounds(interpolation2, z, bz0, bz1);
  offset_t dbx = bx1 - bx0;
  offset_t dby = by1 - by0;
  offset_t dbz = bz1 - bz0;

  // Pre-compute offsets and target value.
  // `target` caches the incoming (backward) tensor for this voxel:
  // target[c] is the plain value; when trgt_K > 0 (diff w.r.t. sgrad),
  // target[c + C] and target[c + 2C] hold the extra gradient channels.
  scalar_t* src_ptr_NC0 = src_ptr + n * src_sN;
  scalar_t* out_ptr_NC0 = out_ptr + n * out_sN;
  scalar_t* out_ptr_NCXYZ0 = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
  scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ;
  scalar_t target[3 * MONAI_MAX_NUM_CHANNELS];
  if (trgt_ptr && (do_push || do_grad))
    for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC) {
      target[c] = *trgt_ptr_NCXYZ;
      if (trgt_K > 0) {
        target[c + C] = trgt_ptr_NCXYZ[trgt_sK];
        target[c + C * 2] = trgt_ptr_NCXYZ[trgt_sK * 2];
      }
    }

  // Initialize output (pull/sgrad accumulate into it below)
  scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0;
  if (do_pull || do_sgrad) {
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) {
      *out_ptr_NCXYZ = static_cast<scalar_t>(0);
      if (do_sgrad) {
        out_ptr_NCXYZ[out_sK] = static_cast<scalar_t>(0);
        out_ptr_NCXYZ[out_sK * 2] = static_cast<scalar_t>(0);
      }
    }
  }

  // Pre-compute indices/weights/grad (support is at most 8 nodes per axis)
  scalar_t wx[8], wy[8], wz[8]; // B-spline weights
  scalar_t gx[8], gy[8], gz[8]; // B-spline derivatives
  scalar_t hx[8], hy[8], hz[8]; // B-spline 2nd derivatives
  offset_t ix[8], iy[8], iz[8]; // Warped indices (boundary-mapped)
  uint8_t sx[8], sy[8], sz[8]; // Boundary sign flips for each index

  {
    scalar_t *owz = static_cast<scalar_t*>(wz), *ogz = static_cast<scalar_t*>(gz), *ohz = static_cast<scalar_t*>(hz);
    offset_t* oiz = static_cast<offset_t*>(iz);
    uint8_t* osz = static_cast<uint8_t*>(sz);
    for (offset_t bz = bz0; bz <= bz1; ++bz) {
      scalar_t dz = z - bz;
      *(owz++) = interpolation::fastweight(interpolation2, dz);
      if (do_grad || do_sgrad)
        *(ogz++) = interpolation::fastgrad(interpolation2, dz);
      // NOTE(review): hessians gated on trgt_sK > 1 (a stride), while the
      // consumer branch below is gated on trgt_K (a size) — confirm the two
      // conditions coincide for all layouts.
      if (do_grad && trgt_sK > 1)
        *(ohz++) = interpolation::fasthess(interpolation2, dz);
      *(osz++) = bound::sign(bound2, bz, src_Z);
      *(oiz++) = bound::index(bound2, bz, src_Z);
    }
  }
  {
    scalar_t *owy = static_cast<scalar_t*>(wy), *ogy = static_cast<scalar_t*>(gy), *ohy = static_cast<scalar_t*>(hy);
    offset_t* oiy = static_cast<offset_t*>(iy);
    uint8_t* osy = static_cast<uint8_t*>(sy);
    for (offset_t by = by0; by <= by1; ++by) {
      scalar_t dy = y - by;
      *(owy++) = interpolation::fastweight(interpolation1, dy);
      if (do_grad || do_sgrad)
        *(ogy++) = interpolation::fastgrad(interpolation1, dy);
      if (do_grad && trgt_sK > 1)
        *(ohy++) = interpolation::fasthess(interpolation1, dy);
      *(osy++) = bound::sign(bound1, by, src_Y);
      *(oiy++) = bound::index(bound1, by, src_Y);
    }
  }
  {
    scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx);
    offset_t* oix = static_cast<offset_t*>(ix);
    uint8_t* osx = static_cast<uint8_t*>(sx);
    for (offset_t bx = bx0; bx <= bx1; ++bx) {
      scalar_t dx = x - bx;
      *(owx++) = interpolation::fastweight(interpolation0, dx);
      if (do_grad || do_sgrad)
        *(ogx++) = interpolation::fastgrad(interpolation0, dx);
      if (do_grad && trgt_sK > 1)
        *(ohx++) = interpolation::fasthess(interpolation0, dx);
      *(osx++) = bound::sign(bound0, bx, src_X);
      *(oix++) = bound::index(bound0, bx, src_X);
    }
  }

  // Convolve coefficients with basis functions.
  // ogx/ogy/ogz accumulate the grid gradient across the whole support.
  scalar_t ogx, ogy, ogz;
  ogx = ogy = ogz = static_cast<scalar_t>(0);
  for (offset_t k = 0; k <= dbz; ++k) {
    offset_t ooz = iz[k] * out_sZ;
    offset_t osz = iz[k] * src_sZ;
    uint8_t szz = sz[k];
    scalar_t wzz = wz[k];
    scalar_t gzz = gz[k];
    scalar_t hzz = hz[k];
    for (offset_t j = 0; j <= dby; ++j) {
      offset_t ooyz = ooz + iy[j] * out_sY;
      offset_t osyz = osz + iy[j] * src_sY;
      uint8_t syz = szz * sy[j];
      scalar_t wyy = wy[j];
      scalar_t gyy = gy[j];
      scalar_t hyy = hy[j];
      for (offset_t i = 0; i <= dbx; ++i) {
        offset_t ooxyz = ooyz + ix[i] * out_sX;
        offset_t osxyz = osyz + ix[i] * src_sX;
        uint8_t sxyz = syz * sx[i];
        scalar_t wxx = wx[i];
        scalar_t gxx = gx[i];
        scalar_t hxx = hx[i];

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        // out[n,c,w,h,d] += src[n,c,ix,iy,iz] * wx*wy*wz
        if (do_pull) {
          scalar_t* src_ptr_NC = src_ptr_NC0;
          scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0;
          for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC)
            *out_ptr_NCXYZ += bound::get(src_ptr_NC, osxyz, sxyz) * (wxx * wyy * wzz);
        }

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~
        // Spatial gradient of the pulled value: replace the weight along
        // one axis by its derivative, one output channel per axis.
        else if (do_sgrad) {
          scalar_t* src_ptr_NC = src_ptr_NC0;
          scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0;
          for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) {
            scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz);
            *out_ptr_NCXYZ += src * (gxx * wyy * wzz);
            out_ptr_NCXYZ[out_sK] += src * (wxx * gyy * wzz);
            out_ptr_NCXYZ[2 * out_sK] += src * (wxx * wyy * gzz);
          }
        }

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        // Adjoint of pull/sgrad: splat the target value into the output
        // volume (bound::add handles boundary sign and atomicity rules).
        else if (do_push) {
          if (trgt_K == 0) {
            // Diff w.r.t. push/pull
            scalar_t* out_ptr_NC = out_ptr_NC0;
            for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
              bound::add(out_ptr_NC, ooxyz, (wxx * wyy * wzz) * target[c], sxyz);
          } else {
            // Diff w.r.t. sgrad
            scalar_t* out_ptr_NC = out_ptr_NC0;
            for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) {
              scalar_t val = (gxx * wyy * wzz) * target[c] + (wxx * gyy * wzz) * target[c + C] +
                  (wxx * wyy * gzz) * target[c + C * 2];
              bound::add(out_ptr_NC, ooxyz, val, sxyz);
            }
          }
        }

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~
        // Splat a constant 1: accumulates the splatting weights.
        else if (do_count) {
          bound::add(out_ptr_NC0, ooxyz, (wxx * wyy * wzz), sxyz);
        }

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        // Gradient w.r.t. the sampling grid, accumulated into ogx/ogy/ogz.
        if (do_grad) {
          if (trgt_K == 0) {
            // Diff w.r.t. pull/push
            scalar_t* src_ptr_NC = src_ptr_NC0;
            scalar_t dot = static_cast<scalar_t>(0);
            for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
              scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz);
              dot += (trgt_ptr ? src * target[c] : src);
              // trgt_ptr == 0 in the backward pass of 'count'
            }
            ogx += (gxx * wyy * wzz) * dot;
            ogy += (wxx * gyy * wzz) * dot;
            ogz += (wxx * wyy * gzz) * dot;
          } else {
            // Diff w.r.t. sgrad (needs 2nd derivatives h**)
            scalar_t* src_ptr_NC = src_ptr_NC0;
            scalar_t dot0, dot1, dot2;
            dot0 = dot1 = dot2 = static_cast<scalar_t>(0);
            for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
              scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz);
              dot0 += src * target[c];
              dot1 += src * target[c + C];
              dot2 += src * target[c + C * 2];
            }
            ogx += (hxx * wyy * wzz) * dot0 + (gxx * gyy * wzz) * dot1 + (gxx * wyy * gzz) * dot2;
            ogy += (gxx * gyy * wzz) * dot0 + (wxx * hyy * wzz) * dot1 + (wxx * gyy * gzz) * dot2;
            ogz += (gxx * wyy * gzz) * dot0 + (wxx * gyy * gzz) * dot1 + (wxx * wyy * hzz) * dot2;
          }
        }

      } // x
    } // y
  } // z

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Write the accumulated grid gradient for this voxel.
  if (do_grad) {
    scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ;
    (*grad_ptr_NXYZ) = ogx;
    grad_ptr_NXYZ[grad_sC] = ogy;
    grad_ptr_NXYZ[grad_sC * 2] = ogz;
  }
}
1147
+
1148
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1149
+ // GENERIC INTERPOLATION 2D
1150
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1151
+
1152
// Generic-order 2D interpolation at one target voxel (w, h, n) whose
// sampled source coordinate is (x, y). Structure mirrors interpolate3d
// with the z axis removed; a single pass over the separable B-spline
// support performs whichever of pull / sgrad / push / count / grad was
// requested.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d(
    scalar_t x,
    scalar_t y,
    offset_t w,
    offset_t h,
    offset_t n) const {
  // Get corner pixel values from (x, y): [b?0, b?1] is the inclusive
  // integer support of the basis along each axis.
  offset_t bx0, bx1, by0, by1;
  interpolation::bounds(interpolation0, x, bx0, bx1);
  interpolation::bounds(interpolation1, y, by0, by1);
  offset_t dbx = bx1 - bx0;
  offset_t dby = by1 - by0;

  // Pre-compute offsets and target value.
  // target[c] is the incoming (backward) value; when trgt_K > 0,
  // target[c + C] holds the extra sgrad channel.
  scalar_t* src_ptr_NC0 = src_ptr + n * src_sN;
  scalar_t* out_ptr_NC0 = out_ptr + n * out_sN;
  scalar_t* out_ptr_NCXY0 = out_ptr + n * out_sN + w * out_sX + h * out_sY;
  scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY;
  scalar_t target[2 * MONAI_MAX_NUM_CHANNELS];
  if (trgt_ptr && (do_push || do_grad))
    for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC) {
      target[c] = *trgt_ptr_NCXY;
      if (trgt_K > 0) {
        target[c + C] = trgt_ptr_NCXY[trgt_sK];
      }
    }

  // Initialize output (pull/sgrad accumulate into it below)
  scalar_t* out_ptr_NCXY = out_ptr_NCXY0;
  if (do_pull || do_sgrad) {
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) {
      *out_ptr_NCXY = static_cast<scalar_t>(0);
      if (do_sgrad) {
        out_ptr_NCXY[out_sK] = static_cast<scalar_t>(0);
      }
    }
  }

  // Pre-compute indices/weights/grad (support is at most 8 nodes per axis)
  scalar_t wx[8], wy[8]; // B-spline weights
  scalar_t gx[8], gy[8]; // B-spline derivatives
  scalar_t hx[8], hy[8]; // B-spline 2nd derivatives
  offset_t ix[8], iy[8]; // Warped indices (boundary-mapped)
  uint8_t sx[8], sy[8]; // Boundary sign flips for each index

  {
    scalar_t *owy = static_cast<scalar_t*>(wy), *ogy = static_cast<scalar_t*>(gy), *ohy = static_cast<scalar_t*>(hy);
    offset_t* oiy = static_cast<offset_t*>(iy);
    uint8_t* osy = static_cast<uint8_t*>(sy);
    for (offset_t by = by0; by <= by1; ++by) {
      scalar_t dy = y - by;
      *(owy++) = interpolation::fastweight(interpolation1, dy);
      if (do_grad || do_sgrad)
        *(ogy++) = interpolation::fastgrad(interpolation1, dy);
      // NOTE(review): hessians gated on trgt_sK > 1 (a stride) while the
      // consumer branch uses trgt_K (a size) — confirm the conditions match.
      if (do_grad && trgt_sK > 1)
        *(ohy++) = interpolation::fasthess(interpolation1, dy);
      *(osy++) = bound::sign(bound1, by, src_Y);
      *(oiy++) = bound::index(bound1, by, src_Y);
    }
  }
  {
    scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx);
    offset_t* oix = static_cast<offset_t*>(ix);
    uint8_t* osx = static_cast<uint8_t*>(sx);
    for (offset_t bx = bx0; bx <= bx1; ++bx) {
      scalar_t dx = x - bx;
      *(owx++) = interpolation::fastweight(interpolation0, dx);
      if (do_grad || do_sgrad)
        *(ogx++) = interpolation::fastgrad(interpolation0, dx);
      if (do_grad && trgt_sK > 1)
        *(ohx++) = interpolation::fasthess(interpolation0, dx);
      *(osx++) = bound::sign(bound0, bx, src_X);
      *(oix++) = bound::index(bound0, bx, src_X);
    }
  }

  // Convolve coefficients with basis functions.
  // ogx/ogy accumulate the grid gradient across the whole support.
  scalar_t ogx, ogy;
  ogx = ogy = static_cast<scalar_t>(0);
  for (offset_t j = 0; j <= dby; ++j) {
    offset_t ooy = iy[j] * out_sY;
    offset_t osy = iy[j] * src_sY;
    uint8_t syy = sy[j];
    scalar_t wyy = wy[j];
    scalar_t gyy = gy[j];
    scalar_t hyy = hy[j];
    for (offset_t i = 0; i <= dbx; ++i) {
      offset_t ooxy = ooy + ix[i] * out_sX;
      offset_t osxy = osy + ix[i] * src_sX;
      uint8_t sxy = syy * sx[i];
      scalar_t wxx = wx[i];
      scalar_t gxx = gx[i];
      scalar_t hxx = hx[i];

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      // out[n,c,w,h] += src[n,c,ix,iy] * wx*wy
      if (do_pull) {
        scalar_t* src_ptr_NC = src_ptr_NC0;
        scalar_t* out_ptr_NCXY = out_ptr_NCXY0;
        for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC)
          *out_ptr_NCXY += bound::get(src_ptr_NC, osxy, sxy) * (wxx * wyy);
      }

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      // Spatial gradient: derivative weight along one axis per channel.
      else if (do_sgrad) {
        scalar_t* src_ptr_NC = src_ptr_NC0;
        scalar_t* out_ptr_NCXY = out_ptr_NCXY0;
        for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) {
          scalar_t src = bound::get(src_ptr_NC, osxy, sxy);
          *out_ptr_NCXY += src * (gxx * wyy);
          out_ptr_NCXY[out_sK] += src * (wxx * gyy);
        }
      }

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      // Adjoint of pull/sgrad: splat the target value into the output.
      else if (do_push) {
        if (trgt_K == 0) {
          // Diff w.r.t. push/pull
          scalar_t* out_ptr_NC = out_ptr_NC0;
          for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
            bound::add(out_ptr_NC, ooxy, (wxx * wyy) * target[c], sxy);
        } else {
          // Diff w.r.t. sgrad
          scalar_t* out_ptr_NC = out_ptr_NC0;
          for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) {
            scalar_t val = (gxx * wyy) * target[c] + (wxx * gyy) * target[c + C];
            bound::add(out_ptr_NC, ooxy, val, sxy);
          }
        }
      }

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      // Splat a constant 1: accumulates the splatting weights.
      else if (do_count) {
        bound::add(out_ptr_NC0, ooxy, (wxx * wyy), sxy);
      }

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      // Gradient w.r.t. the sampling grid, accumulated into ogx/ogy.
      if (do_grad) {
        if (trgt_K == 0) {
          // Diff w.r.t. pull/push
          scalar_t* src_ptr_NC = src_ptr_NC0;
          scalar_t dot = static_cast<scalar_t>(0);
          for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
            scalar_t src = bound::get(src_ptr_NC, osxy, sxy);
            dot += (trgt_ptr ? src * target[c] : src);
            // trgt_ptr == 0 in the backward pass of 'count'
          }
          ogx += (gxx * wyy) * dot;
          ogy += (wxx * gyy) * dot;
        } else {
          // Diff w.r.t. sgrad (needs 2nd derivatives h**)
          scalar_t* src_ptr_NC = src_ptr_NC0;
          scalar_t dot0, dot1;
          dot0 = dot1 = static_cast<scalar_t>(0);
          for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
            scalar_t src = bound::get(src_ptr_NC, osxy, sxy);
            dot0 += src * target[c];
            dot1 += src * target[c + C];
          }
          ogx += (hxx * wyy) * dot0 + (gxx * gyy) * dot1;
          ogy += (gxx * gyy) * dot0 + (wxx * hyy) * dot1;
        }
      }

    } // x
  } // y

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Write the accumulated grid gradient for this voxel.
  if (do_grad) {
    scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY;
    (*grad_ptr_NXY) = ogx;
    grad_ptr_NXY[grad_sC] = ogy;
  }
}
1326
+
1327
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1328
+ // GENERIC INTERPOLATION 1D
1329
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1330
+
1331
// Generic (arbitrary-order B-spline) interpolation along a single X axis.
// Depending on the do_* mode flags set on this object, a single call performs:
//   pull  : sample the source at x into the output,
//   sgrad : sample the spatial gradient of the source at x,
//   push  : splat the target value at x into the output,
//   count : splat a unit weight at x,
//   grad  : accumulate the derivative w.r.t. the sampling coordinate x.
// (x) is the continuous sampling coordinate, (w) the output voxel index,
// (n) the batch index.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate1d(scalar_t x, offset_t w, offset_t n) const {
  // Get the span of source nodes [bx0, bx1] whose basis support contains x
  offset_t bx0, bx1;
  interpolation::bounds(interpolation0, x, bx0, bx1);
  offset_t dbx = bx1 - bx0;

  // Pre-compute base pointers and cache the target value(s) for this voxel.
  // target holds C values, plus C gradient values when trgt_K > 0
  // (backward pass w.r.t. sgrad).
  scalar_t* src_ptr_NC0 = src_ptr + n * src_sN;
  scalar_t* out_ptr_NC0 = out_ptr + n * out_sN;
  scalar_t* out_ptr_NCX0 = out_ptr + n * out_sN + w * out_sX;
  scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX;
  scalar_t target[2 * MONAI_MAX_NUM_CHANNELS];
  if (trgt_ptr && (do_push || do_grad))
    for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC) {
      target[c] = *trgt_ptr_NCX;
      if (trgt_K > 0) {
        target[c + C] = trgt_ptr_NCX[trgt_sK];
      }
    }

  // Initialize output to zero before accumulating over basis nodes
  scalar_t* out_ptr_NCX = out_ptr_NCX0;
  if (do_pull || do_sgrad) {
    for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC) {
      *out_ptr_NCX = static_cast<scalar_t>(0);
      if (do_sgrad) {
        out_ptr_NCX[out_sK] = static_cast<scalar_t>(0);
      }
    }
  }

  // Pre-compute per-node indices/weights/derivatives (at most 8 support nodes)
  scalar_t wx[8]; // B-spline weights
  scalar_t gx[8]; // B-spline 1st derivatives
  scalar_t hx[8]; // B-spline 2nd derivatives
  offset_t ix[8]; // Warped (boundary-folded) indices
  uint8_t sx[8]; // Out-of-bound sign flags (for the boundary condition)

  {
    // Write cursors into the arrays above; this inner scope keeps the
    // pointer name `ogx` from clashing with the scalar `ogx` declared below.
    scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx);
    offset_t* oix = static_cast<offset_t*>(ix);
    uint8_t* osx = static_cast<uint8_t*>(sx);
    for (offset_t bx = bx0; bx <= bx1; ++bx) {
      scalar_t dx = x - bx;
      *(owx++) = interpolation::fastweight(interpolation0, dx);
      if (do_grad || do_sgrad)
        *(ogx++) = interpolation::fastgrad(interpolation0, dx);
      if (do_grad && trgt_sK > 1)
        *(ohx++) = interpolation::fasthess(interpolation0, dx);
      // Sign must be computed on the unwarped index, before bound::index
      *(osx++) = bound::sign(bound0, bx, src_X);
      *(oix++) = bound::index(bound0, bx, src_X);
    }
  }

  // Convolve coefficients with basis functions.
  // ogx accumulates the coordinate gradient across all support nodes.
  scalar_t ogx;
  ogx = static_cast<scalar_t>(0);
  for (offset_t i = 0; i <= dbx; ++i) {
    offset_t oox = ix[i] * out_sX;
    offset_t osx = ix[i] * src_sX;
    uint8_t sxx = sx[i];
    scalar_t wxx = wx[i];
    scalar_t gxx = gx[i];
    // NOTE(review): hx is only filled when do_grad && trgt_sK > 1; in other
    // modes this reads uninitialized storage, though hxx is then unused —
    // confirm this is intentional.
    scalar_t hxx = hx[i];

    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if (do_pull) {
      scalar_t* src_ptr_NC = src_ptr_NC0;
      scalar_t* out_ptr_NCX = out_ptr_NCX0;
      for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC)
        *out_ptr_NCX += bound::get(src_ptr_NC, osx, sxx) * wxx;
    }

    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    else if (do_sgrad) {
      scalar_t* src_ptr_NC = src_ptr_NC0;
      scalar_t* out_ptr_NCX = out_ptr_NCX0;
      for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) {
        scalar_t src = bound::get(src_ptr_NC, osx, sxx);
        *out_ptr_NCX += src * gxx;
      }
    }

    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    else if (do_push) {
      if (trgt_K == 0) {
        // Diff w.r.t. push/pull: splat target weighted by basis value
        scalar_t* out_ptr_NC = out_ptr_NC0;
        for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
          bound::add(out_ptr_NC, oox, wxx * target[c], sxx);
      } else {
        // Diff w.r.t. sgrad: splat target weighted by basis derivative
        scalar_t* out_ptr_NC = out_ptr_NC0;
        for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) {
          scalar_t val = gxx * target[c];
          bound::add(out_ptr_NC, oox, val, sxx);
        }
      }
    }

    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    else if (do_count) {
      bound::add(out_ptr_NC0, oox, wxx, sxx);
    }

    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if (do_grad) {
      if (trgt_K == 0) {
        // Diff w.r.t. pull/push
        scalar_t* src_ptr_NC = src_ptr_NC0;
        scalar_t dot = static_cast<scalar_t>(0);
        for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
          scalar_t src = bound::get(src_ptr_NC, osx, sxx);
          dot += (trgt_ptr ? src * target[c] : src);
          // trgt_ptr == 0 in the backward pass of 'count'
        }
        ogx += gxx * dot;
      } else {
        // Diff w.r.t. sgrad (needs the 2nd derivative of the basis)
        scalar_t* src_ptr_NC = src_ptr_NC0;
        scalar_t dot;
        dot = static_cast<scalar_t>(0);
        for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
          scalar_t src = bound::get(src_ptr_NC, osx, sxx);
          dot += src * target[c];
        }
        ogx += hxx * dot;
      }
    }

  } // x

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Write the accumulated coordinate gradient for this voxel
  if (do_grad) {
    scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX;
    (*grad_ptr_NX) = ogx;
  }
}
+
1471
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1472
+ // LINEAR INTERPOLATION 3D
1473
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1474
+
1475
// Trilinear interpolation in 3D.
// One call handles, depending on the do_* mode flags:
//   pull  : sample the source at (x, y, z),
//   sgrad : sample the spatial gradient of the source,
//   push  : splat the target value over the 8 surrounding voxels,
//   count : splat a unit weight,
//   grad  : derivative w.r.t. the sampling coordinates (x, y, z).
// (w, h, d) is the output voxel index, (n) the batch index.
// Out-of-bound access is folded back by bound::index and modulated by the
// sign returned by bound::sign (which must be computed before warping).
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d_trilinear(
    scalar_t x,
    scalar_t y,
    scalar_t z,
    offset_t w,
    offset_t h,
    offset_t d,
    offset_t n) const {
  // Get the lower corner of the surrounding 2x2x2 cell
  offset_t ix0 = static_cast<offset_t>(std::floor(x));
  offset_t iy0 = static_cast<offset_t>(std::floor(y));
  offset_t iz0 = static_cast<offset_t>(std::floor(z));

  // Interpolation weights (inversely proportional to distance)
  scalar_t dx1 = x - ix0;
  scalar_t dy1 = y - iy0;
  scalar_t dz1 = z - iz0;
  scalar_t dx0 = 1. - dx1;
  scalar_t dy0 = 1. - dy1;
  scalar_t dz0 = 1. - dz1;
  scalar_t w000 = dx0 * dy0 * dz0;
  scalar_t w100 = dx1 * dy0 * dz0;
  scalar_t w010 = dx0 * dy1 * dz0;
  scalar_t w001 = dx0 * dy0 * dz1;
  scalar_t w110 = dx1 * dy1 * dz0;
  scalar_t w011 = dx0 * dy1 * dz1;
  scalar_t w101 = dx1 * dy0 * dz1;
  scalar_t w111 = dx1 * dy1 * dz1;

  // Sign (/!\ compute sign before warping indices)
  int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X);
  int8_t sy1 = bound::sign(bound1, iy0 + 1, src_Y);
  int8_t sz1 = bound::sign(bound2, iz0 + 1, src_Z);
  int8_t sx0 = bound::sign(bound0, ix0, src_X);
  int8_t sy0 = bound::sign(bound1, iy0, src_Y);
  int8_t sz0 = bound::sign(bound2, iz0, src_Z);
  int8_t s000 = sx0 * sy0 * sz0;
  int8_t s100 = sx1 * sy0 * sz0;
  int8_t s010 = sx0 * sy1 * sz0;
  int8_t s001 = sx0 * sy0 * sz1;
  int8_t s110 = sx1 * sy1 * sz0;
  int8_t s011 = sx0 * sy1 * sz1;
  int8_t s101 = sx1 * sy0 * sz1;
  int8_t s111 = sx1 * sy1 * sz1;

  // Warp indices back into the valid range
  offset_t ix1, iy1, iz1;
  ix1 = bound::index(bound0, ix0 + 1, src_X);
  iy1 = bound::index(bound1, iy0 + 1, src_Y);
  iz1 = bound::index(bound2, iz0 + 1, src_Z);
  ix0 = bound::index(bound0, ix0, src_X);
  iy0 = bound::index(bound1, iy0, src_Y);
  iz0 = bound::index(bound2, iz0, src_Z);

  // Linear offsets of the 8 corners; first set up w.r.t. the source volume,
  // later re-bound to the output volume for push/count modes.
  offset_t o000, o100, o010, o001, o110, o011, o101, o111;

  if (do_pull || do_grad || do_sgrad) {
    // Offsets into source volume
    o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ;
    o100 = ix1 * src_sX + iy0 * src_sY + iz0 * src_sZ;
    o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ;
    o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ;
    o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ;
    o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ;
    o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ;
    o111 = ix1 * src_sX + iy1 * src_sY + iz1 * src_sZ;
  } else if (!(do_push || do_count)) {
    // No mode will set them later; zero-init (presumably to silence
    // uninitialized-variable warnings — they are never read in this case)
    o000 = o100 = o010 = o001 = o110 = o011 = o101 = o111 = 0;
  }

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_grad) {
    scalar_t gx = static_cast<scalar_t>(0);
    scalar_t gy = static_cast<scalar_t>(0);
    scalar_t gz = static_cast<scalar_t>(0);
    scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    if (trgt_K == 0) {
      // backward w.r.t. push/pull: each corner contributes the derivative
      // of its trilinear weight times (src * trgt), summed over channels
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt = trgt_ptr ? *trgt_ptr_NCXYZ : static_cast<scalar_t>(1);
        // ^ trgt_ptr == 0 during the backward pass of count
        src = bound::get(src_ptr_NC, o000, s000);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy0 * dz0 * src;
        gy -= dx0 * dz0 * src;
        gz -= dx0 * dy0 * src;
        src = bound::get(src_ptr_NC, o100, s100);
        if (trgt_ptr)
          src *= trgt;
        gx += dy0 * dz0 * src;
        gy -= dx1 * dz0 * src;
        gz -= dx1 * dy0 * src;
        src = bound::get(src_ptr_NC, o010, s010);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy1 * dz0 * src;
        gy += dx0 * dz0 * src;
        gz -= dx0 * dy1 * src;
        src = bound::get(src_ptr_NC, o110, s110);
        if (trgt_ptr)
          src *= trgt;
        gx += dy1 * dz0 * src;
        gy += dx1 * dz0 * src;
        gz -= dx1 * dy1 * src;
        src = bound::get(src_ptr_NC, o001, s001);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy0 * dz1 * src;
        gy -= dx0 * dz1 * src;
        gz += dx0 * dy0 * src;
        src = bound::get(src_ptr_NC, o101, s101);
        if (trgt_ptr)
          src *= trgt;
        gx += dy0 * dz1 * src;
        gy -= dx1 * dz1 * src;
        gz += dx1 * dy0 * src;
        src = bound::get(src_ptr_NC, o011, s011);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy1 * dz1 * src;
        gy += dx0 * dz1 * src;
        gz += dx0 * dy1 * src;
        src = bound::get(src_ptr_NC, o111, s111);
        if (trgt_ptr)
          src *= trgt;
        gx += dy1 * dz1 * src;
        gy += dx1 * dz1 * src;
        gz += dx1 * dy1 * src;
      }
    } else {
      // backward w.r.t. sgrad: the incoming gradient has 3 components
      // (trgt0/trgt1/trgt2 = d/dx, d/dy, d/dz), so each corner mixes them
      // with the second-order cross-derivatives of the trilinear weight
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt0 = *trgt_ptr_NCXYZ, trgt1 = trgt_ptr_NCXYZ[trgt_sK], trgt2 = trgt_ptr_NCXYZ[trgt_sK * 2];
        src = bound::get(src_ptr_NC, o000, s000);
        gx += (dz0 * trgt1 + dy0 * trgt2) * src;
        gy += (dz0 * trgt0 + dx0 * trgt2) * src;
        gz += (dy0 * trgt0 + dx0 * trgt1) * src;
        src = bound::get(src_ptr_NC, o100, s100);
        gx += (-dz0 * trgt1 - dy0 * trgt2) * src;
        gy += (-dz0 * trgt0 + dx1 * trgt2) * src;
        gz += (-dy0 * trgt0 + dx1 * trgt1) * src;
        src = bound::get(src_ptr_NC, o010, s010);
        gx += (-dz0 * trgt1 + dy1 * trgt2) * src;
        gy += (-dz0 * trgt0 - dx0 * trgt2) * src;
        gz += (dy1 * trgt0 - dx0 * trgt1) * src;
        src = bound::get(src_ptr_NC, o110, s110);
        gx += (dz0 * trgt1 - dy1 * trgt2) * src;
        gy += (dz0 * trgt0 - dx1 * trgt2) * src;
        gz += (-dy1 * trgt0 - dx1 * trgt1) * src;
        src = bound::get(src_ptr_NC, o001, s001);
        gx += (dz1 * trgt1 - dy0 * trgt2) * src;
        gy += (dz1 * trgt0 - dx0 * trgt2) * src;
        gz += (-dy0 * trgt0 - dx0 * trgt1) * src;
        src = bound::get(src_ptr_NC, o101, s101);
        gx += (-dz1 * trgt1 + dy0 * trgt2) * src;
        gy += (-dz1 * trgt0 - dx1 * trgt2) * src;
        gz += (dy0 * trgt0 - dx1 * trgt1) * src;
        src = bound::get(src_ptr_NC, o011, s011);
        gx += (-dz1 * trgt1 - dy1 * trgt2) * src;
        gy += (-dz1 * trgt0 + dx0 * trgt2) * src;
        gz += (-dy1 * trgt0 + dx0 * trgt1) * src;
        src = bound::get(src_ptr_NC, o111, s111);
        gx += (dz1 * trgt1 + dy1 * trgt2) * src;
        gy += (dz1 * trgt0 + dx1 * trgt2) * src;
        gz += (dy1 * trgt0 + dx1 * trgt1) * src;
      }
    }

    scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ;
    (*grad_ptr_NXYZ) = gx;
    grad_ptr_NXYZ[grad_sC] = gy;
    grad_ptr_NXYZ[grad_sC * 2] = gz;
  }
  if (do_push || do_count) {
    // Re-bind corner offsets to the 'push' (output) volume strides
    o000 = ix0 * out_sX + iy0 * out_sY + iz0 * out_sZ;
    o100 = ix1 * out_sX + iy0 * out_sY + iz0 * out_sZ;
    o010 = ix0 * out_sX + iy1 * out_sY + iz0 * out_sZ;
    o001 = ix0 * out_sX + iy0 * out_sY + iz1 * out_sZ;
    o110 = ix1 * out_sX + iy1 * out_sY + iz0 * out_sZ;
    o011 = ix0 * out_sX + iy1 * out_sY + iz1 * out_sZ;
    o101 = ix1 * out_sX + iy0 * out_sY + iz1 * out_sZ;
    o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ;
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_pull) {
    scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) {
      *out_ptr_NCXYZ = bound::get(src_ptr_NC, o000, s000) * w000 + bound::get(src_ptr_NC, o100, s100) * w100 +
          bound::get(src_ptr_NC, o010, s010) * w010 + bound::get(src_ptr_NC, o110, s110) * w110 +
          bound::get(src_ptr_NC, o001, s001) * w001 + bound::get(src_ptr_NC, o101, s101) * w101 +
          bound::get(src_ptr_NC, o011, s011) * w011 + bound::get(src_ptr_NC, o111, s111) * w111;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_sgrad) {
    scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) {
      scalar_t src000 = bound::get(src_ptr_NC, o000, s000);
      scalar_t src100 = bound::get(src_ptr_NC, o100, s100);
      scalar_t src010 = bound::get(src_ptr_NC, o010, s010);
      scalar_t src110 = bound::get(src_ptr_NC, o110, s110);
      scalar_t src001 = bound::get(src_ptr_NC, o001, s001);
      scalar_t src101 = bound::get(src_ptr_NC, o101, s101);
      scalar_t src011 = bound::get(src_ptr_NC, o011, s011);
      scalar_t src111 = bound::get(src_ptr_NC, o111, s111);
      // d/dx, d/dy, d/dz of the trilinear interpolant, written to the
      // K dimension of the output
      *out_ptr_NCXYZ = -dy0 * dz0 * src000 + dy0 * dz0 * src100 - dy1 * dz0 * src010 + dy1 * dz0 * src110 -
          dy0 * dz1 * src001 + dy0 * dz1 * src101 - dy1 * dz1 * src011 + dy1 * dz1 * src111;
      out_ptr_NCXYZ[out_sK] = -dx0 * dz0 * src000 - dx1 * dz0 * src100 + dx0 * dz0 * src010 + dx1 * dz0 * src110 -
          dx0 * dz1 * src001 - dx1 * dz1 * src101 + dx0 * dz1 * src011 + dx1 * dz1 * src111;
      out_ptr_NCXYZ[out_sK * 2] = -dx0 * dy0 * src000 - dx1 * dy0 * src100 - dx0 * dy1 * src010 - dx1 * dy1 * src110 +
          dx0 * dy0 * src001 + dx1 * dy0 * src101 + dx0 * dy1 * src011 + dx1 * dy1 * src111;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_push) {
    // Offsets into 'push' volume (recomputed here; same values as the
    // do_push branch above when both conditions held)
    o000 = ix0 * out_sX + iy0 * out_sY + iz0 * out_sZ;
    o100 = ix1 * out_sX + iy0 * out_sY + iz0 * out_sZ;
    o010 = ix0 * out_sX + iy1 * out_sY + iz0 * out_sZ;
    o001 = ix0 * out_sX + iy0 * out_sY + iz1 * out_sZ;
    o110 = ix1 * out_sX + iy1 * out_sY + iz0 * out_sZ;
    o011 = ix0 * out_sX + iy1 * out_sY + iz1 * out_sZ;
    o101 = ix1 * out_sX + iy0 * out_sY + iz1 * out_sZ;
    o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ;
    scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    if (trgt_K == 0) {
      // Diff w.r.t. push/pull: splat the target with the trilinear weights
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt = *trgt_ptr_NCXYZ;
        bound::add(out_ptr_NC, o000, w000 * trgt, s000);
        bound::add(out_ptr_NC, o100, w100 * trgt, s100);
        bound::add(out_ptr_NC, o010, w010 * trgt, s010);
        bound::add(out_ptr_NC, o110, w110 * trgt, s110);
        bound::add(out_ptr_NC, o001, w001 * trgt, s001);
        bound::add(out_ptr_NC, o101, w101 * trgt, s101);
        bound::add(out_ptr_NC, o011, w011 * trgt, s011);
        bound::add(out_ptr_NC, o111, w111 * trgt, s111);
      }
    } else {
      // Diff w.r.t. sgrad: splat the 3-component gradient with the
      // derivatives of the trilinear weights
      scalar_t val;
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt0 = *trgt_ptr_NCXYZ, trgt1 = trgt_ptr_NCXYZ[trgt_sK], trgt2 = trgt_ptr_NCXYZ[trgt_sK * 2];
        val = -dy0 * dz0 * trgt0 - dx0 * dz0 * trgt1 - dx0 * dy0 * trgt2;
        bound::add(out_ptr_NC, o000, val, s000);
        val = dy0 * dz0 * trgt0 - dx1 * dz0 * trgt1 - dx1 * dy0 * trgt2;
        bound::add(out_ptr_NC, o100, val, s100);
        val = -dy1 * dz0 * trgt0 + dx0 * dz0 * trgt1 - dx0 * dy1 * trgt2;
        bound::add(out_ptr_NC, o010, val, s010);
        val = dy1 * dz0 * trgt0 + dx1 * dz0 * trgt1 - dx1 * dy1 * trgt2;
        bound::add(out_ptr_NC, o110, val, s110);
        val = -dy0 * dz1 * trgt0 - dx0 * dz1 * trgt1 + dx0 * dy0 * trgt2;
        bound::add(out_ptr_NC, o001, val, s001);
        val = dy0 * dz1 * trgt0 - dx1 * dz1 * trgt1 + dx1 * dy0 * trgt2;
        bound::add(out_ptr_NC, o101, val, s101);
        val = -dy1 * dz1 * trgt0 + dx0 * dz1 * trgt1 + dx0 * dy1 * trgt2;
        bound::add(out_ptr_NC, o011, val, s011);
        val = dy1 * dz1 * trgt0 + dx1 * dz1 * trgt1 + dx1 * dy1 * trgt2;
        bound::add(out_ptr_NC, o111, val, s111);
      }
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_count) {
    scalar_t* out_ptr_N = out_ptr + n * out_sN;
    bound::add(out_ptr_N, o000, w000, s000);
    bound::add(out_ptr_N, o100, w100, s100);
    bound::add(out_ptr_N, o010, w010, s010);
    bound::add(out_ptr_N, o110, w110, s110);
    bound::add(out_ptr_N, o001, w001, s001);
    bound::add(out_ptr_N, o101, w101, s101);
    bound::add(out_ptr_N, o011, w011, s011);
    bound::add(out_ptr_N, o111, w111, s111);
  }
}
1761
+
1762
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1763
+ // LINEAR INTERPOLATION 2D
1764
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1765
+
1766
// Bilinear interpolation in 2D.
// One call handles, depending on the do_* mode flags:
//   pull  : sample the source at (x, y),
//   sgrad : sample the spatial gradient of the source,
//   push  : splat the target value over the 4 surrounding pixels,
//   count : splat a unit weight,
//   grad  : derivative w.r.t. the sampling coordinates (x, y).
// (w, h) is the output pixel index, (n) the batch index.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d_bilinear(
    scalar_t x,
    scalar_t y,
    offset_t w,
    offset_t h,
    offset_t n) const {
  // Get the lower corner of the surrounding 2x2 cell
  offset_t ix0 = static_cast<offset_t>(std::floor(x));
  offset_t iy0 = static_cast<offset_t>(std::floor(y));

  // Interpolation weights (inversely proportional to distance)
  scalar_t dx1 = x - ix0;
  scalar_t dy1 = y - iy0;
  scalar_t dx0 = 1. - dx1;
  scalar_t dy0 = 1. - dy1;
  scalar_t w00 = dx0 * dy0;
  scalar_t w10 = dx1 * dy0;
  scalar_t w01 = dx0 * dy1;
  scalar_t w11 = dx1 * dy1;

  // Sign (/!\ compute sign before warping indices)
  int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X);
  int8_t sy1 = bound::sign(bound1, iy0 + 1, src_Y);
  int8_t sx0 = bound::sign(bound0, ix0, src_X);
  int8_t sy0 = bound::sign(bound1, iy0, src_Y);
  int8_t s00 = sx0 * sy0;
  int8_t s10 = sx1 * sy0;
  int8_t s01 = sx0 * sy1;
  int8_t s11 = sx1 * sy1;

  // Warp indices back into the valid range
  offset_t ix1, iy1;
  ix1 = bound::index(bound0, ix0 + 1, src_X);
  iy1 = bound::index(bound1, iy0 + 1, src_Y);
  ix0 = bound::index(bound0, ix0, src_X);
  iy0 = bound::index(bound1, iy0, src_Y);

  // Linear offsets of the 4 corners; first bound to the source volume,
  // later re-bound to the output volume for push/count modes.
  offset_t o00, o10, o01, o11;
  if (do_pull || do_grad || do_sgrad) {
    // Offsets into source volume
    o00 = ix0 * src_sX + iy0 * src_sY;
    o10 = ix1 * src_sX + iy0 * src_sY;
    o01 = ix0 * src_sX + iy1 * src_sY;
    o11 = ix1 * src_sX + iy1 * src_sY;
  } else if (!(do_push || do_count)) {
    // Never read in this case; zero-init (presumably to silence
    // uninitialized-variable warnings)
    o00 = o10 = o01 = o11 = 0;
  }

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_grad) {
    scalar_t gx = static_cast<scalar_t>(0);
    scalar_t gy = static_cast<scalar_t>(0);
    scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    if (trgt_K == 0) {
      // backward w.r.t. push/pull: derivative of each bilinear weight
      // times (src * trgt), summed over channels
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt = trgt_ptr ? *trgt_ptr_NCXY : static_cast<scalar_t>(1);
        // ^ trgt_ptr == 0 during the backward pass of count
        src = bound::get(src_ptr_NC, o00, s00);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy0 * src;
        gy -= dx0 * src;
        src = bound::get(src_ptr_NC, o10, s10);
        if (trgt_ptr)
          src *= trgt;
        gx += dy0 * src;
        gy -= dx1 * src;
        src = bound::get(src_ptr_NC, o01, s01);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy1 * src;
        gy += dx0 * src;
        src = bound::get(src_ptr_NC, o11, s11);
        if (trgt_ptr)
          src *= trgt;
        gx += dy1 * src;
        gy += dx1 * src;
      }
    } else {
      // backward w.r.t. sgrad: the incoming gradient has 2 components
      // (trgt0 = d/dx, trgt1 = d/dy); only the mixed second derivative
      // of the bilinear weight survives
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt0 = *trgt_ptr_NCXY, trgt1 = trgt_ptr_NCXY[trgt_sK];
        src = bound::get(src_ptr_NC, o00, s00);
        gx += trgt1 * src;
        gy += trgt0 * src;
        src = bound::get(src_ptr_NC, o10, s10);
        gx -= trgt1 * src;
        gy -= trgt0 * src;
        src = bound::get(src_ptr_NC, o01, s01);
        gx -= trgt1 * src;
        gy -= trgt0 * src;
        src = bound::get(src_ptr_NC, o11, s11);
        gx += trgt1 * src;
        gy += trgt0 * src;
      }
    }

    scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY;
    (*grad_ptr_NXY) = gx;
    grad_ptr_NXY[grad_sC] = gy;
  }
  if (do_push || do_count) {
    // Re-bind corner offsets to the 'push' (output) volume strides
    o00 = ix0 * out_sX + iy0 * out_sY;
    o10 = ix1 * out_sX + iy0 * out_sY;
    o01 = ix0 * out_sX + iy1 * out_sY;
    o11 = ix1 * out_sX + iy1 * out_sY;
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_pull) {
    scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) {
      *out_ptr_NCXY = bound::get(src_ptr_NC, o00, s00) * w00 + bound::get(src_ptr_NC, o10, s10) * w10 +
          bound::get(src_ptr_NC, o01, s01) * w01 + bound::get(src_ptr_NC, o11, s11) * w11;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_sgrad) {
    scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) {
      scalar_t src00 = bound::get(src_ptr_NC, o00, s00);
      scalar_t src10 = bound::get(src_ptr_NC, o10, s10);
      scalar_t src01 = bound::get(src_ptr_NC, o01, s01);
      scalar_t src11 = bound::get(src_ptr_NC, o11, s11);
      // d/dx and d/dy of the bilinear interpolant
      *out_ptr_NCXY = -dy0 * src00 + dy0 * src10 - dy1 * src01 + dy1 * src11;
      out_ptr_NCXY[out_sK] = -dx0 * src00 - dx1 * src10 + dx0 * src01 + dx1 * src11;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_push) {
    scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    if (trgt_K == 0) {
      // Diff w.r.t. push/pull: splat the target with the bilinear weights
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt = *trgt_ptr_NCXY;
        bound::add(out_ptr_NC, o00, w00 * trgt, s00);
        bound::add(out_ptr_NC, o10, w10 * trgt, s10);
        bound::add(out_ptr_NC, o01, w01 * trgt, s01);
        bound::add(out_ptr_NC, o11, w11 * trgt, s11);
      }
    } else {
      // Diff w.r.t. sgrad: splat the 2-component gradient with the
      // derivatives of the bilinear weights
      scalar_t val;
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt0 = *trgt_ptr_NCXY, trgt1 = trgt_ptr_NCXY[trgt_sK];
        val = -dy0 * trgt0 - dx0 * trgt1;
        bound::add(out_ptr_NC, o00, val, s00);
        val = dy0 * trgt0 - dx1 * trgt1;
        bound::add(out_ptr_NC, o10, val, s10);
        val = -dy1 * trgt0 + dx0 * trgt1;
        bound::add(out_ptr_NC, o01, val, s01);
        val = dy1 * trgt0 + dx1 * trgt1;
        bound::add(out_ptr_NC, o11, val, s11);
      }
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_count) {
    scalar_t* out_ptr_N = out_ptr + n * out_sN;
    bound::add(out_ptr_N, o00, w00, s00);
    bound::add(out_ptr_N, o10, w10, s10);
    bound::add(out_ptr_N, o01, w01, s01);
    bound::add(out_ptr_N, o11, w11, s11);
  }
}
1941
+
1942
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1943
+ // LINEAR INTERPOLATION 1D
1944
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1945
+
1946
// Linear interpolation in 1D.
// One call handles, depending on the do_* mode flags:
//   pull  : sample the source at x,
//   sgrad : sample the spatial gradient (finite difference of the two nodes),
//   push  : splat the target value over the 2 surrounding samples,
//   count : splat a unit weight,
//   grad  : derivative w.r.t. the sampling coordinate x.
// (w) is the output sample index, (n) the batch index.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate1d_linear(scalar_t x, offset_t w, offset_t n) const {
  // Get the left neighbour of x
  offset_t ix0 = static_cast<offset_t>(std::floor(x));

  // Interpolation weights (inversely proportional to distance)
  scalar_t w1 = x - ix0;
  scalar_t w0 = 1. - w1;

  // Sign (/!\ compute sign before warping indices)
  int8_t s1 = bound::sign(bound0, ix0 + 1, src_X);
  int8_t s0 = bound::sign(bound0, ix0, src_X);

  // Warp indices back into the valid range
  offset_t ix1;
  ix1 = bound::index(bound0, ix0 + 1, src_X);
  ix0 = bound::index(bound0, ix0, src_X);

  // Linear offsets of the 2 nodes; first bound to the source volume,
  // later re-bound to the output volume for push/count modes.
  offset_t o0, o1;
  if (do_pull || do_grad || do_sgrad) {
    // Offsets into source volume
    o0 = ix0 * src_sX;
    o1 = ix1 * src_sX;
  } else if (!(do_push || do_count)) {
    // Never read in this case; zero-init (presumably to silence
    // uninitialized-variable warnings)
    o0 = o1 = 0;
  }

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_grad) {
    if (trgt_K == 0) {
      // backward w.r.t. push/pull: d/dx of the linear weight is -1/+1
      scalar_t gx = static_cast<scalar_t>(0);
      scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX;
      scalar_t* src_ptr_NC = src_ptr + n * src_sN;

      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt = trgt_ptr ? *trgt_ptr_NCX : static_cast<scalar_t>(1);
        // ^ trgt_ptr == 0 during the backward pass of count
        src = bound::get(src_ptr_NC, o0, s0);
        if (trgt_ptr)
          src *= trgt;
        gx -= src;
        src = bound::get(src_ptr_NC, o1, s1);
        if (trgt_ptr)
          src *= trgt;
        gx += src;
      }

      scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX;
      (*grad_ptr_NX) = gx;
    } else {
      // backward w.r.t. sgrad: the second derivative of a linear basis
      // is zero -> gradient is zero (make sure this is done at initialization)
    }
  }
  if (do_push || do_count) {
    // Re-bind node offsets to the 'push' (output) volume strides
    o0 = ix0 * out_sX;
    o1 = ix1 * out_sX;
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_pull) {
    scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) {
      *out_ptr_NCX = bound::get(src_ptr_NC, o0, s0) * w0 + bound::get(src_ptr_NC, o1, s1) * w1;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_sgrad) {
    scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) {
      // d/dx of the linear interpolant = right node minus left node
      *out_ptr_NCX = bound::get(src_ptr_NC, o1, s1) - bound::get(src_ptr_NC, o0, s0);
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_push) {
    scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    if (trgt_K == 0) {
      // Diff w.r.t. push/pull: splat the target with the linear weights
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt = *trgt_ptr_NCX;
        bound::add(out_ptr_NC, o0, w0 * trgt, s0);
        bound::add(out_ptr_NC, o1, w1 * trgt, s1);
      }
    } else {
      // Diff w.r.t. sgrad: splat -trgt/+trgt (derivative of the weights)
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt0 = *trgt_ptr_NCX;
        bound::add(out_ptr_NC, o0, -trgt0, s0);
        bound::add(out_ptr_NC, o1, trgt0, s1);
      }
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_count) {
    scalar_t* out_ptr_N = out_ptr + n * out_sN;
    bound::add(out_ptr_N, o0, w0, s0);
    bound::add(out_ptr_N, o1, w1, s1);
  }
}
2051
+
2052
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2053
+ // NEAREST NEIGHBOR INTERPOLATION 3D
2054
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2055
+
2056
// Nearest-neighbour sampling/splatting at one 3D location.
//
// (x, y, z): continuous source-space coordinate produced by the grid.
// (w, h, d): output/target spatial index handled by this call.
// n        : batch index.
//
// Exactly one of the object's do_* modes takes effect: pull reads the
// source at the nearest voxel, push splats the target value onto the
// output, count accumulates splatting weights.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d_nearest(
    scalar_t x,
    scalar_t y,
    scalar_t z,
    offset_t w,
    offset_t h,
    offset_t d,
    offset_t n) const {
  // Round the sampled coordinate to the nearest integer voxel.
  offset_t ix = static_cast<offset_t>(std::round(x));
  offset_t iy = static_cast<offset_t>(std::round(y));
  offset_t iz = static_cast<offset_t>(std::round(z));

  // Boundary condition (/!\ compute sign before warping indices)
  int8_t sx = bound::sign(bound0, ix, src_X);
  int8_t sy = bound::sign(bound1, iy, src_Y);
  int8_t sz = bound::sign(bound2, iz, src_Z);
  ix = bound::index(bound0, ix, src_X);
  iy = bound::index(bound1, iy, src_Y);
  iz = bound::index(bound2, iz, src_Z);

  // Combined sign applied to the value (supports sign-flipping bounds).
  int8_t s = sz * sy * sx;

  if (do_pull) {
    // out[n, :, w, h, d] = src[n, :, ix, iy, iz]
    offset_t o = iz * src_sZ + iy * src_sY + ix * src_sX;
    scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC)
      *out_ptr_NCXYZ = bound::get(src_ptr_NC, o, s);
  } else if (do_push && trgt_K == 0) {
    // out[n, :, ix, iy, iz] += trgt[n, :, w, h, d]  (adjoint of pull)
    offset_t o = iz * out_sZ + iy * out_sY + ix * out_sX;
    scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC)
      bound::add(out_ptr_NC, o, *trgt_ptr_NCXYZ, s);
  } else if (do_count) {
    // out[n, :, ix, iy, iz] += 1  (splatting weight image)
    offset_t o = iz * out_sZ + iy * out_sY + ix * out_sX;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
      bound::add(out_ptr_NC, o, static_cast<scalar_t>(1), s);
  }
}
2099
+
2100
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2101
+ // NEAREST NEIGHBOR INTERPOLATION 2D
2102
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2103
+
2104
// Nearest-neighbour sampling/splatting at one 2D location.
// Mirrors interpolate3d_nearest with the z axis dropped:
// (x, y) is the continuous coordinate, (w, h) the output index, n the batch.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d_nearest(
    scalar_t x,
    scalar_t y,
    offset_t w,
    offset_t h,
    offset_t n) const {
  // Round to the nearest integer voxel.
  offset_t ix = static_cast<offset_t>(std::round(x));
  offset_t iy = static_cast<offset_t>(std::round(y));

  // Boundary condition (/!\ compute sign before warping indices)
  int8_t sx = bound::sign(bound0, ix, src_X);
  int8_t sy = bound::sign(bound1, iy, src_Y);
  ix = bound::index(bound0, ix, src_X);
  iy = bound::index(bound1, iy, src_Y);

  // Combined sign applied to the value.
  int8_t s = sy * sx;

  if (do_pull) {
    // out[n, :, w, h] = src[n, :, ix, iy]
    offset_t o = iy * src_sY + ix * src_sX;
    scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC)
      *out_ptr_NCXY = bound::get(src_ptr_NC, o, s);
  } else if (do_push && trgt_K == 0) {
    // out[n, :, ix, iy] += trgt[n, :, w, h]  (adjoint of pull)
    offset_t o = iy * out_sY + ix * out_sX;
    scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC)
      bound::add(out_ptr_NC, o, *trgt_ptr_NCXY, s);
  } else if (do_count) {
    // out[n, :, ix, iy] += 1  (splatting weight image)
    offset_t o = iy * out_sY + ix * out_sX;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
      bound::add(out_ptr_NC, o, static_cast<scalar_t>(1), s);
  }
}
2142
+
2143
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2144
+ // NEAREST NEIGHBOR INTERPOLATION 1D
2145
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2146
+
2147
// Nearest-neighbour sampling/splatting at one 1D location.
// x is the continuous coordinate, w the output index, n the batch index.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate1d_nearest(scalar_t x, offset_t w, offset_t n) const {
  // Round to the nearest integer voxel.
  offset_t i = static_cast<offset_t>(std::round(x));

  // Boundary condition (/!\ compute sign before warping indices)
  int8_t s = bound::sign(bound0, i, src_X);
  i = bound::index(bound0, i, src_X);

  if (do_pull) {
    // out[n, :, w] = src[n, :, i]
    offset_t o = i * src_sX;
    scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC)
      *out_ptr_NCX = bound::get(src_ptr_NC, o, s);
  } else if (do_push && trgt_K == 0) {
    // out[n, :, i] += trgt[n, :, w]  (adjoint of pull)
    offset_t o = i * out_sX;
    scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC)
      bound::add(out_ptr_NC, o, *trgt_ptr_NCX, s);
  } else if (do_count) {
    // out[n, :, i] += 1  (splatting weight image)
    offset_t o = i * out_sX;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
      bound::add(out_ptr_NC, o, static_cast<scalar_t>(1), s);
  }
}
2174
+
2175
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2176
+ // LINEAR INTERPOLATION 3D + SLIDING BOUNDARY
2177
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2178
+ // TODO
2179
+
2180
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2181
+ // CUDA KERNEL (MUST BE OUT OF CLASS)
2182
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2183
+
2184
+ } // namespace
2185
+
2186
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2187
+ // FUNCTIONAL FORM WITH DISPATCH
2188
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2189
+
2190
+ #define PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, SourceType0) \
2191
+ template std::deque<Tensor> pushpull( \
2192
+ const SourceType0&, \
2193
+ const Tensor&, \
2194
+ const Tensor&, \
2195
+ BoundType0, \
2196
+ InterpolationType0, \
2197
+ bool, \
2198
+ bool, \
2199
+ bool, \
2200
+ bool, \
2201
+ bool, \
2202
+ bool); \
2203
+ template std::deque<Tensor> pushpull( \
2204
+ const SourceType0&, const Tensor&, BoundType0, InterpolationType0, bool, bool, bool, bool, bool, bool)
2205
+ #define PUSHPULL_INSTANTIATE2(BoundType0, InterpolationType0) \
2206
+ PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, IntArrayRef); \
2207
+ PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, Tensor)
2208
+ #define PUSHPULL_INSTANTIATE1(BoundType0) \
2209
+ PUSHPULL_INSTANTIATE2(BoundType0, InterpolationType); \
2210
+ PUSHPULL_INSTANTIATE2(BoundType0, InterpolationVectorRef)
2211
+ #define PUSHPULL_INSTANTIATE \
2212
+ PUSHPULL_INSTANTIATE1(BoundType); \
2213
+ PUSHPULL_INSTANTIATE1(BoundVectorRef)
2214
+
2215
+ // Two arguments (source, grid)
2216
+ // > `bound` and `interpolation` can be single arguments or vectors.
2217
// Two arguments (source, grid)
// > `bound` and `interpolation` can be single arguments or vectors.
//
// Builds a PushPullAllocator (which allocates/validates all tensors),
// then dispatches on the grid's floating scalar type and runs the
// generic implementation. Returns the deque of output tensors prepared
// by the allocator (pull / sgrad / push / count and, optionally, grad).
template <typename BoundType, typename InterpolationType, typename SourceType>
MONAI_HOST std::deque<Tensor> pushpull(
    const SourceType& source,
    const Tensor& grid,
    BoundType bound,
    InterpolationType interpolation,
    bool extrapolate,
    bool do_pull,
    bool do_push,
    bool do_count,
    bool do_grad,
    bool do_sgrad) {
  // Spatial dimensionality is derived from the grid tensor
  // (grid.dim() - 2, i.e. batch and channel dims excluded).
  PushPullAllocator info(
      grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad);
  info.ioset(source, grid);

  // 64-bit offsets are always used on CPU (see allocator notes upstream).
  return AT_DISPATCH_FLOATING_TYPES(grid.scalar_type(), "pushpull", [&] {
    PushPullImpl<scalar_t, int64_t> algo(info);
    algo.loop();
    return algo.output;
  });
}
2239
+
2240
+ // Three arguments (source, grid, target)
2241
+ // > `bound` and `interpolation` can be single arguments or vectors.
2242
+ // > `source` can be a tensor or a vector of dimensions.
2243
// Three arguments (source, grid, target)
// > `bound` and `interpolation` can be single arguments or vectors.
// > `source` can be a tensor or a vector of dimensions.
//
// Same as the two-argument overload but additionally registers a target
// tensor (used by push/backward modes) before dispatching.
template <typename BoundType, typename InterpolationType, typename SourceType>
MONAI_HOST std::deque<Tensor> pushpull(
    const SourceType& source,
    const Tensor& grid,
    const Tensor& target,
    BoundType bound,
    InterpolationType interpolation,
    bool extrapolate,
    bool do_pull,
    bool do_push,
    bool do_count,
    bool do_grad,
    bool do_sgrad) {
  // Spatial dimensionality derived from the grid (batch/channel excluded).
  PushPullAllocator info(
      grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad);
  info.ioset(source, grid, target);

  // 64-bit offsets are always used on CPU.
  return AT_DISPATCH_FLOATING_TYPES(grid.scalar_type(), "pushpull", [&] {
    PushPullImpl<scalar_t, int64_t> algo(info);
    algo.loop();
    return algo.output;
  });
}
2266
+
2267
+ PUSHPULL_INSTANTIATE;
2268
+
2269
+ } // namespace cpu
2270
+ } // namespace monai
source_code/SegMamba/monai/csrc/resample/pushpull_cuda.cu ADDED
@@ -0,0 +1,2244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
+ // adapted from https://github.com/balbasty/nitorch
15
+
16
+ // This file implements spline interpolation / sampling and its adjoint
17
+ // operations. It corresponds loosely to torch's `GridSampler`.
18
+ // It handles boundary conditions and interpolation orders defined in
19
+ // `utils/resample_utils.h` and `utils/resample_utils.h`.
20
+ // These parameters can be specified per dimension.
21
+ // Isotropic 0-th and 1-st order interpolation have their own (faster)
22
+ // implementations. Sliding boundary conditions are also implemented
23
+ // separately.
24
+
25
+ // TODO:
26
+ // . [DONE] generic 3d
27
+ // . [DONE] generic 2d
28
+ // . [DONE] generic 1d
29
+ // . sliding nearest 3d
30
+ // . sliding nearest 2d
31
+ // . sliding linear 3d
32
+ // . sliding linear 2d
33
+ // . sliding generic 3d
34
+ // . sliding generic 2d
35
+ // . [DONE] spatial gradient mode (without multiplication with output gradient)
36
+ // . [DONE] second order gradients (backward pass for spatial gradients)
37
+ // . performance tests
38
+ // . input bound/inter are always vectors -> clean unused constructors
39
+
40
+ #include <ATen/ATen.h>
41
+ #include <limits>
42
+ #include <tuple>
43
+ #include "bounds_common.h"
44
+ #include "interpolation_common.h"
45
+ #include "utils/resample_utils.h"
46
+ //#include <cstdio>
47
+
48
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
49
+ // GPU-specific parameters
50
+ #include <ATen/cuda/CUDAContext.h>
51
+ #include <ATen/cuda/detail/KernelUtils.h>
52
+ #include <c10/macros/Macros.h>
53
+ using namespace at::cuda::detail;
54
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
55
+
56
+ // maximum number of channels
57
+ // > not used in mode isotropic nearest/linear
58
+ #ifndef MONAI_MAX_NUM_CHANNELS
59
+ #define MONAI_MAX_NUM_CHANNELS 1024
60
+ #endif
61
+
62
+ // This parameter allows for a little bit of tolerance when considering
63
+ // a coordinate as "out-of-bound" (if !extrapolate)
64
+ #define TINY 5e-2
65
+
66
+ using at::Tensor;
67
+ using at::TensorOptions;
68
+ using c10::IntArrayRef;
69
+
70
+ namespace monai {
71
+ MONAI_NAMESPACE_DEVICE { // cuda
72
+
73
+ namespace { // anonymous namespace > everything inside has internal linkage
74
+
75
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
76
+ // INDEXING UTILS
77
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
78
+
79
+ // This class reads and sets all the parameters that will later be used
80
+ // by the algorithm in PushPullImpl. All of this is done outside of the
81
+ // implementation class so that we do not depend on generic types. The
82
+ // point is to pre-allocate all necessary tensors so that we can check
83
+ // if they're all compatible with 32 bit math. If it's the case, we can
84
+ // dispatch to a 32b cuda implementation, which might increase
85
+ // performance. Else, we use 64 bit math to compute offsets.
86
+ // (On CPU, we always use 64 bit offsets because it doesn't make a huge
87
+ // difference. It would be different if we had a vectorized
88
+ // implementation as in PyTorch).
89
class PushPullAllocator {
 public:
  // Largest offset representable with signed 32-bit indexing.
  static constexpr int64_t max_int32 = std::numeric_limits<int32_t>::max();

  // ~~~ CONSTRUCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  // Per-dimension bound/interpolation settings fall back to the last
  // provided value (ultimately Replicate / Linear) when the vectors are
  // shorter than the number of spatial dimensions.
  MONAI_HOST
  PushPullAllocator(
      int dim,
      BoundVectorRef bound,
      InterpolationVectorRef interpolation,
      bool extrapolate,
      bool do_pull,
      bool do_push,
      bool do_count,
      bool do_grad,
      bool do_sgrad)
      : dim(dim),
        bound0(bound.size() > 0 ? bound[0] : BoundType::Replicate),
        bound1(
            bound.size() > 1 ? bound[1]
                : bound.size() > 0 ? bound[0]
                                   : BoundType::Replicate),
        bound2(
            bound.size() > 2 ? bound[2]
                : bound.size() > 1 ? bound[1]
                : bound.size() > 0 ? bound[0]
                                   : BoundType::Replicate),
        interpolation0(interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear),
        interpolation1(
            interpolation.size() > 1 ? interpolation[1]
                : interpolation.size() > 0 ? interpolation[0]
                                           : InterpolationType::Linear),
        interpolation2(
            interpolation.size() > 2 ? interpolation[2]
                : interpolation.size() > 1 ? interpolation[1]
                : interpolation.size() > 0 ? interpolation[0]
                                           : InterpolationType::Linear),
        extrapolate(extrapolate),
        do_pull(do_pull),
        do_push(do_push),
        do_count(do_count),
        do_grad(do_grad),
        do_sgrad(do_sgrad) {
    // Isotropic iff the same interpolation order is used along every axis.
    iso = interpolation0 == interpolation1 && interpolation0 == interpolation2;
  }

  // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  // Usually used for pull:
  // - do_pull  -> return source[grid]
  // - do_push  -> fails
  // - do_grad  -> return J(source)[grid]
  // - do_sgrad -> return H(source)[grid]
  MONAI_HOST void ioset(const Tensor& source, const Tensor& grid) {
    init_all();
    init_source(source);
    init_grid(grid);
    init_output();
  }

  // Usually used for pull_backward:
  // - do_pull  -> return source[grid]
  // - do_push  -> return push(target, grid, source.shape)
  // - do_grad  -> return J(source)[grid]
  // - do_sgrad -> return H(source)[grid]
  MONAI_HOST void ioset(const Tensor& source, const Tensor& grid, const Tensor& target) {
    init_all();
    init_source(source);
    init_grid(grid);
    init_target(target);
    init_output();
  }

  // Usually used for push:
  // - do_pull  -> fails
  // - do_push  -> return push(target, grid, source_size)
  // - do_grad  -> fails
  // - do_sgrad -> fails
  MONAI_HOST void ioset(IntArrayRef source_size, const Tensor& grid, const Tensor& target) {
    init_all();
    init_source(source_size);
    init_grid(grid);
    init_target(target);
    init_output();
  }

  // Usually used for count:
  // - do_pull  -> fails
  // - do_push  -> return push(ones, grid, source_size)
  // - do_grad  -> fails
  // - do_sgrad -> fails
  MONAI_HOST void ioset(IntArrayRef source_size, const Tensor& grid) {
    init_all();
    init_source(source_size);
    init_grid(grid);
    init_output();
  }

  // We just check that all tensors that we own are compatible with 32b math.
  // NOTE(review): `max_elem` is accepted but not used here — the per-tensor
  // flags were computed with the default threshold; confirm before passing
  // a custom limit.
  bool canUse32BitIndexMath(int64_t max_elem = max_int32) const {
    return src_32b_ok && trgt_32b_ok && grid_32b_ok && grad_32b_ok && out_32b_ok;
  }

 private:
  // Copied from aten/src/ATen/native/IndexingUtils.cpp in PyTorch 1.6.
  // It is used to decide to which pointer type we should dispatch to.
  // Basically, we need to make sure that the "furthest" element we need
  // to reach is less than max_elem away.
  static bool tensorCanUse32BitIndexMath(const Tensor& t, int64_t max_elem = max_int32) {
    int64_t elements = t.numel();
    if (elements >= max_elem) {
      return false;
    }
    if (elements == 0) {
      return max_elem > 0;
    }

    int64_t offset = 0;
    int64_t linearId = elements - 1;

    // NOTE: Assumes all strides are positive, which is true for now
    // Compute the largest byte-element offset reachable by walking the
    // last linear index through every dimension's stride.
    for (int i = t.dim() - 1; i >= 0; --i) {
      int64_t curDimIndex = linearId % t.size(i);
      int64_t curDimOffset = curDimIndex * t.stride(i);
      offset += curDimOffset;
      linearId /= t.size(i);
    }

    if (offset >= max_elem) {
      return false;
    }

    return true;
  }

  // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  MONAI_HOST void init_all();
  MONAI_HOST void init_source(const Tensor& source);
  MONAI_HOST void init_source(IntArrayRef source_size);
  MONAI_HOST void init_grid(const Tensor& grid);
  MONAI_HOST void init_target(const Tensor& target);
  MONAI_HOST void init_output();

  // ~~~ OPTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  int dim; // dimensionality (2 or 3)
  BoundType bound0; // boundary condition // x|W
  BoundType bound1; // boundary condition // y|H
  BoundType bound2; // boundary condition // z|D
  InterpolationType interpolation0; // interpolation order // x|W
  InterpolationType interpolation1; // interpolation order // y|H
  InterpolationType interpolation2; // interpolation order // z|D
  bool iso; // isotropic interpolation?
  bool extrapolate; // compute out-of-bound values
  bool do_pull; // sample a volume
  bool do_push; // splat a volume
  bool do_count; // splatting weights (= jacobian determinant)
  bool do_grad; // backprop: gradient of grid // pull
  bool do_sgrad; // sample spatial gradients

  // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Sizes (N/C/spatial), strides (s*), raw data pointers and per-tensor
  // 32-bit-indexing feasibility flags for every tensor involved.
  std::deque<Tensor> output;
  TensorOptions src_opt;
  TensorOptions grid_opt;
  TensorOptions trgt_opt;
  int64_t N;
  int64_t C;
  int64_t src_X;
  int64_t src_Y;
  int64_t src_Z;
  int64_t trgt_X;
  int64_t trgt_Y;
  int64_t trgt_Z;
  int64_t trgt_K;
  int64_t src_sN;
  int64_t src_sC;
  int64_t src_sX;
  int64_t src_sY;
  int64_t src_sZ;
  bool src_32b_ok;
  void* src_ptr;
  int64_t trgt_sN;
  int64_t trgt_sC;
  int64_t trgt_sX;
  int64_t trgt_sY;
  int64_t trgt_sZ;
  int64_t trgt_sK;
  bool trgt_32b_ok;
  void* trgt_ptr;
  int64_t grid_sN;
  int64_t grid_sC;
  int64_t grid_sX;
  int64_t grid_sY;
  int64_t grid_sZ;
  bool grid_32b_ok;
  void* grid_ptr;
  int64_t out_sN;
  int64_t out_sC;
  int64_t out_sX;
  int64_t out_sY;
  int64_t out_sZ;
  int64_t out_sK; // gradient dimension
  bool out_32b_ok;
  void* out_ptr;
  int64_t grad_sN;
  int64_t grad_sC;
  int64_t grad_sX;
  int64_t grad_sY;
  int64_t grad_sZ;
  bool grad_32b_ok;
  void* grad_ptr;

  // Allow PushPullImpl's constructor to access PushPullAllocator's
  // private members.
  template <typename scalar_t, typename offset_t>
  friend class PushPullImpl;
};
306
+
307
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
308
+ // INITIALISATION
309
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
310
+
311
// Reset every size, stride, pointer and flag to a neutral default so that
// a subsequent ioset() only has to fill in what its inputs provide.
MONAI_HOST
void PushPullAllocator::init_all() {
  src_opt = grid_opt = trgt_opt = TensorOptions();
  N = C = 1L;
  src_X = src_Y = src_Z = 1L;
  trgt_X = trgt_Y = trgt_Z = 1L;
  trgt_K = 0L;
  src_sN = src_sC = src_sX = src_sY = src_sZ = 0L;
  grid_sN = grid_sC = grid_sX = grid_sY = grid_sZ = 0L;
  grad_sN = grad_sC = grad_sX = grad_sY = grad_sZ = 0L;
  trgt_sN = trgt_sC = trgt_sX = trgt_sY = trgt_sZ = trgt_sK = 0L;
  out_sN = out_sC = out_sX = out_sY = out_sZ = out_sK = 0L;
  // All pointers are void*; the float* cast is just a typed null source.
  src_ptr = trgt_ptr = grid_ptr = out_ptr = grad_ptr = static_cast<float*>(0);
  // Flags default to true so tensors that are never registered do not
  // block the 32-bit dispatch decision.
  src_32b_ok = trgt_32b_ok = grid_32b_ok = out_32b_ok = grad_32b_ok = true;
}
326
+
327
// Record geometry of a concrete source tensor, laid out as
// (N, C, X[, Y[, Z]]); absent trailing spatial dims get size 1 / stride 0.
MONAI_HOST
void PushPullAllocator::init_source(const Tensor& source) {
  N = source.size(0);
  C = source.size(1);
  src_X = source.size(2);
  src_Y = dim < 2 ? 1L : source.size(3);
  src_Z = dim < 3 ? 1L : source.size(4);
  src_sN = source.stride(0);
  src_sC = source.stride(1);
  src_sX = source.stride(2);
  src_sY = dim < 2 ? 0L : source.stride(3);
  src_sZ = dim < 3 ? 0L : source.stride(4);
  src_ptr = source.data_ptr();
  src_opt = source.options();
  src_32b_ok = tensorCanUse32BitIndexMath(source);
}
343
+
344
// Record only the spatial extent of the (virtual) source volume; used by
// push/count where no source tensor exists, only its target shape.
MONAI_HOST
void PushPullAllocator::init_source(IntArrayRef source_size) {
  src_X = source_size[0];
  src_Y = dim < 2 ? 1L : source_size[1];
  src_Z = dim < 3 ? 1L : source_size[2];
}
350
+
351
// Record geometry of the sampling grid, laid out as
// (N, X[, Y[, Z]], dim) — the coordinate channel is the LAST axis, hence
// grid_sC is taken from stride(dim + 1).
MONAI_HOST
void PushPullAllocator::init_grid(const Tensor& grid) {
  N = grid.size(0);
  trgt_X = grid.size(1);
  trgt_Y = dim < 2 ? 1L : grid.size(2);
  trgt_Z = dim < 3 ? 1L : grid.size(3);
  grid_sN = grid.stride(0);
  grid_sX = grid.stride(1);
  grid_sY = dim < 2 ? 0L : grid.stride(2);
  grid_sZ = dim < 3 ? 0L : grid.stride(3);
  grid_sC = grid.stride(dim == 1 ? 2 : dim == 2 ? 3 : 4);
  grid_ptr = grid.data_ptr();
  grid_opt = grid.options();
  grid_32b_ok = tensorCanUse32BitIndexMath(grid);
}
366
+
367
// Record geometry of the target tensor, laid out as
// (N, C, X[, Y[, Z]][, K]). The optional trailing K axis (present when
// target.dim() == dim + 3) carries per-dimension gradient components.
MONAI_HOST
void PushPullAllocator::init_target(const Tensor& target) {
  N = target.size(0);
  C = target.size(1);
  trgt_X = target.size(2);
  trgt_Y = dim < 2 ? 1L : target.size(3);
  trgt_Z = dim < 3 ? 1L : target.size(4);
  trgt_K = target.dim() == dim + 3 ? target.size(dim == 1 ? 3 : dim == 2 ? 4 : 5) : 0L;
  trgt_sN = target.stride(0);
  trgt_sC = target.stride(1);
  trgt_sX = target.stride(2);
  trgt_sY = dim < 2 ? 0L : target.stride(3);
  trgt_sZ = dim < 3 ? 0L : target.stride(4);
  trgt_sK = target.dim() == dim + 3 ? target.stride(dim == 1 ? 3 : dim == 2 ? 4 : 5) : 0L;
  trgt_ptr = target.data_ptr();
  trgt_opt = target.options();
  trgt_32b_ok = tensorCanUse32BitIndexMath(target);
}
385
+
386
+ MONAI_HOST
387
+ void PushPullAllocator::init_output() {
388
+ output.clear();
389
+ if (do_pull) {
390
+ if (dim == 1)
391
+ output.push_back(at::empty({N, C, trgt_X}, src_opt));
392
+ else if (dim == 2)
393
+ output.push_back(at::empty({N, C, trgt_X, trgt_Y}, src_opt));
394
+ else
395
+ output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z}, src_opt));
396
+ auto pull = output.back();
397
+ out_sN = pull.stride(0);
398
+ out_sC = pull.stride(1);
399
+ out_sX = pull.stride(2);
400
+ out_sY = dim < 2 ? 0L : pull.stride(3);
401
+ out_sZ = dim < 3 ? 0L : pull.stride(4);
402
+ out_sK = 0L;
403
+ out_ptr = pull.data_ptr();
404
+ out_32b_ok = tensorCanUse32BitIndexMath(pull);
405
+ } else if (do_sgrad) {
406
+ if (dim == 1)
407
+ output.push_back(at::empty({N, C, trgt_X, 1}, src_opt));
408
+ else if (dim == 2)
409
+ output.push_back(at::empty({N, C, trgt_X, trgt_Y, 2}, src_opt));
410
+ else
411
+ output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z, 3}, src_opt));
412
+ auto sgrad = output.back();
413
+ out_sN = sgrad.stride(0);
414
+ out_sC = sgrad.stride(1);
415
+ out_sX = sgrad.stride(2);
416
+ out_sY = dim < 2 ? 0L : sgrad.stride(3);
417
+ out_sZ = dim < 3 ? 0L : sgrad.stride(4);
418
+ out_sK = sgrad.stride(dim == 1 ? 3 : dim == 2 ? 4 : 5);
419
+ out_ptr = sgrad.data_ptr();
420
+ out_32b_ok = tensorCanUse32BitIndexMath(sgrad);
421
+
422
+ if (iso && interpolation0 == InterpolationType::Nearest)
423
+ sgrad.zero_();
424
+ if (iso && interpolation0 == InterpolationType::Linear && dim == 1)
425
+ sgrad.zero_();
426
+ } else if (do_push) {
427
+ if (dim == 1)
428
+ output.push_back(at::zeros({N, C, src_X}, trgt_opt));
429
+ else if (dim == 2)
430
+ output.push_back(at::zeros({N, C, src_X, src_Y}, trgt_opt));
431
+ else
432
+ output.push_back(at::zeros({N, C, src_X, src_Y, src_Z}, trgt_opt));
433
+ auto push = output.back();
434
+ out_sN = push.stride(0);
435
+ out_sC = push.stride(1);
436
+ out_sX = push.stride(2);
437
+ out_sY = dim < 2 ? 0L : push.stride(3);
438
+ out_sZ = dim < 3 ? 0L : push.stride(4);
439
+ out_sK = 0L;
440
+ out_ptr = push.data_ptr();
441
+ out_32b_ok = tensorCanUse32BitIndexMath(push);
442
+ } else if (do_count) {
443
+ if (dim == 1)
444
+ output.push_back(at::zeros({N, 1, src_X}, grid_opt));
445
+ else if (dim == 2)
446
+ output.push_back(at::zeros({N, 1, src_X, src_Y}, grid_opt));
447
+ else
448
+ output.push_back(at::zeros({N, 1, src_X, src_Y, src_Z}, grid_opt));
449
+ auto count = output.back();
450
+ out_sN = count.stride(0);
451
+ out_sC = count.stride(1);
452
+ out_sX = count.stride(2);
453
+ out_sY = dim < 2 ? 0L : count.stride(3);
454
+ out_sZ = dim < 3 ? 0L : count.stride(4);
455
+ out_sK = 0L;
456
+ out_ptr = count.data_ptr();
457
+ out_32b_ok = tensorCanUse32BitIndexMath(count);
458
+ }
459
+ if (do_grad) {
460
+ if (dim == 1)
461
+ output.push_back(at::zeros({N, trgt_X, 1}, grid_opt));
462
+ else if (dim == 2)
463
+ output.push_back(at::zeros({N, trgt_X, trgt_Y, 2}, grid_opt));
464
+ else
465
+ output.push_back(at::zeros({N, trgt_X, trgt_Y, trgt_Z, 3}, grid_opt));
466
+ auto grad = output.back();
467
+ grad_sN = grad.stride(0);
468
+ grad_sX = grad.stride(1);
469
+ grad_sY = dim < 2 ? 0L : grad.stride(2);
470
+ grad_sZ = dim < 3 ? 0L : grad.stride(3);
471
+ grad_sC = grad.stride(dim == 1 ? 2 : dim == 2 ? 3 : 4);
472
+ grad_ptr = grad.data_ptr();
473
+ out_32b_ok = tensorCanUse32BitIndexMath(grad);
474
+
475
+ if (iso && interpolation0 == InterpolationType::Nearest)
476
+ grad.zero_();
477
+ }
478
+ }
479
+
480
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
481
+ // GENERIC PUSHPULL CLASS
482
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
483
+ // This class implements the bulk of the code.
484
+ // /!\ No type and shape checking is performed here.
485
+
486
+ template <typename scalar_t, typename offset_t>
487
+ class PushPullImpl {
488
+ public:
489
+ // ~~~ CONSTRUCTOR ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
490
+ PushPullImpl(const PushPullAllocator& info)
491
+ : output(info.output),
492
+ dim(info.dim),
493
+ bound0(info.bound0),
494
+ bound1(info.bound1),
495
+ bound2(info.bound2),
496
+ interpolation0(info.interpolation0),
497
+ interpolation1(info.interpolation1),
498
+ interpolation2(info.interpolation1),
499
+ iso(info.iso),
500
+ extrapolate(info.extrapolate),
501
+ do_pull(info.do_pull),
502
+ do_push(info.do_push),
503
+ do_count(info.do_count),
504
+ do_grad(info.do_grad),
505
+ do_sgrad(info.do_sgrad),
506
+ N(static_cast<offset_t>(info.N)),
507
+ C(static_cast<offset_t>(info.C)),
508
+ src_X(static_cast<offset_t>(info.src_X)),
509
+ src_Y(static_cast<offset_t>(info.src_Y)),
510
+ src_Z(static_cast<offset_t>(info.src_Z)),
511
+ trgt_X(static_cast<offset_t>(info.trgt_X)),
512
+ trgt_Y(static_cast<offset_t>(info.trgt_Y)),
513
+ trgt_Z(static_cast<offset_t>(info.trgt_Z)),
514
+ trgt_K(static_cast<offset_t>(info.trgt_K)),
515
+ src_sN(static_cast<offset_t>(info.src_sN)),
516
+ src_sC(static_cast<offset_t>(info.src_sC)),
517
+ src_sX(static_cast<offset_t>(info.src_sX)),
518
+ src_sY(static_cast<offset_t>(info.src_sY)),
519
+ src_sZ(static_cast<offset_t>(info.src_sZ)),
520
+ src_ptr(static_cast<scalar_t*>(info.src_ptr)),
521
+ trgt_sN(static_cast<offset_t>(info.trgt_sN)),
522
+ trgt_sC(static_cast<offset_t>(info.trgt_sC)),
523
+ trgt_sX(static_cast<offset_t>(info.trgt_sX)),
524
+ trgt_sY(static_cast<offset_t>(info.trgt_sY)),
525
+ trgt_sZ(static_cast<offset_t>(info.trgt_sZ)),
526
+ trgt_sK(static_cast<offset_t>(info.trgt_sK)),
527
+ trgt_ptr(static_cast<scalar_t*>(info.trgt_ptr)),
528
+ grid_sN(static_cast<offset_t>(info.grid_sN)),
529
+ grid_sC(static_cast<offset_t>(info.grid_sC)),
530
+ grid_sX(static_cast<offset_t>(info.grid_sX)),
531
+ grid_sY(static_cast<offset_t>(info.grid_sY)),
532
+ grid_sZ(static_cast<offset_t>(info.grid_sZ)),
533
+ grid_ptr(static_cast<scalar_t*>(info.grid_ptr)),
534
+ out_sN(static_cast<offset_t>(info.out_sN)),
535
+ out_sC(static_cast<offset_t>(info.out_sC)),
536
+ out_sX(static_cast<offset_t>(info.out_sX)),
537
+ out_sY(static_cast<offset_t>(info.out_sY)),
538
+ out_sZ(static_cast<offset_t>(info.out_sZ)),
539
+ out_sK(static_cast<offset_t>(info.out_sK)),
540
+ out_ptr(static_cast<scalar_t*>(info.out_ptr)),
541
+ grad_sN(static_cast<offset_t>(info.grad_sN)),
542
+ grad_sC(static_cast<offset_t>(info.grad_sC)),
543
+ grad_sX(static_cast<offset_t>(info.grad_sX)),
544
+ grad_sY(static_cast<offset_t>(info.grad_sY)),
545
+ grad_sZ(static_cast<offset_t>(info.grad_sZ)),
546
+ grad_ptr(static_cast<scalar_t*>(info.grad_ptr)) {}
547
+
548
+ // ~~~ PUBLIC VALUE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
549
+
550
+ std::deque<Tensor> output;
551
+
552
+ // MONAI_HOST MONAI_DEVICE void printInfo() const {
553
+ // printf("dim: %d\n", dim);
554
+ // printf("do_pull: %d\n", do_pull);
555
+ // printf("do_push: %d\n", do_push);
556
+ // printf("do_count: %d\n", do_count);
557
+ // printf("do_sgrad: %d\n", do_sgrad);
558
+ // printf("do_grad: %d\n", do_grad);
559
+ // printf("bound: [%d %d %d]\n", static_cast<int>(bound0),
560
+ // static_cast<int>(bound1), static_cast<int>(bound2));
561
+ // printf("interpolation: [%d %d %d]\n", static_cast<int>(interpolation0),
562
+ // static_cast<int>(interpolation1), static_cast<int>(interpolation2));
563
+ // printf("src: [%d %d %d]\n", src_Z, src_Y, src_X);
564
+ // printf("trgt: [%d %d %d (%d)]\n", trgt_Z, trgt_Y, trgt_X, trgt_K);
565
+ // printf("N: %d\n", N);
566
+ // printf("C: %d\n", C);
567
+ // printf("src -> %lu\n", reinterpret_cast<std::uintptr_t>(src_ptr));
568
+ // printf("trgt -> %lu\n", reinterpret_cast<std::uintptr_t>(trgt_ptr));
569
+ // printf("grid -> %lu\n", reinterpret_cast<std::uintptr_t>(grid_ptr));
570
+ // printf("out -> %lu\n", reinterpret_cast<std::uintptr_t>(out_ptr));
571
+ // printf("grad -> %lu\n", reinterpret_cast<std::uintptr_t>(grad_ptr));
572
+ // }
573
+
574
+ // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
575
+
576
+ // Loop over voxels that belong to one CUDA block
577
+ // This function is called by the CUDA kernel
578
+ MONAI_DEVICE void loop(int threadIdx, int blockIdx, int blockDim, int gridDim) const;
579
+
580
+ MONAI_HOST MONAI_DEVICE int64_t voxcount() const {
581
+ return N * trgt_X * trgt_Y * trgt_Z;
582
+ }
583
+
584
+ private:
585
+ // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
586
+ MONAI_DEVICE void check1d(offset_t w, offset_t n) const;
587
+ MONAI_DEVICE void check2d(offset_t w, offset_t h, offset_t n) const;
588
+ MONAI_DEVICE void check3d(offset_t w, offset_t h, offset_t d, offset_t n) const;
589
+ MONAI_DEVICE void interpolate1d(scalar_t x, offset_t w, offset_t n) const;
590
+ MONAI_DEVICE void interpolate1d_nearest(scalar_t x, offset_t w, offset_t n) const;
591
+ MONAI_DEVICE void interpolate1d_linear(scalar_t x, offset_t w, offset_t n) const;
592
+ MONAI_DEVICE void interpolate1d_sliding(scalar_t x, offset_t w, offset_t n) const { /*TODO*/
593
+ }
594
+ MONAI_DEVICE void interpolate1d_sliding_nearest(scalar_t x, offset_t w, offset_t n) const { /*TODO*/
595
+ }
596
+ MONAI_DEVICE void interpolate1d_sliding_linear(scalar_t x, offset_t w, offset_t n) const { /*TODO*/
597
+ }
598
+ MONAI_DEVICE void interpolate2d(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const;
599
+ MONAI_DEVICE void interpolate2d_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const;
600
+ MONAI_DEVICE void interpolate2d_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const;
601
+ MONAI_DEVICE void interpolate2d_sliding(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { /*TODO*/
602
+ }
603
+ MONAI_DEVICE void interpolate2d_sliding_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n)
604
+ const { /*TODO*/
605
+ }
606
+ MONAI_DEVICE void interpolate2d_sliding_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n)
607
+ const { /*TODO*/
608
+ }
609
+ MONAI_DEVICE void interpolate3d(scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n)
610
+ const;
611
+ MONAI_DEVICE void interpolate3d_nearest(
612
+ scalar_t x,
613
+ scalar_t y,
614
+ scalar_t z,
615
+ offset_t w,
616
+ offset_t h,
617
+ offset_t d,
618
+ offset_t n) const;
619
+ MONAI_DEVICE void interpolate3d_trilinear(
620
+ scalar_t x,
621
+ scalar_t y,
622
+ scalar_t z,
623
+ offset_t w,
624
+ offset_t h,
625
+ offset_t d,
626
+ offset_t n) const;
627
+ MONAI_DEVICE void interpolate3d_sliding(
628
+ scalar_t x,
629
+ scalar_t y,
630
+ scalar_t z,
631
+ offset_t w,
632
+ offset_t h,
633
+ offset_t d,
634
+ offset_t n) const { /*TODO*/
635
+ }
636
+ MONAI_DEVICE void interpolate3d_sliding_nearest(
637
+ scalar_t x,
638
+ scalar_t y,
639
+ scalar_t z,
640
+ offset_t w,
641
+ offset_t h,
642
+ offset_t d,
643
+ offset_t n) const { /*TODO*/
644
+ }
645
+ MONAI_DEVICE void interpolate3d_sliding_trilinear(
646
+ scalar_t x,
647
+ scalar_t y,
648
+ scalar_t z,
649
+ offset_t w,
650
+ offset_t h,
651
+ offset_t d,
652
+ offset_t n) const { /*TODO*/
653
+ }
654
+
655
+ // ~~~ OPTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
656
+ int dim; // dimensionality (2 or 3)
657
+ BoundType bound0; // boundary condition // x|W
658
+ BoundType bound1; // boundary condition // y|H
659
+ BoundType bound2; // boundary condition // z|D
660
+ InterpolationType interpolation0; // interpolation order // x|W
661
+ InterpolationType interpolation1; // interpolation order // y|H
662
+ InterpolationType interpolation2; // interpolation order // z|D
663
+ bool iso; // isotropic interpolation?
664
+ bool extrapolate; // compute out-of-bound values
665
+ bool do_pull; // sample a volume
666
+ bool do_push; // splat a volume
667
+ bool do_count; // splatting weights (= jacobian determinant)
668
+ bool do_grad; // backprop: gradient of grid // pull
669
+ bool do_sgrad; // sample spatial gradients
670
+
671
+ // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
672
+ offset_t N;
673
+ offset_t C;
674
+ offset_t src_X;
675
+ offset_t src_Y;
676
+ offset_t src_Z;
677
+ offset_t trgt_X;
678
+ offset_t trgt_Y;
679
+ offset_t trgt_Z;
680
+ offset_t trgt_K;
681
+ offset_t src_sN;
682
+ offset_t src_sC;
683
+ offset_t src_sX;
684
+ offset_t src_sY;
685
+ offset_t src_sZ;
686
+ scalar_t* src_ptr;
687
+ offset_t trgt_sN;
688
+ offset_t trgt_sC;
689
+ offset_t trgt_sX;
690
+ offset_t trgt_sY;
691
+ offset_t trgt_sZ;
692
+ offset_t trgt_sK;
693
+ scalar_t* trgt_ptr;
694
+ offset_t grid_sN;
695
+ offset_t grid_sC;
696
+ offset_t grid_sX;
697
+ offset_t grid_sY;
698
+ offset_t grid_sZ;
699
+ scalar_t* grid_ptr;
700
+ offset_t out_sN;
701
+ offset_t out_sC;
702
+ offset_t out_sX;
703
+ offset_t out_sY;
704
+ offset_t out_sZ;
705
+ offset_t out_sK; // gradient dimension
706
+ scalar_t* out_ptr;
707
+ offset_t grad_sN;
708
+ offset_t grad_sC;
709
+ offset_t grad_sX;
710
+ offset_t grad_sY;
711
+ offset_t grad_sZ;
712
+ scalar_t* grad_ptr;
713
+ };
714
+
715
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
716
+ // LOOP
717
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
718
+
719
+ template <typename scalar_t, typename offset_t>
720
+ MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::loop(int threadIdx, int blockIdx, int blockDim, int gridDim)
721
+ const {
722
+ int64_t index = blockIdx * blockDim + threadIdx;
723
+ int64_t nthreads = voxcount();
724
+ offset_t trgt_XYZ = trgt_Z * trgt_Y * trgt_X;
725
+ offset_t trgt_YZ = trgt_Z * trgt_Y;
726
+ offset_t n, w, h, d;
727
+ for (offset_t i = index; index < nthreads; index += blockDim * gridDim, i = index) {
728
+ // Convert index: linear to sub
729
+ n = (i / trgt_XYZ);
730
+ w = (i / trgt_YZ) % trgt_X;
731
+ h = (i / trgt_Z) % trgt_Y;
732
+ d = i % trgt_Z;
733
+
734
+ if (dim == 1)
735
+ check1d(w, n);
736
+ else if (dim == 2)
737
+ check2d(w, h, n);
738
+ else
739
+ check3d(w, h, d, n);
740
+ }
741
+ }
742
+
743
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
744
+ // CHECK OUT-OF-BOUND
745
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
746
+
747
+ // Here, we:
748
+ // 1) read the [x,y,z] source coordinate for the current target voxel
749
+ // 2) check if the source coordinate is in bounds
750
+
751
// Bound-check the (x, y, z) sampling coordinate of one 3D target voxel,
// then dispatch to the interpolation routine selected by the boundary
// condition (sliding vs. other) and, when iso, by the interpolation order.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check3d(offset_t w, offset_t h, offset_t d, offset_t n) const {
  // get the corresponding input x, y, z co-ordinates from grid
  scalar_t* grid_ptr_NXYZ = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY + d * grid_sZ;
  scalar_t x = *grid_ptr_NXYZ;
  scalar_t y = grid_ptr_NXYZ[grid_sC];
  scalar_t z = grid_ptr_NXYZ[grid_sC * 2];

  // Check if out-of-bound
  if (!(extrapolate ||
        (inbounds(x, src_X, static_cast<scalar_t>(TINY)) && inbounds(y, src_Y, static_cast<scalar_t>(TINY)) &&
         inbounds(z, src_Z, static_cast<scalar_t>(TINY))))) {
    // Out of bounds and extrapolation disabled: zero-fill every output
    // this voxel would have produced, then stop.
    if (do_pull || do_sgrad) {
      scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
      for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) {
        *out_ptr_NCXYZ = static_cast<scalar_t>(0);
        if (do_sgrad) {
          // one spatial-derivative component per K slot (y and z here)
          out_ptr_NCXYZ[out_sK] = static_cast<scalar_t>(0);
          out_ptr_NCXYZ[out_sK * 2] = static_cast<scalar_t>(0);
        }
      }
    }
    if (do_grad) {
      scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ;
      (*grad_ptr_NXYZ) = static_cast<scalar_t>(0);
      grad_ptr_NXYZ[grad_sC] = static_cast<scalar_t>(0);
      grad_ptr_NXYZ[grad_sC * 2] = static_cast<scalar_t>(0);
    }
    return;
  }

  // Next step
  // NOTE(review): only interpolation0 is inspected here — when iso is set
  // all axes share one order; otherwise the generic interpolate3d handles
  // per-axis orders.
  if (bound0 == BoundType::Sliding) {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate3d_sliding_nearest(x, y, z, w, h, d, n);
        case 1:
          return interpolate3d_sliding_trilinear(x, y, z, w, h, d, n);
      }
    return interpolate3d_sliding(x, y, z, w, h, d, n);
  } else {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate3d_nearest(x, y, z, w, h, d, n);
        case 1:
          return interpolate3d_trilinear(x, y, z, w, h, d, n);
      }
    return interpolate3d(x, y, z, w, h, d, n);
  }
}
803
+
804
// Bound-check the (x, y) sampling coordinate of one 2D target voxel,
// then dispatch to the interpolation routine selected by the boundary
// condition (sliding vs. other) and, when iso, by the interpolation order.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check2d(offset_t w, offset_t h, offset_t n) const {
  // get the corresponding input x, y, z co-ordinates from grid
  scalar_t* grid_ptr_NXY = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY;
  scalar_t x = *grid_ptr_NXY;
  scalar_t y = grid_ptr_NXY[grid_sC];

  // Check if out-of-bound
  if (!(extrapolate ||
        (inbounds(x, src_X, static_cast<scalar_t>(TINY)) && inbounds(y, src_Y, static_cast<scalar_t>(TINY))))) {
    // Out of bounds and extrapolation disabled: zero-fill every output
    // this voxel would have produced, then stop.
    if (do_pull || do_sgrad) {
      scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY;
      for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) {
        *out_ptr_NCXY = static_cast<scalar_t>(0);
        if (do_sgrad)
          out_ptr_NCXY[out_sK] = static_cast<scalar_t>(0);
      }
    }
    if (do_grad) {
      scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY;
      (*grad_ptr_NXY) = static_cast<scalar_t>(0);
      grad_ptr_NXY[grad_sC] = static_cast<scalar_t>(0);
    }
    return;
  }

  // Next step
  // NOTE(review): only interpolation0 is inspected here — when iso is set
  // all axes share one order; otherwise the generic interpolate2d handles
  // per-axis orders.
  if (bound0 == BoundType::Sliding) {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate2d_sliding_nearest(x, y, w, h, n);
        case 1:
          return interpolate2d_sliding_bilinear(x, y, w, h, n);
      }
    return interpolate2d_sliding(x, y, w, h, n);
  } else {
    if (iso)
      switch (static_cast<int>(interpolation0)) {
        case 0:
          return interpolate2d_nearest(x, y, w, h, n);
        case 1:
          return interpolate2d_bilinear(x, y, w, h, n);
      }
    return interpolate2d(x, y, w, h, n);
  }
}
851
+
852
+ template <typename scalar_t, typename offset_t>
853
+ MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check1d(offset_t w, offset_t n) const {
854
+ // get the corresponding input x, y, z co-ordinates from grid
855
+ scalar_t* grid_ptr_NX = grid_ptr + n * grid_sN + w * grid_sX;
856
+ scalar_t x = *grid_ptr_NX;
857
+
858
+ // Check if out-of-bound
859
+ if (!(extrapolate || inbounds(x, src_X, static_cast<scalar_t>(TINY)))) {
860
+ if (do_pull || do_sgrad) {
861
+ scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX;
862
+ for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC) {
863
+ *out_ptr_NCX = static_cast<scalar_t>(0);
864
+ if (do_sgrad)
865
+ out_ptr_NCX[out_sK] = static_cast<scalar_t>(0);
866
+ }
867
+ }
868
+ if (do_grad) {
869
+ scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX;
870
+ (*grad_ptr_NX) = static_cast<scalar_t>(0);
871
+ grad_ptr_NX[grad_sC] = static_cast<scalar_t>(0);
872
+ }
873
+ return;
874
+ }
875
+
876
+ // Next step
877
+ if (bound0 == BoundType::Sliding) {
878
+ if (iso)
879
+ switch (static_cast<int>(interpolation0)) {
880
+ case 0:
881
+ return interpolate1d_sliding_nearest(x, w, n);
882
+ case 1:
883
+ return interpolate1d_sliding_linear(x, w, n);
884
+ }
885
+ return interpolate1d_sliding(x, w, n);
886
+ } else {
887
+ if (iso)
888
+ switch (static_cast<int>(interpolation0)) {
889
+ case 0:
890
+ return interpolate1d_nearest(x, w, n);
891
+ case 1:
892
+ return interpolate1d_linear(x, w, n);
893
+ }
894
+ return interpolate1d(x, w, n);
895
+ }
896
+ }
897
+
898
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
899
+ // GENERIC INTERPOLATION 3D
900
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
901
+
902
// Generic separable interpolation at one 3D sample point (x, y, z) for
// target voxel (n, w, h, d). Depending on the flags it performs:
//   do_pull  : sample the source volume at (x, y, z)
//   do_sgrad : sample the spatial gradient of the source
//   do_push  : splat the target value into the output
//   do_count : splat the interpolation weights only
//   do_grad  : accumulate the gradient w.r.t. the sampling grid
// Weights/derivatives are precomputed per axis, then combined in a
// triple loop over the interpolation support.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d(
    scalar_t x,
    scalar_t y,
    scalar_t z,
    offset_t w,
    offset_t h,
    offset_t d,
    offset_t n) const {
  // Get corner pixel values from (x, y, z)
  offset_t bx0, bx1, by0, by1, bz0, bz1;
  interpolation::bounds(interpolation0, x, bx0, bx1);
  interpolation::bounds(interpolation1, y, by0, by1);
  interpolation::bounds(interpolation2, z, bz0, bz1);
  // Support size per axis (inclusive bounds).
  offset_t dbx = bx1 - bx0;
  offset_t dby = by1 - by0;
  offset_t dbz = bz1 - bz0;

  // Pre-compute offsets and target value
  scalar_t* src_ptr_NC0 = src_ptr + n * src_sN;
  scalar_t* out_ptr_NC0 = out_ptr + n * out_sN;
  scalar_t* out_ptr_NCXYZ0 = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
  scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ;
  // Local copy of the target value(s): one slot per channel, plus two
  // extra banks of C when trgt_K > 0 (the sgrad case).
  scalar_t target[3 * MONAI_MAX_NUM_CHANNELS];
  if (trgt_ptr && (do_push || do_grad))
    for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC) {
      target[c] = *trgt_ptr_NCXYZ;
      if (trgt_K > 0) {
        target[c + C] = trgt_ptr_NCXYZ[trgt_sK];
        target[c + C * 2] = trgt_ptr_NCXYZ[trgt_sK * 2];
      }
    }

  // Initialize output
  scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0;
  if (do_pull || do_sgrad) {
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) {
      *out_ptr_NCXYZ = static_cast<scalar_t>(0);
      if (do_sgrad) {
        out_ptr_NCXYZ[out_sK] = static_cast<scalar_t>(0);
        out_ptr_NCXYZ[out_sK * 2] = static_cast<scalar_t>(0);
      }
    }
  }

  // Pre-compute indices/weights/grad
  scalar_t wx[8], wy[8], wz[8]; // B-spline weights
  scalar_t gx[8], gy[8], gz[8]; // B-spline derivatives
  scalar_t hx[8], hy[8], hz[8]; // B-spline 2nd derivatives
  offset_t ix[8], iy[8], iz[8]; // Warped indices
  uint8_t sx[8], sy[8], sz[8]; // Boundary sign flips (bound::sign)

  {
    scalar_t *owz = static_cast<scalar_t*>(wz), *ogz = static_cast<scalar_t*>(gz), *ohz = static_cast<scalar_t*>(hz);
    offset_t* oiz = static_cast<offset_t*>(iz);
    uint8_t* osz = static_cast<uint8_t*>(sz);
    for (offset_t bz = bz0; bz <= bz1; ++bz) {
      scalar_t dz = z - bz;
      *(owz++) = interpolation::fastweight(interpolation2, dz);
      if (do_grad || do_sgrad)
        *(ogz++) = interpolation::fastgrad(interpolation2, dz);
      if (do_grad && trgt_sK > 1)
        *(ohz++) = interpolation::fasthess(interpolation2, dz);
      *(osz++) = bound::sign(bound2, bz, src_Z);
      *(oiz++) = bound::index(bound2, bz, src_Z);
    }
  }
  {
    scalar_t *owy = static_cast<scalar_t*>(wy), *ogy = static_cast<scalar_t*>(gy), *ohy = static_cast<scalar_t*>(hy);
    offset_t* oiy = static_cast<offset_t*>(iy);
    uint8_t* osy = static_cast<uint8_t*>(sy);
    for (offset_t by = by0; by <= by1; ++by) {
      scalar_t dy = y - by;
      *(owy++) = interpolation::fastweight(interpolation1, dy);
      if (do_grad || do_sgrad)
        *(ogy++) = interpolation::fastgrad(interpolation1, dy);
      if (do_grad && trgt_sK > 1)
        *(ohy++) = interpolation::fasthess(interpolation1, dy);
      *(osy++) = bound::sign(bound1, by, src_Y);
      *(oiy++) = bound::index(bound1, by, src_Y);
    }
  }
  {
    scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx);
    offset_t* oix = static_cast<offset_t*>(ix);
    uint8_t* osx = static_cast<uint8_t*>(sx);
    for (offset_t bx = bx0; bx <= bx1; ++bx) {
      scalar_t dx = x - bx;
      *(owx++) = interpolation::fastweight(interpolation0, dx);
      if (do_grad || do_sgrad)
        *(ogx++) = interpolation::fastgrad(interpolation0, dx);
      if (do_grad && trgt_sK > 1)
        *(ohx++) = interpolation::fasthess(interpolation0, dx);
      *(osx++) = bound::sign(bound0, bx, src_X);
      *(oix++) = bound::index(bound0, bx, src_X);
    }
  }

  // Convolve coefficients with basis functions
  // ogx/ogy/ogz accumulate the grid gradient across the whole support.
  scalar_t ogx, ogy, ogz;
  ogx = ogy = ogz = static_cast<scalar_t>(0);
  for (offset_t k = 0; k <= dbz; ++k) {
    offset_t ooz = iz[k] * out_sZ;
    offset_t osz = iz[k] * src_sZ;
    uint8_t szz = sz[k];
    scalar_t wzz = wz[k];
    scalar_t gzz = gz[k];
    scalar_t hzz = hz[k];
    for (offset_t j = 0; j <= dby; ++j) {
      offset_t ooyz = ooz + iy[j] * out_sY;
      offset_t osyz = osz + iy[j] * src_sY;
      uint8_t syz = szz * sy[j];
      scalar_t wyy = wy[j];
      scalar_t gyy = gy[j];
      scalar_t hyy = hy[j];
      for (offset_t i = 0; i <= dbx; ++i) {
        offset_t ooxyz = ooyz + ix[i] * out_sX;
        offset_t osxyz = osyz + ix[i] * src_sX;
        uint8_t sxyz = syz * sx[i];
        scalar_t wxx = wx[i];
        scalar_t gxx = gx[i];
        scalar_t hxx = hx[i];

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if (do_pull) {
          scalar_t* src_ptr_NC = src_ptr_NC0;
          scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0;
          for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC)
            *out_ptr_NCXYZ += bound::get(src_ptr_NC, osxyz, sxyz) * (wxx * wyy * wzz);
        }

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~
        else if (do_sgrad) {
          scalar_t* src_ptr_NC = src_ptr_NC0;
          scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0;
          for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) {
            scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz);
            // d/dx, d/dy, d/dz: derivative weight on one axis, plain
            // weights on the two others.
            *out_ptr_NCXYZ += src * (gxx * wyy * wzz);
            out_ptr_NCXYZ[out_sK] += src * (wxx * gyy * wzz);
            out_ptr_NCXYZ[2 * out_sK] += src * (wxx * wyy * gzz);
          }
        }

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        else if (do_push) {
          if (trgt_K == 0) {
            // Diff w.r.t. push/pull
            scalar_t* out_ptr_NC = out_ptr_NC0;
            for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
              bound::add(out_ptr_NC, ooxyz, (wxx * wyy * wzz) * target[c], sxyz);
          } else {
            // Diff w.r.t. sgrad
            scalar_t* out_ptr_NC = out_ptr_NC0;
            for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) {
              scalar_t val = (gxx * wyy * wzz) * target[c] + (wxx * gyy * wzz) * target[c + C] +
                  (wxx * wyy * gzz) * target[c + C * 2];
              bound::add(out_ptr_NC, ooxyz, val, sxyz);
            }
          }
        }

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~
        else if (do_count) {
          bound::add(out_ptr_NC0, ooxyz, (wxx * wyy * wzz), sxyz);
        }

        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if (do_grad) {
          if (trgt_K == 0) {
            // Diff w.r.t. pull/push
            scalar_t* src_ptr_NC = src_ptr_NC0;
            scalar_t dot = static_cast<scalar_t>(0);
            for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
              scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz);
              dot += (trgt_ptr ? src * target[c] : src);
              // trgt_ptr == 0 in the backward pass of 'count'
            }
            ogx += (gxx * wyy * wzz) * dot;
            ogy += (wxx * gyy * wzz) * dot;
            ogz += (wxx * wyy * gzz) * dot;
          } else {
            // Diff w.r.t. sgrad
            scalar_t* src_ptr_NC = src_ptr_NC0;
            scalar_t dot0, dot1, dot2;
            dot0 = dot1 = dot2 = static_cast<scalar_t>(0);
            for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
              scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz);
              dot0 += src * target[c];
              dot1 += src * target[c + C];
              dot2 += src * target[c + C * 2];
            }
            ogx += (hxx * wyy * wzz) * dot0 + (gxx * gyy * wzz) * dot1 + (gxx * wyy * gzz) * dot2;
            ogy += (gxx * gyy * wzz) * dot0 + (wxx * hyy * wzz) * dot1 + (wxx * gyy * gzz) * dot2;
            ogz += (gxx * wyy * gzz) * dot0 + (wxx * gyy * gzz) * dot1 + (wxx * wyy * hzz) * dot2;
          }
        }

      } // x
    } // y
  } // z

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_grad) {
    scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ;
    (*grad_ptr_NXYZ) = ogx;
    grad_ptr_NXYZ[grad_sC] = ogy;
    grad_ptr_NXYZ[grad_sC * 2] = ogz;
  }
}
1111
+
1112
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1113
+ // GENERIC INTERPOLATION 2D
1114
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1115
+
1116
// Generic separable interpolation at one 2D sample point (x, y) for
// target voxel (n, w, h). 2D counterpart of interpolate3d: performs
// pull / sgrad / push / count / grad according to the do_* flags, using
// per-axis weights and derivatives combined over the support.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d(
    scalar_t x,
    scalar_t y,
    offset_t w,
    offset_t h,
    offset_t n) const {
  // Get corner pixel values from (x, y)
  offset_t bx0, bx1, by0, by1;
  interpolation::bounds(interpolation0, x, bx0, bx1);
  interpolation::bounds(interpolation1, y, by0, by1);
  // Support size per axis (inclusive bounds).
  offset_t dbx = bx1 - bx0;
  offset_t dby = by1 - by0;

  // Pre-compute offsets and target value
  scalar_t* src_ptr_NC0 = src_ptr + n * src_sN;
  scalar_t* out_ptr_NC0 = out_ptr + n * out_sN;
  scalar_t* out_ptr_NCXY0 = out_ptr + n * out_sN + w * out_sX + h * out_sY;
  scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY;
  // Local copy of the target value(s): one slot per channel, plus one
  // extra bank of C when trgt_K > 0 (the sgrad case).
  scalar_t target[2 * MONAI_MAX_NUM_CHANNELS];
  if (trgt_ptr && (do_push || do_grad))
    for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC) {
      target[c] = *trgt_ptr_NCXY;
      if (trgt_K > 0) {
        target[c + C] = trgt_ptr_NCXY[trgt_sK];
      }
    }

  // Initialize output
  scalar_t* out_ptr_NCXY = out_ptr_NCXY0;
  if (do_pull || do_sgrad) {
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) {
      *out_ptr_NCXY = static_cast<scalar_t>(0);
      if (do_sgrad) {
        out_ptr_NCXY[out_sK] = static_cast<scalar_t>(0);
      }
    }
  }

  // Pre-compute indices/weights/grad
  scalar_t wx[8], wy[8]; // B-spline weights
  scalar_t gx[8], gy[8]; // B-spline derivatives
  scalar_t hx[8], hy[8]; // B-spline 2nd derivatives
  offset_t ix[8], iy[8]; // Warped indices
  uint8_t sx[8], sy[8]; // Boundary sign flips (bound::sign)

  {
    scalar_t *owy = static_cast<scalar_t*>(wy), *ogy = static_cast<scalar_t*>(gy), *ohy = static_cast<scalar_t*>(hy);
    offset_t* oiy = static_cast<offset_t*>(iy);
    uint8_t* osy = static_cast<uint8_t*>(sy);
    for (offset_t by = by0; by <= by1; ++by) {
      scalar_t dy = y - by;
      *(owy++) = interpolation::fastweight(interpolation1, dy);
      if (do_grad || do_sgrad)
        *(ogy++) = interpolation::fastgrad(interpolation1, dy);
      if (do_grad && trgt_sK > 1)
        *(ohy++) = interpolation::fasthess(interpolation1, dy);
      *(osy++) = bound::sign(bound1, by, src_Y);
      *(oiy++) = bound::index(bound1, by, src_Y);
    }
  }
  {
    scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx);
    offset_t* oix = static_cast<offset_t*>(ix);
    uint8_t* osx = static_cast<uint8_t*>(sx);
    for (offset_t bx = bx0; bx <= bx1; ++bx) {
      scalar_t dx = x - bx;
      *(owx++) = interpolation::fastweight(interpolation0, dx);
      if (do_grad || do_sgrad)
        *(ogx++) = interpolation::fastgrad(interpolation0, dx);
      if (do_grad && trgt_sK > 1)
        *(ohx++) = interpolation::fasthess(interpolation0, dx);
      *(osx++) = bound::sign(bound0, bx, src_X);
      *(oix++) = bound::index(bound0, bx, src_X);
    }
  }

  // Convolve coefficients with basis functions
  // ogx/ogy accumulate the grid gradient across the whole support.
  scalar_t ogx, ogy;
  ogx = ogy = static_cast<scalar_t>(0);
  for (offset_t j = 0; j <= dby; ++j) {
    offset_t ooy = iy[j] * out_sY;
    offset_t osy = iy[j] * src_sY;
    uint8_t syy = sy[j];
    scalar_t wyy = wy[j];
    scalar_t gyy = gy[j];
    scalar_t hyy = hy[j];
    for (offset_t i = 0; i <= dbx; ++i) {
      offset_t ooxy = ooy + ix[i] * out_sX;
      offset_t osxy = osy + ix[i] * src_sX;
      uint8_t sxy = syy * sx[i];
      scalar_t wxx = wx[i];
      scalar_t gxx = gx[i];
      scalar_t hxx = hx[i];

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      if (do_pull) {
        scalar_t* src_ptr_NC = src_ptr_NC0;
        scalar_t* out_ptr_NCXY = out_ptr_NCXY0;
        for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC)
          *out_ptr_NCXY += bound::get(src_ptr_NC, osxy, sxy) * (wxx * wyy);
      }

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      else if (do_sgrad) {
        scalar_t* src_ptr_NC = src_ptr_NC0;
        scalar_t* out_ptr_NCXY = out_ptr_NCXY0;
        for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) {
          scalar_t src = bound::get(src_ptr_NC, osxy, sxy);
          // d/dx and d/dy: derivative weight on one axis, plain weight
          // on the other.
          *out_ptr_NCXY += src * (gxx * wyy);
          out_ptr_NCXY[out_sK] += src * (wxx * gyy);
        }
      }

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      else if (do_push) {
        if (trgt_K == 0) {
          // Diff w.r.t. push/pull
          scalar_t* out_ptr_NC = out_ptr_NC0;
          for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
            bound::add(out_ptr_NC, ooxy, (wxx * wyy) * target[c], sxy);
        } else {
          // Diff w.r.t. sgrad
          scalar_t* out_ptr_NC = out_ptr_NC0;
          for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) {
            scalar_t val = (gxx * wyy) * target[c] + (wxx * gyy) * target[c + C];
            bound::add(out_ptr_NC, ooxy, val, sxy);
          }
        }
      }

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      else if (do_count) {
        bound::add(out_ptr_NC0, ooxy, (wxx * wyy), sxy);
      }

      // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      if (do_grad) {
        if (trgt_K == 0) {
          // Diff w.r.t. pull/push
          scalar_t* src_ptr_NC = src_ptr_NC0;
          scalar_t dot = static_cast<scalar_t>(0);
          for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
            scalar_t src = bound::get(src_ptr_NC, osxy, sxy);
            dot += (trgt_ptr ? src * target[c] : src);
            // trgt_ptr == 0 in the backward pass of 'count'
          }
          ogx += (gxx * wyy) * dot;
          ogy += (wxx * gyy) * dot;
        } else {
          // Diff w.r.t. sgrad
          scalar_t* src_ptr_NC = src_ptr_NC0;
          scalar_t dot0, dot1;
          dot0 = dot1 = static_cast<scalar_t>(0);
          for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
            scalar_t src = bound::get(src_ptr_NC, osxy, sxy);
            dot0 += src * target[c];
            dot1 += src * target[c + C];
          }
          ogx += (hxx * wyy) * dot0 + (gxx * gyy) * dot1;
          ogy += (gxx * gyy) * dot0 + (wxx * hyy) * dot1;
        }
      }

    } // x
  } // y

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_grad) {
    scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY;
    (*grad_ptr_NXY) = ogx;
    grad_ptr_NXY[grad_sC] = ogy;
  }
}
1290
+
1291
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1292
+ // GENERIC INTERPOLATION 1D
1293
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1294
+
1295
+ template <typename scalar_t, typename offset_t>
1296
+ MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate1d(scalar_t x, offset_t w, offset_t n) const {
1297
+ // Get corner pixel values from (x, y)
1298
+ offset_t bx0, bx1;
1299
+ interpolation::bounds(interpolation0, x, bx0, bx1);
1300
+ offset_t dbx = bx1 - bx0;
1301
+
1302
+ // Pre-compute offsets and target value
1303
+ scalar_t* src_ptr_NC0 = src_ptr + n * src_sN;
1304
+ scalar_t* out_ptr_NC0 = out_ptr + n * out_sN;
1305
+ scalar_t* out_ptr_NCX0 = out_ptr + n * out_sN + w * out_sX;
1306
+ scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX;
1307
+ scalar_t target[2 * MONAI_MAX_NUM_CHANNELS];
1308
+ if (trgt_ptr && (do_push || do_grad))
1309
+ for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC) {
1310
+ target[c] = *trgt_ptr_NCX;
1311
+ if (trgt_K > 0) {
1312
+ target[c + C] = trgt_ptr_NCX[trgt_sK];
1313
+ }
1314
+ }
1315
+
1316
+ // Initialize output
1317
+ scalar_t* out_ptr_NCX = out_ptr_NCX0;
1318
+ if (do_pull || do_sgrad) {
1319
+ for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC) {
1320
+ *out_ptr_NCX = static_cast<scalar_t>(0);
1321
+ if (do_sgrad) {
1322
+ out_ptr_NCX[out_sK] = static_cast<scalar_t>(0);
1323
+ }
1324
+ }
1325
+ }
1326
+
1327
+ // Pre-compute indices/weights/grad
1328
+ scalar_t wx[8]; // B-spline weights
1329
+ scalar_t gx[8]; // B-spline derivatives
1330
+ scalar_t hx[8]; // B-spline 2nd derivatives
1331
+ offset_t ix[8]; // Warped indices
1332
+ uint8_t sx[8]; // Warped indices
1333
+
1334
+ {
1335
+ scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx);
1336
+ offset_t* oix = static_cast<offset_t*>(ix);
1337
+ uint8_t* osx = static_cast<uint8_t*>(sx);
1338
+ for (offset_t bx = bx0; bx <= bx1; ++bx) {
1339
+ scalar_t dx = x - bx;
1340
+ *(owx++) = interpolation::fastweight(interpolation0, dx);
1341
+ if (do_grad || do_sgrad)
1342
+ *(ogx++) = interpolation::fastgrad(interpolation0, dx);
1343
+ if (do_grad && trgt_sK > 1)
1344
+ *(ohx++) = interpolation::fasthess(interpolation0, dx);
1345
+ *(osx++) = bound::sign(bound0, bx, src_X);
1346
+ *(oix++) = bound::index(bound0, bx, src_X);
1347
+ }
1348
+ }
1349
+
1350
+ // Convolve coefficients with basis functions
1351
+ scalar_t ogx;
1352
+ ogx = static_cast<scalar_t>(0);
1353
+ for (offset_t i = 0; i <= dbx; ++i) {
1354
+ offset_t oox = ix[i] * out_sX;
1355
+ offset_t osx = ix[i] * src_sX;
1356
+ uint8_t sxx = sx[i];
1357
+ scalar_t wxx = wx[i];
1358
+ scalar_t gxx = gx[i];
1359
+ scalar_t hxx = hx[i];
1360
+
1361
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1362
+ if (do_pull) {
1363
+ scalar_t* src_ptr_NC = src_ptr_NC0;
1364
+ scalar_t* out_ptr_NCX = out_ptr_NCX0;
1365
+ for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC)
1366
+ *out_ptr_NCX += bound::get(src_ptr_NC, osx, sxx) * wxx;
1367
+ }
1368
+
1369
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1370
+ else if (do_sgrad) {
1371
+ scalar_t* src_ptr_NC = src_ptr_NC0;
1372
+ scalar_t* out_ptr_NCX = out_ptr_NCX0;
1373
+ for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) {
1374
+ scalar_t src = bound::get(src_ptr_NC, osx, sxx);
1375
+ *out_ptr_NCX += src * gxx;
1376
+ }
1377
+ }
1378
+
1379
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1380
+ else if (do_push) {
1381
+ if (trgt_K == 0) {
1382
+ // Diff w.r.t. push/pull
1383
+ scalar_t* out_ptr_NC = out_ptr_NC0;
1384
+ for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC)
1385
+ bound::add(out_ptr_NC, oox, wxx * target[c], sxx);
1386
+ } else {
1387
+ // Diff w.r.t. sgrad
1388
+ scalar_t* out_ptr_NC = out_ptr_NC0;
1389
+ for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) {
1390
+ scalar_t val = gxx * target[c];
1391
+ bound::add(out_ptr_NC, oox, val, sxx);
1392
+ }
1393
+ }
1394
+ }
1395
+
1396
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1397
+ else if (do_count) {
1398
+ bound::add(out_ptr_NC0, oox, wxx, sxx);
1399
+ }
1400
+
1401
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1402
+ if (do_grad) {
1403
+ if (trgt_K == 0) {
1404
+ // Diff w.r.t. pull/push
1405
+ scalar_t* src_ptr_NC = src_ptr_NC0;
1406
+ scalar_t dot = static_cast<scalar_t>(0);
1407
+ for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
1408
+ scalar_t src = bound::get(src_ptr_NC, osx, sxx);
1409
+ dot += (trgt_ptr ? src * target[c] : src);
1410
+ // trgt_ptr == 0 in the backward pass of 'count'
1411
+ }
1412
+ ogx += gxx * dot;
1413
+ } else {
1414
+ // Diff w.r.t. sgrad
1415
+ scalar_t* src_ptr_NC = src_ptr_NC0;
1416
+ scalar_t dot;
1417
+ dot = static_cast<scalar_t>(0);
1418
+ for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) {
1419
+ scalar_t src = bound::get(src_ptr_NC, osx, sxx);
1420
+ dot += src * target[c];
1421
+ }
1422
+ ogx += hxx * dot;
1423
+ }
1424
+ }
1425
+
1426
+ } // x
1427
+
1428
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1429
+ if (do_grad) {
1430
+ scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX;
1431
+ (*grad_ptr_NX) = ogx;
1432
+ }
1433
+ }
1434
+
1435
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1436
+ // LINEAR INTERPOLATION 3D
1437
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1438
+
1439
// Trilinear (order-1) interpolation at one sampled 3D location.
//
// (x, y, z) - floating-point sampling coordinate, in source-voxel units
// (w, h, d) - integer spatial coordinate of the current output/target voxel
// n         - batch index
//
// A single pass dispatches on the class' do_* flags:
//   do_pull  : sample the source volume at (x, y, z)
//   do_sgrad : spatial gradient of the source at (x, y, z)
//   do_push  : splat the target value at (x, y, z) into the output
//   do_count : splat a constant 1 (splatting-weight / "count" map)
//   do_grad  : gradient with respect to the sampling grid
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d_trilinear(
    scalar_t x,
    scalar_t y,
    scalar_t z,
    offset_t w,
    offset_t h,
    offset_t d,
    offset_t n) const {
  // Get corner pixel values from (x, y, z)
  offset_t ix0 = static_cast<offset_t>(std::floor(x));
  offset_t iy0 = static_cast<offset_t>(std::floor(y));
  offset_t iz0 = static_cast<offset_t>(std::floor(z));

  // Interpolation weights (inversely proportional to distance)
  scalar_t dx1 = x - ix0;
  scalar_t dy1 = y - iy0;
  scalar_t dz1 = z - iz0;
  scalar_t dx0 = 1. - dx1;
  scalar_t dy0 = 1. - dy1;
  scalar_t dz0 = 1. - dz1;
  scalar_t w000 = dx0 * dy0 * dz0;
  scalar_t w100 = dx1 * dy0 * dz0;
  scalar_t w010 = dx0 * dy1 * dz0;
  scalar_t w001 = dx0 * dy0 * dz1;
  scalar_t w110 = dx1 * dy1 * dz0;
  scalar_t w011 = dx0 * dy1 * dz1;
  scalar_t w101 = dx1 * dy0 * dz1;
  scalar_t w111 = dx1 * dy1 * dz1;

  // Sign (/!\ compute sign before warping indices)
  int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X);
  int8_t sy1 = bound::sign(bound1, iy0 + 1, src_Y);
  int8_t sz1 = bound::sign(bound2, iz0 + 1, src_Z);
  int8_t sx0 = bound::sign(bound0, ix0, src_X);
  int8_t sy0 = bound::sign(bound1, iy0, src_Y);
  int8_t sz0 = bound::sign(bound2, iz0, src_Z);
  int8_t s000 = sx0 * sy0 * sz0;
  int8_t s100 = sx1 * sy0 * sz0;
  int8_t s010 = sx0 * sy1 * sz0;
  int8_t s001 = sx0 * sy0 * sz1;
  int8_t s110 = sx1 * sy1 * sz0;
  int8_t s011 = sx0 * sy1 * sz1;
  int8_t s101 = sx1 * sy0 * sz1;
  int8_t s111 = sx1 * sy1 * sz1;

  // Warp indices
  offset_t ix1, iy1, iz1;
  ix1 = bound::index(bound0, ix0 + 1, src_X);
  iy1 = bound::index(bound1, iy0 + 1, src_Y);
  iz1 = bound::index(bound2, iz0 + 1, src_Z);
  ix0 = bound::index(bound0, ix0, src_X);
  iy0 = bound::index(bound1, iy0, src_Y);
  iz0 = bound::index(bound2, iz0, src_Z);

  // Linear offsets of the 8 cube corners. Left uninitialized in the
  // push/count-only case: those branches recompute them with the
  // *output* strides below.
  offset_t o000, o100, o010, o001, o110, o011, o101, o111;

  if (do_pull || do_grad || do_sgrad) {
    // Offsets into source volume
    o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ;
    o100 = ix1 * src_sX + iy0 * src_sY + iz0 * src_sZ;
    o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ;
    o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ;
    o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ;
    o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ;
    o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ;
    o111 = ix1 * src_sX + iy1 * src_sY + iz1 * src_sZ;
  } else if (!(do_push || do_count)) {
    o000 = o100 = o010 = o001 = o110 = o011 = o101 = o111 = 0;
  }

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_grad) {
    scalar_t gx = static_cast<scalar_t>(0);
    scalar_t gy = static_cast<scalar_t>(0);
    scalar_t gz = static_cast<scalar_t>(0);
    scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    if (trgt_K == 0) {
      // backward w.r.t. push/pull: accumulate, per channel, the
      // derivative of each corner weight w.r.t. (x, y, z) times
      // source * target.
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt = trgt_ptr ? *trgt_ptr_NCXYZ : static_cast<scalar_t>(1);
        // ^ trgt_ptr == 0 during the backward pass of count
        src = bound::get(src_ptr_NC, o000, s000);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy0 * dz0 * src;
        gy -= dx0 * dz0 * src;
        gz -= dx0 * dy0 * src;
        src = bound::get(src_ptr_NC, o100, s100);
        if (trgt_ptr)
          src *= trgt;
        gx += dy0 * dz0 * src;
        gy -= dx1 * dz0 * src;
        gz -= dx1 * dy0 * src;
        src = bound::get(src_ptr_NC, o010, s010);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy1 * dz0 * src;
        gy += dx0 * dz0 * src;
        gz -= dx0 * dy1 * src;
        src = bound::get(src_ptr_NC, o110, s110);
        if (trgt_ptr)
          src *= trgt;
        gx += dy1 * dz0 * src;
        gy += dx1 * dz0 * src;
        gz -= dx1 * dy1 * src;
        src = bound::get(src_ptr_NC, o001, s001);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy0 * dz1 * src;
        gy -= dx0 * dz1 * src;
        gz += dx0 * dy0 * src;
        src = bound::get(src_ptr_NC, o101, s101);
        if (trgt_ptr)
          src *= trgt;
        gx += dy0 * dz1 * src;
        gy -= dx1 * dz1 * src;
        gz += dx1 * dy0 * src;
        src = bound::get(src_ptr_NC, o011, s011);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy1 * dz1 * src;
        gy += dx0 * dz1 * src;
        gz += dx0 * dy1 * src;
        src = bound::get(src_ptr_NC, o111, s111);
        if (trgt_ptr)
          src *= trgt;
        gx += dy1 * dz1 * src;
        gy += dx1 * dz1 * src;
        gz += dx1 * dy1 * src;
      }
    } else {
      // backward w.r.t. sgrad: trgt holds K=3 gradient components
      // (trgt0, trgt1, trgt2) per channel; accumulate the second
      // derivative of each corner weight.
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt0 = *trgt_ptr_NCXYZ, trgt1 = trgt_ptr_NCXYZ[trgt_sK], trgt2 = trgt_ptr_NCXYZ[trgt_sK * 2];
        src = bound::get(src_ptr_NC, o000, s000);
        gx += (dz0 * trgt1 + dy0 * trgt2) * src;
        gy += (dz0 * trgt0 + dx0 * trgt2) * src;
        gz += (dy0 * trgt0 + dx0 * trgt1) * src;
        src = bound::get(src_ptr_NC, o100, s100);
        gx += (-dz0 * trgt1 - dy0 * trgt2) * src;
        gy += (-dz0 * trgt0 + dx1 * trgt2) * src;
        gz += (-dy0 * trgt0 + dx1 * trgt1) * src;
        src = bound::get(src_ptr_NC, o010, s010);
        gx += (-dz0 * trgt1 + dy1 * trgt2) * src;
        gy += (-dz0 * trgt0 - dx0 * trgt2) * src;
        gz += (dy1 * trgt0 - dx0 * trgt1) * src;
        src = bound::get(src_ptr_NC, o110, s110);
        gx += (dz0 * trgt1 - dy1 * trgt2) * src;
        gy += (dz0 * trgt0 - dx1 * trgt2) * src;
        gz += (-dy1 * trgt0 - dx1 * trgt1) * src;
        src = bound::get(src_ptr_NC, o001, s001);
        gx += (dz1 * trgt1 - dy0 * trgt2) * src;
        gy += (dz1 * trgt0 - dx0 * trgt2) * src;
        gz += (-dy0 * trgt0 - dx0 * trgt1) * src;
        src = bound::get(src_ptr_NC, o101, s101);
        gx += (-dz1 * trgt1 + dy0 * trgt2) * src;
        gy += (-dz1 * trgt0 - dx1 * trgt2) * src;
        gz += (dy0 * trgt0 - dx1 * trgt1) * src;
        src = bound::get(src_ptr_NC, o011, s011);
        gx += (-dz1 * trgt1 - dy1 * trgt2) * src;
        gy += (-dz1 * trgt0 + dx0 * trgt2) * src;
        gz += (-dy1 * trgt0 + dx0 * trgt1) * src;
        src = bound::get(src_ptr_NC, o111, s111);
        gx += (dz1 * trgt1 + dy1 * trgt2) * src;
        gy += (dz1 * trgt0 + dx1 * trgt2) * src;
        gz += (dy1 * trgt0 + dx1 * trgt1) * src;
      }
    }

    // Write the 3 grid-gradient components for this voxel.
    scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ;
    (*grad_ptr_NXYZ) = gx;
    grad_ptr_NXYZ[grad_sC] = gy;
    grad_ptr_NXYZ[grad_sC * 2] = gz;
  }
  if (do_push || do_count) {
    // Offsets into 'push' volume (output strides, not source strides)
    o000 = ix0 * out_sX + iy0 * out_sY + iz0 * out_sZ;
    o100 = ix1 * out_sX + iy0 * out_sY + iz0 * out_sZ;
    o010 = ix0 * out_sX + iy1 * out_sY + iz0 * out_sZ;
    o001 = ix0 * out_sX + iy0 * out_sY + iz1 * out_sZ;
    o110 = ix1 * out_sX + iy1 * out_sY + iz0 * out_sZ;
    o011 = ix0 * out_sX + iy1 * out_sY + iz1 * out_sZ;
    o101 = ix1 * out_sX + iy0 * out_sY + iz1 * out_sZ;
    o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ;
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_pull) {
    scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) {
      *out_ptr_NCXYZ = bound::get(src_ptr_NC, o000, s000) * w000 + bound::get(src_ptr_NC, o100, s100) * w100 +
          bound::get(src_ptr_NC, o010, s010) * w010 + bound::get(src_ptr_NC, o110, s110) * w110 +
          bound::get(src_ptr_NC, o001, s001) * w001 + bound::get(src_ptr_NC, o101, s101) * w101 +
          bound::get(src_ptr_NC, o011, s011) * w011 + bound::get(src_ptr_NC, o111, s111) * w111;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_sgrad) {
    scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    // Per channel, write d/dx, d/dy, d/dz of the interpolated value
    // into the K dimension (stride out_sK).
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) {
      scalar_t src000 = bound::get(src_ptr_NC, o000, s000);
      scalar_t src100 = bound::get(src_ptr_NC, o100, s100);
      scalar_t src010 = bound::get(src_ptr_NC, o010, s010);
      scalar_t src110 = bound::get(src_ptr_NC, o110, s110);
      scalar_t src001 = bound::get(src_ptr_NC, o001, s001);
      scalar_t src101 = bound::get(src_ptr_NC, o101, s101);
      scalar_t src011 = bound::get(src_ptr_NC, o011, s011);
      scalar_t src111 = bound::get(src_ptr_NC, o111, s111);
      *out_ptr_NCXYZ = -dy0 * dz0 * src000 + dy0 * dz0 * src100 - dy1 * dz0 * src010 + dy1 * dz0 * src110 -
          dy0 * dz1 * src001 + dy0 * dz1 * src101 - dy1 * dz1 * src011 + dy1 * dz1 * src111;
      out_ptr_NCXYZ[out_sK] = -dx0 * dz0 * src000 - dx1 * dz0 * src100 + dx0 * dz0 * src010 + dx1 * dz0 * src110 -
          dx0 * dz1 * src001 - dx1 * dz1 * src101 + dx0 * dz1 * src011 + dx1 * dz1 * src111;
      out_ptr_NCXYZ[out_sK * 2] = -dx0 * dy0 * src000 - dx1 * dy0 * src100 - dx0 * dy1 * src010 - dx1 * dy1 * src110 +
          dx0 * dy0 * src001 + dx1 * dy0 * src101 + dx0 * dy1 * src011 + dx1 * dy1 * src111;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_push) {
    scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    if (trgt_K == 0) {
      // Diff w.r.t. push/pull
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt = *trgt_ptr_NCXYZ;
        bound::add(out_ptr_NC, o000, w000 * trgt, s000);
        bound::add(out_ptr_NC, o100, w100 * trgt, s100);
        bound::add(out_ptr_NC, o010, w010 * trgt, s010);
        bound::add(out_ptr_NC, o110, w110 * trgt, s110);
        bound::add(out_ptr_NC, o001, w001 * trgt, s001);
        bound::add(out_ptr_NC, o101, w101 * trgt, s101);
        bound::add(out_ptr_NC, o011, w011 * trgt, s011);
        bound::add(out_ptr_NC, o111, w111 * trgt, s111);
      }
    } else {
      // Diff w.r.t. sgrad
      scalar_t val;
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt0 = *trgt_ptr_NCXYZ, trgt1 = trgt_ptr_NCXYZ[trgt_sK], trgt2 = trgt_ptr_NCXYZ[trgt_sK * 2];
        val = -dy0 * dz0 * trgt0 - dx0 * dz0 * trgt1 - dx0 * dy0 * trgt2;
        bound::add(out_ptr_NC, o000, val, s000);
        val = dy0 * dz0 * trgt0 - dx1 * dz0 * trgt1 - dx1 * dy0 * trgt2;
        bound::add(out_ptr_NC, o100, val, s100);
        val = -dy1 * dz0 * trgt0 + dx0 * dz0 * trgt1 - dx0 * dy1 * trgt2;
        bound::add(out_ptr_NC, o010, val, s010);
        val = dy1 * dz0 * trgt0 + dx1 * dz0 * trgt1 - dx1 * dy1 * trgt2;
        bound::add(out_ptr_NC, o110, val, s110);
        val = -dy0 * dz1 * trgt0 - dx0 * dz1 * trgt1 + dx0 * dy0 * trgt2;
        bound::add(out_ptr_NC, o001, val, s001);
        val = dy0 * dz1 * trgt0 - dx1 * dz1 * trgt1 + dx1 * dy0 * trgt2;
        bound::add(out_ptr_NC, o101, val, s101);
        val = -dy1 * dz1 * trgt0 + dx0 * dz1 * trgt1 + dx0 * dy1 * trgt2;
        bound::add(out_ptr_NC, o011, val, s011);
        val = dy1 * dz1 * trgt0 + dx1 * dz1 * trgt1 + dx1 * dy1 * trgt2;
        bound::add(out_ptr_NC, o111, val, s111);
      }
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_count) {
    scalar_t* out_ptr_N = out_ptr + n * out_sN;
    bound::add(out_ptr_N, o000, w000, s000);
    bound::add(out_ptr_N, o100, w100, s100);
    bound::add(out_ptr_N, o010, w010, s010);
    bound::add(out_ptr_N, o110, w110, s110);
    bound::add(out_ptr_N, o001, w001, s001);
    bound::add(out_ptr_N, o101, w101, s101);
    bound::add(out_ptr_N, o011, w011, s011);
    bound::add(out_ptr_N, o111, w111, s111);
  }
}
1716
+
1717
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1718
+ // LINEAR INTERPOLATION 2D
1719
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1720
+
1721
// Bilinear (order-1) interpolation at one sampled 2D location.
//
// (x, y) - floating-point sampling coordinate, in source-voxel units
// (w, h) - integer spatial coordinate of the current output/target voxel
// n      - batch index
//
// Same fused dispatch as the 3D trilinear variant: pull / sgrad / push /
// count / grid gradient, selected by the class' do_* flags.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d_bilinear(
    scalar_t x,
    scalar_t y,
    offset_t w,
    offset_t h,
    offset_t n) const {
  // Get corner pixel values from (x, y)
  offset_t ix0 = static_cast<offset_t>(std::floor(x));
  offset_t iy0 = static_cast<offset_t>(std::floor(y));

  // Interpolation weights (inversely proportional to distance)
  scalar_t dx1 = x - ix0;
  scalar_t dy1 = y - iy0;
  scalar_t dx0 = 1. - dx1;
  scalar_t dy0 = 1. - dy1;
  scalar_t w00 = dx0 * dy0;
  scalar_t w10 = dx1 * dy0;
  scalar_t w01 = dx0 * dy1;
  scalar_t w11 = dx1 * dy1;

  // Sign (/!\ compute sign before warping indices)
  int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X);
  int8_t sy1 = bound::sign(bound1, iy0 + 1, src_Y);
  int8_t sx0 = bound::sign(bound0, ix0, src_X);
  int8_t sy0 = bound::sign(bound1, iy0, src_Y);
  int8_t s00 = sx0 * sy0;
  int8_t s10 = sx1 * sy0;
  int8_t s01 = sx0 * sy1;
  int8_t s11 = sx1 * sy1;

  // Warp indices
  offset_t ix1, iy1;
  ix1 = bound::index(bound0, ix0 + 1, src_X);
  iy1 = bound::index(bound1, iy0 + 1, src_Y);
  ix0 = bound::index(bound0, ix0, src_X);
  iy0 = bound::index(bound1, iy0, src_Y);

  // Linear offsets of the 4 square corners. Left uninitialized in the
  // push/count-only case: those branches recompute them with the
  // *output* strides below.
  offset_t o00, o10, o01, o11;
  if (do_pull || do_grad || do_sgrad) {
    // Offsets into source volume
    o00 = ix0 * src_sX + iy0 * src_sY;
    o10 = ix1 * src_sX + iy0 * src_sY;
    o01 = ix0 * src_sX + iy1 * src_sY;
    o11 = ix1 * src_sX + iy1 * src_sY;
  } else if (!(do_push || do_count)) {
    o00 = o10 = o01 = o11 = 0;
  }

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_grad) {
    scalar_t gx = static_cast<scalar_t>(0);
    scalar_t gy = static_cast<scalar_t>(0);
    scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    if (trgt_K == 0) {
      // backward w.r.t. push/pull: derivative of each corner weight
      // w.r.t. (x, y), times source * target, accumulated per channel.
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt = trgt_ptr ? *trgt_ptr_NCXY : static_cast<scalar_t>(1);
        // ^ trgt_ptr == 0 during the backward pass of count
        src = bound::get(src_ptr_NC, o00, s00);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy0 * src;
        gy -= dx0 * src;
        src = bound::get(src_ptr_NC, o10, s10);
        if (trgt_ptr)
          src *= trgt;
        gx += dy0 * src;
        gy -= dx1 * src;
        src = bound::get(src_ptr_NC, o01, s01);
        if (trgt_ptr)
          src *= trgt;
        gx -= dy1 * src;
        gy += dx0 * src;
        src = bound::get(src_ptr_NC, o11, s11);
        if (trgt_ptr)
          src *= trgt;
        gx += dy1 * src;
        gy += dx1 * src;
      }
    } else {
      // backward w.r.t. sgrad: trgt holds K=2 gradient components
      // (trgt0, trgt1) per channel.
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt0 = *trgt_ptr_NCXY, trgt1 = trgt_ptr_NCXY[trgt_sK];
        src = bound::get(src_ptr_NC, o00, s00);
        gx += trgt1 * src;
        gy += trgt0 * src;
        src = bound::get(src_ptr_NC, o10, s10);
        gx -= trgt1 * src;
        gy -= trgt0 * src;
        src = bound::get(src_ptr_NC, o01, s01);
        gx -= trgt1 * src;
        gy -= trgt0 * src;
        src = bound::get(src_ptr_NC, o11, s11);
        gx += trgt1 * src;
        gy += trgt0 * src;
      }
    }

    // Write the 2 grid-gradient components for this voxel.
    scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY;
    (*grad_ptr_NXY) = gx;
    grad_ptr_NXY[grad_sC] = gy;
  }
  if (do_push || do_count) {
    // Offsets into 'push' volume (output strides, not source strides)
    o00 = ix0 * out_sX + iy0 * out_sY;
    o10 = ix1 * out_sX + iy0 * out_sY;
    o01 = ix0 * out_sX + iy1 * out_sY;
    o11 = ix1 * out_sX + iy1 * out_sY;
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_pull) {
    scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) {
      *out_ptr_NCXY = bound::get(src_ptr_NC, o00, s00) * w00 + bound::get(src_ptr_NC, o10, s10) * w10 +
          bound::get(src_ptr_NC, o01, s01) * w01 + bound::get(src_ptr_NC, o11, s11) * w11;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_sgrad) {
    scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    // Per channel, write d/dx and d/dy of the interpolated value into
    // the K dimension (stride out_sK).
    for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) {
      scalar_t src00 = bound::get(src_ptr_NC, o00, s00);
      scalar_t src10 = bound::get(src_ptr_NC, o10, s10);
      scalar_t src01 = bound::get(src_ptr_NC, o01, s01);
      scalar_t src11 = bound::get(src_ptr_NC, o11, s11);
      *out_ptr_NCXY = -dy0 * src00 + dy0 * src10 - dy1 * src01 + dy1 * src11;
      out_ptr_NCXY[out_sK] = -dx0 * src00 - dx1 * src10 + dx0 * src01 + dx1 * src11;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_push) {
    scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    if (trgt_K == 0) {
      // Diff w.r.t. push/pull
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt = *trgt_ptr_NCXY;
        bound::add(out_ptr_NC, o00, w00 * trgt, s00);
        bound::add(out_ptr_NC, o10, w10 * trgt, s10);
        bound::add(out_ptr_NC, o01, w01 * trgt, s01);
        bound::add(out_ptr_NC, o11, w11 * trgt, s11);
      }
    } else {
      // Diff w.r.t. sgrad
      scalar_t val;
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt0 = *trgt_ptr_NCXY, trgt1 = trgt_ptr_NCXY[trgt_sK];
        val = -dy0 * trgt0 - dx0 * trgt1;
        bound::add(out_ptr_NC, o00, val, s00);
        val = dy0 * trgt0 - dx1 * trgt1;
        bound::add(out_ptr_NC, o10, val, s10);
        val = -dy1 * trgt0 + dx0 * trgt1;
        bound::add(out_ptr_NC, o01, val, s01);
        val = dy1 * trgt0 + dx1 * trgt1;
        bound::add(out_ptr_NC, o11, val, s11);
      }
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_count) {
    scalar_t* out_ptr_N = out_ptr + n * out_sN;
    bound::add(out_ptr_N, o00, w00, s00);
    bound::add(out_ptr_N, o10, w10, s10);
    bound::add(out_ptr_N, o01, w01, s01);
    bound::add(out_ptr_N, o11, w11, s11);
  }
}
1896
+
1897
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1898
+ // LINEAR INTERPOLATION 1D
1899
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1900
+
1901
// Linear (order-1) interpolation at one sampled 1D location.
//
// x - floating-point sampling coordinate, in source-voxel units
// w - integer spatial coordinate of the current output/target voxel
// n - batch index
//
// Same fused dispatch as the 2D/3D variants: pull / sgrad / push /
// count / grid gradient, selected by the class' do_* flags.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate1d_linear(scalar_t x, offset_t w, offset_t n) const {
  // Get corner pixel values from (x)
  offset_t ix0 = static_cast<offset_t>(std::floor(x));

  // Interpolation weights (inversely proportional to distance)
  scalar_t w1 = x - ix0;
  scalar_t w0 = 1. - w1;

  // Sign (/!\ compute sign before warping indices)
  int8_t s1 = bound::sign(bound0, ix0 + 1, src_X);
  int8_t s0 = bound::sign(bound0, ix0, src_X);

  // Warp indices
  offset_t ix1;
  ix1 = bound::index(bound0, ix0 + 1, src_X);
  ix0 = bound::index(bound0, ix0, src_X);

  // Offsets into source volume. Left uninitialized in the
  // push/count-only case: those branches recompute them with the
  // *output* strides below.
  offset_t o0, o1;
  if (do_pull || do_grad || do_sgrad) {
    o0 = ix0 * src_sX;
    o1 = ix1 * src_sX;
  } else if (!(do_push || do_count)) {
    o0 = o1 = 0;
  }

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_grad) {
    if (trgt_K == 0) {
      // backward w.r.t. push/pull
      scalar_t gx = static_cast<scalar_t>(0);
      scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX;
      scalar_t* src_ptr_NC = src_ptr + n * src_sN;

      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, src_ptr_NC += src_sC) {
        scalar_t src;
        scalar_t trgt = trgt_ptr ? *trgt_ptr_NCX : static_cast<scalar_t>(1);
        // ^ trgt_ptr == 0 during the backward pass of count
        src = bound::get(src_ptr_NC, o0, s0);
        if (trgt_ptr)
          src *= trgt;
        gx -= src;
        src = bound::get(src_ptr_NC, o1, s1);
        if (trgt_ptr)
          src *= trgt;
        gx += src;
      }

      scalar_t* grad_ptr_NX = grad_ptr + n * grad_sN + w * grad_sX;
      (*grad_ptr_NX) = gx;
    } else {
      // backward w.r.t. sgrad
      // -> zero (make sure this is done at initialization)
    }
  }
  if (do_push || do_count) {
    // Offsets into 'push' volume (output strides, not source strides)
    o0 = ix0 * out_sX;
    o1 = ix1 * out_sX;
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if (do_pull) {
    scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) {
      *out_ptr_NCX = bound::get(src_ptr_NC, o0, s0) * w0 + bound::get(src_ptr_NC, o1, s1) * w1;
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_sgrad) {
    scalar_t* out_ptr_NCX = out_ptr + n * out_sN + w * out_sX;
    scalar_t* src_ptr_NC = src_ptr + n * src_sN;

    // d/dx of the linear interpolant is simply the finite difference
    // between the two neighbours.
    for (offset_t c = 0; c < C; ++c, out_ptr_NCX += out_sC, src_ptr_NC += src_sC) {
      *out_ptr_NCX = bound::get(src_ptr_NC, o1, s1) - bound::get(src_ptr_NC, o0, s0);
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_push) {
    scalar_t* trgt_ptr_NCX = trgt_ptr + n * trgt_sN + w * trgt_sX;
    scalar_t* out_ptr_NC = out_ptr + n * out_sN;
    if (trgt_K == 0) {
      // Diff w.r.t. push/pull
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt = *trgt_ptr_NCX;
        bound::add(out_ptr_NC, o0, w0 * trgt, s0);
        bound::add(out_ptr_NC, o1, w1 * trgt, s1);
      }
    } else {
      // Diff w.r.t. sgrad
      for (offset_t c = 0; c < C; ++c, trgt_ptr_NCX += trgt_sC, out_ptr_NC += out_sC) {
        scalar_t trgt0 = *trgt_ptr_NCX;
        bound::add(out_ptr_NC, o0, -trgt0, s0);
        bound::add(out_ptr_NC, o1, trgt0, s1);
      }
    }
  }
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  else if (do_count) {
    scalar_t* out_ptr_N = out_ptr + n * out_sN;
    bound::add(out_ptr_N, o0, w0, s0);
    bound::add(out_ptr_N, o1, w1, s1);
  }
}
2006
+
2007
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2008
+ // NEAREST NEIGHBOR INTERPOLATION 3D
2009
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2010
+
2011
// Nearest-neighbour (order-0) interpolation at one sampled 3D location.
// Only the pull, push (plain, trgt_K == 0) and count modes are handled
// here; the grad/sgrad paths are not implemented for order 0.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d_nearest(
    scalar_t x,
    scalar_t y,
    scalar_t z,
    offset_t w,
    offset_t h,
    offset_t d,
    offset_t n) const {
  // Closest voxel to the sampled coordinate.
  offset_t i = static_cast<offset_t>(std::round(x));
  offset_t j = static_cast<offset_t>(std::round(y));
  offset_t k = static_cast<offset_t>(std::round(z));

  // Boundary sign must be taken before the indices are wrapped.
  int8_t s = bound::sign(bound0, i, src_X) * bound::sign(bound1, j, src_Y) * bound::sign(bound2, k, src_Z);
  i = bound::index(bound0, i, src_X);
  j = bound::index(bound1, j, src_Y);
  k = bound::index(bound2, k, src_Z);

  if (do_pull) {
    // Read the single neighbour from the source volume.
    offset_t o = i * src_sX + j * src_sY + k * src_sZ;
    scalar_t* out = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ;
    scalar_t* src = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out += out_sC, src += src_sC)
      *out = bound::get(src, o, s);
  } else if (do_push && trgt_K == 0) {
    // Splat the target value onto that neighbour in the output volume.
    offset_t o = i * out_sX + j * out_sY + k * out_sZ;
    scalar_t* trgt = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ;
    scalar_t* out = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, trgt += trgt_sC, out += out_sC)
      bound::add(out, o, *trgt, s);
  } else if (do_count) {
    // Splat a constant 1 (splatting-weight map).
    offset_t o = i * out_sX + j * out_sY + k * out_sZ;
    scalar_t* out = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, out += out_sC)
      bound::add(out, o, static_cast<scalar_t>(1), s);
  }
}
2054
+
2055
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2056
+ // NEAREST NEIGHBOR INTERPOLATION 2D
2057
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2058
+
2059
// Nearest-neighbour (order-0) interpolation at one sampled 2D location.
// Only the pull, push (plain, trgt_K == 0) and count modes are handled
// here; the grad/sgrad paths are not implemented for order 0.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d_nearest(
    scalar_t x,
    scalar_t y,
    offset_t w,
    offset_t h,
    offset_t n) const {
  // Closest voxel to the sampled coordinate.
  offset_t i = static_cast<offset_t>(std::round(x));
  offset_t j = static_cast<offset_t>(std::round(y));

  // Boundary sign must be taken before the indices are wrapped.
  int8_t s = bound::sign(bound0, i, src_X) * bound::sign(bound1, j, src_Y);
  i = bound::index(bound0, i, src_X);
  j = bound::index(bound1, j, src_Y);

  if (do_pull) {
    // Read the single neighbour from the source volume.
    offset_t o = i * src_sX + j * src_sY;
    scalar_t* out = out_ptr + n * out_sN + w * out_sX + h * out_sY;
    scalar_t* src = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out += out_sC, src += src_sC)
      *out = bound::get(src, o, s);
  } else if (do_push && trgt_K == 0) {
    // Splat the target value onto that neighbour in the output volume.
    offset_t o = i * out_sX + j * out_sY;
    scalar_t* trgt = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY;
    scalar_t* out = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, trgt += trgt_sC, out += out_sC)
      bound::add(out, o, *trgt, s);
  } else if (do_count) {
    // Splat a constant 1 (splatting-weight map).
    offset_t o = i * out_sX + j * out_sY;
    scalar_t* out = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, out += out_sC)
      bound::add(out, o, static_cast<scalar_t>(1), s);
  }
}
2097
+
2098
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2099
+ // NEAREST NEIGHBOR INTERPOLATION 1D
2100
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2101
+
2102
// Nearest-neighbour (order-0) interpolation at one sampled 1D location.
// Only the pull, push (plain, trgt_K == 0) and count modes are handled
// here; the grad/sgrad paths are not implemented for order 0.
template <typename scalar_t, typename offset_t>
MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate1d_nearest(scalar_t x, offset_t w, offset_t n) const {
  // Closest voxel to the sampled coordinate; sign before wrapping.
  offset_t idx = static_cast<offset_t>(std::round(x));
  int8_t s = bound::sign(bound0, idx, src_X);
  idx = bound::index(bound0, idx, src_X);

  if (do_pull) {
    // Read the single neighbour from the source volume.
    offset_t o = idx * src_sX;
    scalar_t* out = out_ptr + n * out_sN + w * out_sX;
    scalar_t* src = src_ptr + n * src_sN;
    for (offset_t c = 0; c < C; ++c, out += out_sC, src += src_sC)
      *out = bound::get(src, o, s);
  } else if (do_push && trgt_K == 0) {
    // Splat the target value onto that neighbour in the output volume.
    offset_t o = idx * out_sX;
    scalar_t* trgt = trgt_ptr + n * trgt_sN + w * trgt_sX;
    scalar_t* out = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, trgt += trgt_sC, out += out_sC)
      bound::add(out, o, *trgt, s);
  } else if (do_count) {
    // Splat a constant 1 (splatting-weight map).
    offset_t o = idx * out_sX;
    scalar_t* out = out_ptr + n * out_sN;
    for (offset_t c = 0; c < C; ++c, out += out_sC)
      bound::add(out, o, static_cast<scalar_t>(1), s);
  }
}
2129
+
2130
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2131
+ // LINEAR INTERPOLATION 3D + SLIDING BOUNDARY
2132
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2133
+ // TODO
2134
+
2135
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2136
+ // CUDA KERNEL (MUST BE OUT OF CLASS)
2137
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2138
+
2139
// CUDA Kernel
// Each thread delegates to PushPullImpl::loop with its launch geometry;
// `loop` partitions the voxel workload across threads/blocks using these
// indices and dimensions.
template <typename scalar_t, typename offset_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void pushpull_kernel(PushPullImpl<scalar_t, offset_t> f) {
  f.loop(threadIdx.x, blockIdx.x, blockDim.x, gridDim.x);
}
2145
+
2146
+ } // namespace
2147
+
2148
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2149
+ // FUNCTIONAL FORM WITH DISPATCH
2150
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2151
+
2152
// Explicit template instantiations of the two `pushpull` entry points
// (3-tensor and 2-tensor overloads), expanded over every combination of
// bound type (scalar vs vector), interpolation type (scalar vs vector)
// and source type (IntArrayRef shape vs Tensor).
#define PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, SourceType0) \
  template std::deque<Tensor> pushpull(                                    \
      const SourceType0&,                                                  \
      const Tensor&,                                                       \
      const Tensor&,                                                       \
      BoundType0,                                                          \
      InterpolationType0,                                                  \
      bool,                                                                \
      bool,                                                                \
      bool,                                                                \
      bool,                                                                \
      bool,                                                                \
      bool);                                                               \
  template std::deque<Tensor> pushpull(                                    \
      const SourceType0&, const Tensor&, BoundType0, InterpolationType0, bool, bool, bool, bool, bool, bool)
// Expand over the two possible `source` argument types.
#define PUSHPULL_INSTANTIATE2(BoundType0, InterpolationType0)         \
  PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, IntArrayRef); \
  PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, Tensor)
// Expand over the two possible `interpolation` argument types.
#define PUSHPULL_INSTANTIATE1(BoundType0)               \
  PUSHPULL_INSTANTIATE2(BoundType0, InterpolationType); \
  PUSHPULL_INSTANTIATE2(BoundType0, InterpolationVectorRef)
// Expand over the two possible `bound` argument types.
#define PUSHPULL_INSTANTIATE        \
  PUSHPULL_INSTANTIATE1(BoundType); \
  PUSHPULL_INSTANTIATE1(BoundVectorRef)
2176
+
2177
// Two arguments (source, grid)
// > `bound` and `interpolation` can be single arguments or vectors.
// Builds the allocator/descriptor, binds the inputs, then dispatches the
// CUDA kernel on the current stream for the grid's floating scalar type.
template <typename BoundType, typename InterpolationType, typename SourceType>
MONAI_HOST std::deque<Tensor> pushpull(
    const SourceType& source,
    const Tensor& grid,
    BoundType bound,
    InterpolationType interpolation,
    bool extrapolate,
    bool do_pull,
    bool do_push,
    bool do_count,
    bool do_grad,
    bool do_sgrad) {
  // grid.dim() - 2 is the spatial dimensionality — assumes grid is laid out
  // as (batch, *spatial, coord); TODO confirm against the allocator.
  PushPullAllocator info(
      grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad);
  info.ioset(source, grid);

  return AT_DISPATCH_FLOATING_TYPES_AND_HALF(grid.scalar_type(), "pushpull", [&] {
    if (info.canUse32BitIndexMath()) {
      // 32-bit offsets when all tensors are addressable with int32:
      // cheaper index arithmetic on device.
      PushPullImpl<scalar_t, int32_t> algo(info);
      pushpull_kernel<<<GET_BLOCKS(algo.voxcount()), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(algo);
      return algo.output;
    } else {
      PushPullImpl<scalar_t, int64_t> algo(info);
      pushpull_kernel<<<GET_BLOCKS(algo.voxcount()), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(algo);
      return algo.output;
    }
  });
}
2207
+
2208
// Three arguments (source, grid, target)
// > `bound` and `interpolation` can be single arguments or vectors.
// > `source` can be a tensor or a vector of dimensions.
// Same dispatch as the two-argument overload, but also binds `target`
// (used e.g. by the push/splat path).
template <typename BoundType, typename InterpolationType, typename SourceType>
MONAI_HOST std::deque<Tensor> pushpull(
    const SourceType& source,
    const Tensor& grid,
    const Tensor& target,
    BoundType bound,
    InterpolationType interpolation,
    bool extrapolate,
    bool do_pull,
    bool do_push,
    bool do_count,
    bool do_grad,
    bool do_sgrad) {
  // grid.dim() - 2 is the spatial dimensionality — assumes grid is laid out
  // as (batch, *spatial, coord); TODO confirm against the allocator.
  PushPullAllocator info(
      grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad);
  info.ioset(source, grid, target);

  return AT_DISPATCH_FLOATING_TYPES_AND_HALF(grid.scalar_type(), "pushpull", [&] {
    if (info.canUse32BitIndexMath()) {
      // 32-bit offsets when all tensors are addressable with int32.
      PushPullImpl<scalar_t, int32_t> algo(info);
      pushpull_kernel<<<GET_BLOCKS(algo.voxcount()), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(algo);
      return algo.output;
    } else {
      PushPullImpl<scalar_t, int64_t> algo(info);
      pushpull_kernel<<<GET_BLOCKS(algo.voxcount()), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(algo);
      return algo.output;
    }
  });
}
2240
+
2241
+ PUSHPULL_INSTANTIATE;
2242
+
2243
+ } // namespace gpu
2244
+ } // namespace monai
source_code/SegMamba/monai/csrc/utils/meta_macros.h ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ Copyright (c) MONAI Consortium
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+ Unless required by applicable law or agreed to in writing, software
8
+ distributed under the License is distributed on an "AS IS" BASIS,
9
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ See the License for the specific language governing permissions and
11
+ limitations under the License.
12
+ */
13
+
14
#pragma once

// Helper Macros: for internal use (see below).
// _DO_<N>(TARGET) expands to TARGET(N) TARGET(N-1) ... TARGET(1):
// a compile-time "for" loop over [1, N], emitted in descending order.
#define _DO_1(TARGET) TARGET(1)
#define _DO_2(TARGET) TARGET(2) _DO_1(TARGET)
#define _DO_3(TARGET) TARGET(3) _DO_2(TARGET)
#define _DO_4(TARGET) TARGET(4) _DO_3(TARGET)
#define _DO_5(TARGET) TARGET(5) _DO_4(TARGET)
#define _DO_6(TARGET) TARGET(6) _DO_5(TARGET)
#define _DO_7(TARGET) TARGET(7) _DO_6(TARGET)
#define _DO_8(TARGET) TARGET(8) _DO_7(TARGET)
#define _DO_9(TARGET) TARGET(9) _DO_8(TARGET)
#define _DO_10(TARGET) TARGET(10) _DO_9(TARGET)
#define _DO_11(TARGET) TARGET(11) _DO_10(TARGET)
#define _DO_12(TARGET) TARGET(12) _DO_11(TARGET)
#define _DO_13(TARGET) TARGET(13) _DO_12(TARGET)
#define _DO_14(TARGET) TARGET(14) _DO_13(TARGET)
#define _DO_15(TARGET) TARGET(15) _DO_14(TARGET)
#define _DO_16(TARGET) TARGET(16) _DO_15(TARGET)
#define _DO_17(TARGET) TARGET(17) _DO_16(TARGET)
#define _DO_18(TARGET) TARGET(18) _DO_17(TARGET)
#define _DO_19(TARGET) TARGET(19) _DO_18(TARGET)
#define _DO_20(TARGET) TARGET(20) _DO_19(TARGET)
#define _DO_21(TARGET) TARGET(21) _DO_20(TARGET)
#define _DO_22(TARGET) TARGET(22) _DO_21(TARGET)
#define _DO_23(TARGET) TARGET(23) _DO_22(TARGET)
#define _DO_24(TARGET) TARGET(24) _DO_23(TARGET)
#define _DO_25(TARGET) TARGET(25) _DO_24(TARGET)
#define _DO_26(TARGET) TARGET(26) _DO_25(TARGET)
#define _DO_27(TARGET) TARGET(27) _DO_26(TARGET)
#define _DO_28(TARGET) TARGET(28) _DO_27(TARGET)
#define _DO_29(TARGET) TARGET(29) _DO_28(TARGET)
#define _DO_30(TARGET) TARGET(30) _DO_29(TARGET)
#define _DO_31(TARGET) TARGET(31) _DO_30(TARGET)
#define _DO_32(TARGET) TARGET(32) _DO_31(TARGET)

// _DO_A_<N>(TARGET, A) expands to TARGET(A, N) ... TARGET(A, 1):
// the inner loop of a two-level expansion, with the first argument fixed.
#define _DO_A_1(TARGET, A) TARGET(A, 1)
#define _DO_A_2(TARGET, A) TARGET(A, 2) _DO_A_1(TARGET, A)
#define _DO_A_3(TARGET, A) TARGET(A, 3) _DO_A_2(TARGET, A)
#define _DO_A_4(TARGET, A) TARGET(A, 4) _DO_A_3(TARGET, A)
#define _DO_A_5(TARGET, A) TARGET(A, 5) _DO_A_4(TARGET, A)
#define _DO_A_6(TARGET, A) TARGET(A, 6) _DO_A_5(TARGET, A)
#define _DO_A_7(TARGET, A) TARGET(A, 7) _DO_A_6(TARGET, A)
#define _DO_A_8(TARGET, A) TARGET(A, 8) _DO_A_7(TARGET, A)
#define _DO_A_9(TARGET, A) TARGET(A, 9) _DO_A_8(TARGET, A)
#define _DO_A_10(TARGET, A) TARGET(A, 10) _DO_A_9(TARGET, A)
#define _DO_A_11(TARGET, A) TARGET(A, 11) _DO_A_10(TARGET, A)
#define _DO_A_12(TARGET, A) TARGET(A, 12) _DO_A_11(TARGET, A)
#define _DO_A_13(TARGET, A) TARGET(A, 13) _DO_A_12(TARGET, A)
#define _DO_A_14(TARGET, A) TARGET(A, 14) _DO_A_13(TARGET, A)
#define _DO_A_15(TARGET, A) TARGET(A, 15) _DO_A_14(TARGET, A)
#define _DO_A_16(TARGET, A) TARGET(A, 16) _DO_A_15(TARGET, A)
#define _DO_A_17(TARGET, A) TARGET(A, 17) _DO_A_16(TARGET, A)
#define _DO_A_18(TARGET, A) TARGET(A, 18) _DO_A_17(TARGET, A)
#define _DO_A_19(TARGET, A) TARGET(A, 19) _DO_A_18(TARGET, A)
#define _DO_A_20(TARGET, A) TARGET(A, 20) _DO_A_19(TARGET, A)
#define _DO_A_21(TARGET, A) TARGET(A, 21) _DO_A_20(TARGET, A)
#define _DO_A_22(TARGET, A) TARGET(A, 22) _DO_A_21(TARGET, A)
#define _DO_A_23(TARGET, A) TARGET(A, 23) _DO_A_22(TARGET, A)
#define _DO_A_24(TARGET, A) TARGET(A, 24) _DO_A_23(TARGET, A)
#define _DO_A_25(TARGET, A) TARGET(A, 25) _DO_A_24(TARGET, A)
#define _DO_A_26(TARGET, A) TARGET(A, 26) _DO_A_25(TARGET, A)
#define _DO_A_27(TARGET, A) TARGET(A, 27) _DO_A_26(TARGET, A)
#define _DO_A_28(TARGET, A) TARGET(A, 28) _DO_A_27(TARGET, A)
#define _DO_A_29(TARGET, A) TARGET(A, 29) _DO_A_28(TARGET, A)
#define _DO_A_30(TARGET, A) TARGET(A, 30) _DO_A_29(TARGET, A)
#define _DO_A_31(TARGET, A) TARGET(A, 31) _DO_A_30(TARGET, A)
#define _DO_A_32(TARGET, A) TARGET(A, 32) _DO_A_31(TARGET, A)

// _DO_<N>_B(TARGET, B_RANGE) is the outer loop: for each A in [1, N] it
// invokes _DO_A_<B_RANGE>(TARGET, A), producing the full A x B cartesian
// product of TARGET(A, B) invocations.
#define _DO_1_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 1)
#define _DO_2_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 2) _DO_1_B(TARGET, B_RANGE)
#define _DO_3_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 3) _DO_2_B(TARGET, B_RANGE)
#define _DO_4_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 4) _DO_3_B(TARGET, B_RANGE)
#define _DO_5_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 5) _DO_4_B(TARGET, B_RANGE)
#define _DO_6_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 6) _DO_5_B(TARGET, B_RANGE)
#define _DO_7_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 7) _DO_6_B(TARGET, B_RANGE)
#define _DO_8_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 8) _DO_7_B(TARGET, B_RANGE)
#define _DO_9_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 9) _DO_8_B(TARGET, B_RANGE)
#define _DO_10_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 10) _DO_9_B(TARGET, B_RANGE)
#define _DO_11_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 11) _DO_10_B(TARGET, B_RANGE)
#define _DO_12_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 12) _DO_11_B(TARGET, B_RANGE)
#define _DO_13_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 13) _DO_12_B(TARGET, B_RANGE)
#define _DO_14_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 14) _DO_13_B(TARGET, B_RANGE)
#define _DO_15_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 15) _DO_14_B(TARGET, B_RANGE)
#define _DO_16_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 16) _DO_15_B(TARGET, B_RANGE)
#define _DO_17_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 17) _DO_16_B(TARGET, B_RANGE)
#define _DO_18_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 18) _DO_17_B(TARGET, B_RANGE)
#define _DO_19_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 19) _DO_18_B(TARGET, B_RANGE)
#define _DO_20_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 20) _DO_19_B(TARGET, B_RANGE)
#define _DO_21_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 21) _DO_20_B(TARGET, B_RANGE)
#define _DO_22_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 22) _DO_21_B(TARGET, B_RANGE)
#define _DO_23_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 23) _DO_22_B(TARGET, B_RANGE)
#define _DO_24_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 24) _DO_23_B(TARGET, B_RANGE)
#define _DO_25_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 25) _DO_24_B(TARGET, B_RANGE)
#define _DO_26_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 26) _DO_25_B(TARGET, B_RANGE)
#define _DO_27_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 27) _DO_26_B(TARGET, B_RANGE)
#define _DO_28_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 28) _DO_27_B(TARGET, B_RANGE)
#define _DO_29_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 29) _DO_28_B(TARGET, B_RANGE)
#define _DO_30_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 30) _DO_29_B(TARGET, B_RANGE)
#define _DO_31_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 31) _DO_30_B(TARGET, B_RANGE)
#define _DO_32_B(TARGET, B_RANGE) _DO_A_##B_RANGE(TARGET, 32) _DO_31_B(TARGET, B_RANGE)

// Case generators fed to the loops above by SWITCH_A / SWITCH_AB.
// NOTE(review): the `CASE` token in these bodies is resolved at the final
// rescan, so the call site must have a function-like macro literally named
// `CASE` defined before invoking SWITCH_A/SWITCH_AB — the `CASE`/`CALL`
// parameters of those macros are never substituted here. Verify at call sites.
// _CASE_AB keys the switch on A * 100 + B, so B must stay below 100.
#define _CASE_A(A) \
  case (A):        \
    CASE(A) break;
#define _CASE_AB(A, B)       \
  case (A * 100 + B):        \
    CASE(A, B) break;

// Preprocessor for loops: expand TARGET over [1, A_RANGE]
// (or the [1, A_RANGE] x [1, B_RANGE] product).
#define DO_FOR_A(TARGET, A_RANGE) _DO_##A_RANGE(TARGET)
#define DO_FOR_AB(TARGET, A_RANGE, B_RANGE) _DO_##A_RANGE##_B(TARGET, B_RANGE)

// Preprocessor switch-statement generators: emit a `switch` whose cases
// cover every value in the given range(s), each dispatching to CASE(...).
#define SWITCH_A(CASE, A_RANGE, A) \
  switch (A) { DO_FOR_A(_CASE_A, A_RANGE) }
#define SWITCH_AB(CALL, A_RANGE, B_RANGE, A, B) \
  switch (A * 100 + B) { DO_FOR_AB(_CASE_AB, A_RANGE, B_RANGE) }
source_code/SegMamba/monai/fl/client/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from .client_algo import BaseClient, ClientAlgo, ClientAlgoStats
15
+ from .monai_algo import MonaiAlgo, MonaiAlgoStats
source_code/SegMamba/monai/fl/utils/constants.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from monai.utils.enums import StrEnum
15
+
16
+
17
class WeightType(StrEnum):
    """Kinds of model weights exchanged in federated learning."""

    WEIGHTS = "fl_weights_full"  # full set of model weights
    WEIGHT_DIFF = "fl_weight_diff"  # weight differences (delta) rather than full weights
20
+
21
+
22
class ModelType(StrEnum):
    """Which checkpoint of a trained model is being referenced."""

    BEST_MODEL = "fl_best_model"  # best model seen during training
    FINAL_MODEL = "fl_final_model"  # model at the end of training
25
+
26
+
27
class ExtraItems(StrEnum):
    """Keys for extra items passed alongside FL requests/responses."""

    ABORT = "fl_abort"
    MODEL_TYPE = "fl_model_type"
    CLIENT_NAME = "fl_client_name"
    APP_ROOT = "fl_app_root"
    STATS_SENDER = "fl_stats_sender"
33
+
34
+
35
class FlPhase(StrEnum):
    """Phases an FL client can be in."""

    IDLE = "fl_idle"
    TRAIN = "fl_train"
    EVALUATE = "fl_evaluate"
    GET_WEIGHTS = "fl_get_weights"
    GET_DATA_STATS = "fl_get_data_stats"
41
+
42
+
43
class FlStatistics(StrEnum):
    """Keys used when reporting client statistics (training and data stats)."""

    NUM_EXECUTED_ITERATIONS = "num_executed_iterations"
    STATISTICS = "statistics"
    HIST_BINS = "hist_bins"
    HIST_RANGE = "hist_range"
    DATA_STATS = "data_stats"
    DATA_COUNT = "data_count"
    FAIL_COUNT = "fail_count"
    TOTAL_DATA = "total_data"
    FEATURE_NAMES = "feature_names"
53
+
54
+
55
class FiltersType(StrEnum):
    """Keys naming the filter chains applied at different FL stages."""

    PRE_FILTERS = "pre_filters"
    POST_WEIGHT_FILTERS = "post_weight_filters"
    POST_EVALUATE_FILTERS = "post_evaluate_filters"
    POST_STATISTICS_FILTERS = "post_statistics_filters"
source_code/SegMamba/monai/fl/utils/exchange_object.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from monai.fl.utils.constants import WeightType
15
+
16
+
17
class ExchangeObject(dict):
    """
    Contains the information shared between client and server.

    Args:
        weights: model weights.
        optim: optimizer weights.
        metrics: evaluation metrics.
        weight_type: type of weights (see monai.fl.utils.constants.WeightType).
        statistics: training statistics, i.e. number executed iterations.
    """

    def __init__(
        self,
        weights: dict | None = None,
        optim: dict | None = None,
        metrics: dict | None = None,
        weight_type: WeightType | None = None,
        statistics: dict | None = None,
    ):
        super().__init__()
        self.weights = weights
        self.optim = optim
        # metrics / weight_type / statistics go through the validating
        # property setters defined below.
        self.metrics = metrics
        self.weight_type = weight_type
        self.statistics = statistics
        # cache used by summary(); note it persists across summary() calls
        self._summary: dict = {}

    @property
    def metrics(self):
        """Evaluation metrics dict, or None if not set."""
        return self._metrics

    @metrics.setter
    def metrics(self, metrics):
        # None is accepted; any other value must be a dict
        if metrics is not None:
            if not isinstance(metrics, dict):
                raise ValueError(f"Expected metrics to be of type dict but received {type(metrics)}")
        self._metrics = metrics

    @property
    def statistics(self):
        """Training statistics dict, or None if not set."""
        return self._statistics

    @statistics.setter
    def statistics(self, statistics):
        # None is accepted; any other value must be a dict
        if statistics is not None:
            if not isinstance(statistics, dict):
                raise ValueError(f"Expected statistics to be of type dict but received {type(statistics)}")
        self._statistics = statistics

    @property
    def weight_type(self):
        """Type of the carried weights (WeightType member), or None."""
        return self._weight_type

    @weight_type.setter
    def weight_type(self, weight_type):
        # None is accepted; otherwise only the two known weight types are valid
        if weight_type is not None:
            if weight_type not in [WeightType.WEIGHTS, WeightType.WEIGHT_DIFF]:
                raise ValueError(f"Expected weight type to be either {WeightType.WEIGHTS} or {WeightType.WEIGHT_DIFF}")
        self._weight_type = weight_type

    def is_valid_weights(self):
        # valid only when both weights and their declared type are present
        # (empty dicts / None are falsy and therefore invalid)
        if not self.weights:
            return False
        if not self.weight_type:
            return False
        return True

    def _add_to_summary(self, key, value):
        # Record a compact representation: dict -> its size, WeightType -> the
        # value itself, anything else -> its type. Falsy values are skipped.
        if value:
            if isinstance(value, dict):
                self._summary[key] = len(value)
            elif isinstance(value, WeightType):
                self._summary[key] = value
            else:
                self._summary[key] = type(value)

    def summary(self):
        """Return a compact dict summarizing the exchanged contents."""
        # include any items stored directly in the underlying dict
        self._summary.update(self)
        for k, v in zip(
            ["weights", "optim", "metrics", "weight_type", "statistics"],
            [self.weights, self.optim, self.metrics, self.weight_type, self.statistics],
        ):
            self._add_to_summary(k, v)
        return self._summary

    def __repr__(self):
        return str(self.summary())

    def __str__(self):
        return str(self.summary())
source_code/SegMamba/monai/handlers/hausdorff_distance.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from collections.abc import Callable
15
+
16
+ from monai.handlers.ignite_metric import IgniteMetricHandler
17
+ from monai.metrics import HausdorffDistanceMetric
18
+ from monai.utils import MetricReduction
19
+
20
+
21
class HausdorffDistance(IgniteMetricHandler):
    """
    Ignite-style handler that computes the Hausdorff distance from full-size
    tensors and accumulates the average over batch, class channels and iterations.
    """

    def __init__(
        self,
        include_background: bool = False,
        distance_metric: str = "euclidean",
        percentile: float | None = None,
        directed: bool = False,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """
        Args:
            include_background: whether the first (background) channel of the
                prediction takes part in the distance computation. Defaults to ``False``.
            distance_metric: one of ``"euclidean"``, ``"chessboard"``, ``"taxicab"``;
                the metric used to compute surface distance. Defaults to ``"euclidean"``.
            percentile: optional float between 0 and 100; when given, the
                corresponding percentile of the Hausdorff distance is reported
                instead of the maximum. Defaults to ``None``.
            directed: whether to compute the directed Hausdorff distance.
                Defaults to ``False``.
            reduction: reduction mode applied to not-nan values; one of
                {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}. ``"none"`` skips reduction.
                Defaults to ``"mean"``.
            output_transform: callable extracting the ``(y_pred, y)`` pair from
                ``ignite.engine.state.output`` for ``update()``; ``y_pred`` and ``y``
                can be batch-first tensors or lists of channel-first tensors. See
                https://pytorch.org/ignite/concepts.html#state and
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: whether to store per-image computation details in
                ``engine.state.metric_details`` under the metric name. Defaults to True.
        """
        # Construct the underlying metric inline; the handler base class takes
        # care of attaching to the engine and reducing across iterations.
        super().__init__(
            metric_fn=HausdorffDistanceMetric(
                include_background=include_background,
                distance_metric=distance_metric,
                percentile=percentile,
                directed=directed,
                reduction=reduction,
            ),
            output_transform=output_transform,
            save_details=save_details,
        )
source_code/SegMamba/monai/handlers/tensorboard_handlers.py ADDED
@@ -0,0 +1,454 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import warnings
15
+ from collections.abc import Callable, Sequence
16
+ from typing import TYPE_CHECKING, Any
17
+
18
+ import numpy as np
19
+ import torch
20
+
21
+ from monai.config import IgniteInfo
22
+ from monai.utils import is_scalar, min_version, optional_import
23
+ from monai.visualize import plot_2d_or_3d_image
24
+
25
+ Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
26
+
27
+ if TYPE_CHECKING:
28
+ from ignite.engine import Engine
29
+ from tensorboardX import SummaryWriter as SummaryWriterX
30
+ from torch.utils.tensorboard import SummaryWriter
31
+ else:
32
+ Engine, _ = optional_import(
33
+ "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator"
34
+ )
35
+ SummaryWriter, _ = optional_import("torch.utils.tensorboard", name="SummaryWriter")
36
+ SummaryWriterX, _ = optional_import("tensorboardX", name="SummaryWriter")
37
+
38
+ DEFAULT_TAG = "Loss"
39
+
40
+
41
+ class TensorBoardHandler:
42
+ """
43
+ Base class for the handlers to write data into TensorBoard.
44
+
45
+ Args:
46
+ summary_writer: user can specify TensorBoard or TensorBoardX SummaryWriter,
47
+ default to create a new TensorBoard writer.
48
+ log_dir: if using default SummaryWriter, write logs to this directory, default is `./runs`.
49
+
50
+ """
51
+
52
+ def __init__(self, summary_writer: SummaryWriter | SummaryWriterX | None = None, log_dir: str = "./runs"):
53
+ if summary_writer is None:
54
+ self._writer = SummaryWriter(log_dir=log_dir)
55
+ self.internal_writer = True
56
+ else:
57
+ self._writer = summary_writer
58
+ self.internal_writer = False
59
+
60
+ def attach(self, engine: Engine) -> None:
61
+ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
62
+
63
+ def close(self):
64
+ """
65
+ Close the summary writer if created in this TensorBoard handler.
66
+
67
+ """
68
+ if self.internal_writer:
69
+ self._writer.close()
70
+
71
+
72
+ class TensorBoardStatsHandler(TensorBoardHandler):
73
+ """
74
+ TensorBoardStatsHandler defines a set of Ignite Event-handlers for all the TensorBoard logics.
75
+ It can be used for any Ignite Engine(trainer, validator and evaluator).
76
+ And it can support both epoch level and iteration level with pre-defined TensorBoard event writer.
77
+ The expected data source is Ignite ``engine.state.output`` and ``engine.state.metrics``.
78
+
79
+ Default behaviors:
80
+ - When EPOCH_COMPLETED, write each dictionary item in
81
+ ``engine.state.metrics`` to TensorBoard.
82
+ - When ITERATION_COMPLETED, write each dictionary item in
83
+ ``self.output_transform(engine.state.output)`` to TensorBoard.
84
+
85
+ Usage example is available in the tutorial:
86
+ https://github.com/Project-MONAI/tutorials/blob/master/3d_segmentation/unet_segmentation_3d_ignite.ipynb.
87
+
88
+ """
89
+
90
+ def __init__(
91
+ self,
92
+ summary_writer: SummaryWriter | SummaryWriterX | None = None,
93
+ log_dir: str = "./runs",
94
+ iteration_log: bool | Callable[[Engine, int], bool] | int = True,
95
+ epoch_log: bool | Callable[[Engine, int], bool] | int = True,
96
+ epoch_event_writer: Callable[[Engine, Any], Any] | None = None,
97
+ iteration_event_writer: Callable[[Engine, Any], Any] | None = None,
98
+ output_transform: Callable = lambda x: x[0],
99
+ global_epoch_transform: Callable = lambda x: x,
100
+ state_attributes: Sequence[str] | None = None,
101
+ tag_name: str = DEFAULT_TAG,
102
+ ) -> None:
103
+ """
104
+ Args:
105
+ summary_writer: user can specify TensorBoard or TensorBoardX SummaryWriter,
106
+ default to create a new TensorBoard writer.
107
+ log_dir: if using default SummaryWriter, write logs to this directory, default is `./runs`.
108
+ iteration_log: whether to write data to TensorBoard when iteration completed, default to `True`.
109
+ ``iteration_log`` can be also a function or int. If it is an int, it will be interpreted as the iteration interval
110
+ at which the iteration_event_writer is called. If it is a function, it will be interpreted as an event filter
111
+ (see https://pytorch.org/ignite/generated/ignite.engine.events.Events.html for details).
112
+ Event filter function accepts as input engine and event value (iteration) and should return True/False.
113
+ epoch_log: whether to write data to TensorBoard when epoch completed, default to `True`.
114
+ ``epoch_log`` can be also a function or int. If it is an int, it will be interpreted as the epoch interval
115
+ at which the epoch_event_writer is called. If it is a function, it will be interpreted as an event filter.
116
+ See ``iteration_log`` argument for more details.
117
+ epoch_event_writer: customized callable TensorBoard writer for epoch level.
118
+ Must accept parameter "engine" and "summary_writer", use default event writer if None.
119
+ iteration_event_writer: customized callable TensorBoard writer for iteration level.
120
+ Must accept parameter "engine" and "summary_writer", use default event writer if None.
121
+ output_transform: a callable that is used to transform the
122
+ ``ignite.engine.state.output`` into a scalar to plot, or a dictionary of {key: scalar}.
123
+ In the latter case, the output string will be formatted as key: value.
124
+ By default this value plotting happens when every iteration completed.
125
+ The default behavior is to print loss from output[0] as output is a decollated list
126
+ and we replicated loss value for every item of the decollated list.
127
+ `engine.state` and `output_transform` inherit from the ignite concept:
128
+ https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
129
+ https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
130
+ global_epoch_transform: a callable that is used to customize global epoch number.
131
+ For example, in evaluation, the evaluator engine might want to use trainer engines epoch number
132
+ when plotting epoch vs metric curves.
133
+ state_attributes: expected attributes from `engine.state`, if provided, will extract them
134
+ when epoch completed.
135
+ tag_name: when iteration output is a scalar, tag_name is used to plot, defaults to ``'Loss'``.
136
+ """
137
+
138
+ super().__init__(summary_writer=summary_writer, log_dir=log_dir)
139
+ self.iteration_log = iteration_log
140
+ self.epoch_log = epoch_log
141
+ self.epoch_event_writer = epoch_event_writer
142
+ self.iteration_event_writer = iteration_event_writer
143
+ self.output_transform = output_transform
144
+ self.global_epoch_transform = global_epoch_transform
145
+ self.state_attributes = state_attributes
146
+ self.tag_name = tag_name
147
+
148
+ def attach(self, engine: Engine) -> None:
149
+ """
150
+ Register a set of Ignite Event-Handlers to a specified Ignite engine.
151
+
152
+ Args:
153
+ engine: Ignite Engine, it can be a trainer, validator or evaluator.
154
+
155
+ """
156
+ if self.iteration_log and not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
157
+ event = Events.ITERATION_COMPLETED
158
+ if callable(self.iteration_log): # substitute event with new one using filter callable
159
+ event = event(event_filter=self.iteration_log)
160
+ elif self.iteration_log > 1:
161
+ event = event(every=self.iteration_log)
162
+ engine.add_event_handler(event, self.iteration_completed)
163
+ if self.epoch_log and not engine.has_event_handler(self.epoch_completed, Events.EPOCH_COMPLETED):
164
+ event = Events.EPOCH_COMPLETED
165
+ if callable(self.epoch_log): # substitute event with new one using filter callable
166
+ event = event(event_filter=self.epoch_log)
167
+ elif self.epoch_log > 1:
168
+ event = event(every=self.epoch_log)
169
+ engine.add_event_handler(event, self.epoch_completed)
170
+
171
+ def epoch_completed(self, engine: Engine) -> None:
172
+ """
173
+ Handler for train or validation/evaluation epoch completed Event.
174
+ Write epoch level events, default values are from Ignite `engine.state.metrics` dict.
175
+
176
+ Args:
177
+ engine: Ignite Engine, it can be a trainer, validator or evaluator.
178
+
179
+ """
180
+ if self.epoch_event_writer is not None:
181
+ self.epoch_event_writer(engine, self._writer)
182
+ else:
183
+ self._default_epoch_writer(engine, self._writer)
184
+
185
+ def iteration_completed(self, engine: Engine) -> None:
186
+ """
187
+ Handler for train or validation/evaluation iteration completed Event.
188
+ Write iteration level events, default values are from Ignite `engine.state.output`.
189
+
190
+ Args:
191
+ engine: Ignite Engine, it can be a trainer, validator or evaluator.
192
+
193
+ """
194
+ if self.iteration_event_writer is not None:
195
+ self.iteration_event_writer(engine, self._writer)
196
+ else:
197
+ self._default_iteration_writer(engine, self._writer)
198
+
199
+ def _write_scalar(
200
+ self, _engine: Engine, writer: SummaryWriter | SummaryWriterX, tag: str, value: Any, step: int
201
+ ) -> None:
202
+ """
203
+ Write scale value into TensorBoard.
204
+ Default to call `SummaryWriter.add_scalar()`.
205
+
206
+ Args:
207
+ _engine: Ignite Engine, unused argument.
208
+ writer: TensorBoard or TensorBoardX writer, passed or created in TensorBoardHandler.
209
+ tag: tag name in the TensorBoard.
210
+ value: value of the scalar data for current step.
211
+ step: index of current step.
212
+
213
+ """
214
+ writer.add_scalar(tag, value, step)
215
+
216
+ def _default_epoch_writer(self, engine: Engine, writer: SummaryWriter | SummaryWriterX) -> None:
217
+ """
218
+ Execute epoch level event write operation.
219
+ Default to write the values from Ignite `engine.state.metrics` dict and
220
+ write the values of specified attributes of `engine.state`.
221
+
222
+ Args:
223
+ engine: Ignite Engine, it can be a trainer, validator or evaluator.
224
+ writer: TensorBoard or TensorBoardX writer, passed or created in TensorBoardHandler.
225
+
226
+ """
227
+ current_epoch = self.global_epoch_transform(engine.state.epoch)
228
+ summary_dict = engine.state.metrics
229
+ for name, value in summary_dict.items():
230
+ if is_scalar(value):
231
+ self._write_scalar(engine, writer, name, value, current_epoch)
232
+
233
+ if self.state_attributes is not None:
234
+ for attr in self.state_attributes:
235
+ self._write_scalar(engine, writer, attr, getattr(engine.state, attr, None), current_epoch)
236
+ writer.flush()
237
+
238
    def _default_iteration_writer(self, engine: Engine, writer: SummaryWriter | SummaryWriterX) -> None:
        """
        Execute iteration level event write operation based on Ignite `engine.state.output` data.
        Extract the values from `self.output_transform(engine.state.output)`.
        Since `engine.state.output` is a decollated list and we replicated the loss value for every item
        of the decollated list, the default behavior is to track the loss from `output[0]`.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            writer: TensorBoard or TensorBoardX writer, passed or created in TensorBoardHandler.

        """
        loss = self.output_transform(engine.state.output)
        if loss is None:
            return  # do nothing if output is empty
        if isinstance(loss, dict):
            # plot each scalar entry of the dict under its own tag, in deterministic (sorted) key order
            for name in sorted(loss):
                value = loss[name]
                if not is_scalar(value):
                    warnings.warn(
                        "ignoring non-scalar output in TensorBoardStatsHandler,"
                        " make sure `output_transform(engine.state.output)` returns"
                        " a scalar or dictionary of key and scalar pairs to avoid this warning."
                        " {}:{}".format(name, type(value))
                    )
                    continue  # not plot multi dimensional output
                self._write_scalar(
                    _engine=engine,
                    writer=writer,
                    tag=name,
                    # unwrap 0-dim tensors to plain Python numbers before plotting
                    value=value.item() if isinstance(value, torch.Tensor) else value,
                    step=engine.state.iteration,
                )
        elif is_scalar(loss):  # not printing multi dimensional output
            self._write_scalar(
                _engine=engine,
                writer=writer,
                tag=self.tag_name,
                value=loss.item() if isinstance(loss, torch.Tensor) else loss,
                step=engine.state.iteration,
            )
        else:
            warnings.warn(
                "ignoring non-scalar output in TensorBoardStatsHandler,"
                " make sure `output_transform(engine.state.output)` returns"
                " a scalar or a dictionary of key and scalar pairs to avoid this warning."
                " {}".format(type(loss))
            )
        writer.flush()
287
+
288
+
289
class TensorBoardImageHandler(TensorBoardHandler):
    """
    TensorBoardImageHandler is an Ignite Event handler that can visualize images, labels and outputs as 2D/3D images.
    2D output (shape in Batch, channel, H, W) will be shown as simple image using the first element in the batch,
    for 3D to ND output (shape in Batch, channel, H, W, D) input, each of ``self.max_channels`` number of images'
    last three dimensions will be shown as animated GIF along the last axis (typically Depth).
    And if writer is from TensorBoardX, data has 3 channels and `max_channels=3`, will plot as RGB video.

    It can be used for any Ignite Engine (trainer, validator and evaluator).
    User can easily add it to engine for any expected Event, for example: ``EPOCH_COMPLETED``,
    ``ITERATION_COMPLETED``. The expected data source is ignite's ``engine.state.batch`` and ``engine.state.output``.

    Default behavior:
        - Show y_pred as images (GIF for 3D) on TensorBoard when Event triggered,
        - Need to use ``batch_transform`` and ``output_transform`` to specify
          how many images to show and show which channel.
        - Expects ``batch_transform(engine.state.batch)`` to return data
          format: (image[N, channel, ...], label[N, channel, ...]).
        - Expects ``output_transform(engine.state.output)`` to return a torch
          tensor in format (y_pred[N, channel, ...], loss).

    Usage example is available in the tutorial:
    https://github.com/Project-MONAI/tutorials/blob/master/3d_segmentation/unet_segmentation_3d_ignite.ipynb.

    """

    def __init__(
        self,
        summary_writer: SummaryWriter | SummaryWriterX | None = None,
        log_dir: str = "./runs",
        interval: int = 1,
        epoch_level: bool = True,
        batch_transform: Callable = lambda x: x,
        output_transform: Callable = lambda x: x,
        global_iter_transform: Callable = lambda x: x,
        index: int = 0,
        max_channels: int = 1,
        frame_dim: int = -3,
        max_frames: int = 64,
    ) -> None:
        """
        Args:
            summary_writer: user can specify TensorBoard or TensorBoardX SummaryWriter,
                default to create a new TensorBoard writer.
            log_dir: if using default SummaryWriter, write logs to this directory, default is `./runs`.
            interval: plot content from engine.state every N epochs or every N iterations, default is 1.
            epoch_level: plot content from engine.state every N epochs or N iterations. `True` is epoch level,
                `False` is iteration level.
            batch_transform: a callable that is used to extract `image` and `label` from `ignite.engine.state.batch`,
                then construct `(image, label)` pair. for example: if `ignite.engine.state.batch` is `{"image": xxx,
                "label": xxx, "other": xxx}`, `batch_transform` can be `lambda x: (x["image"], x["label"])`.
                will use the result to plot image from `result[0][index]` and plot label from `result[1][index]`.
                `engine.state` and `batch_transform` inherit from the ignite concept:
                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            output_transform: a callable that is used to extract the `predictions` data from
                `ignite.engine.state.output`, will use the result to plot output from `result[index]`.
                `engine.state` and `output_transform` inherit from the ignite concept:
                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            global_iter_transform: a callable that is used to customize global step number for TensorBoard.
                For example, in evaluation, the evaluator engine needs to know current epoch from trainer.
            index: plot which element in a data batch, default is the first element.
            max_channels: number of channels to plot.
            frame_dim: if plotting 3D image as GIF, specify the dimension used as frames,
                expect input data shape as `NCHWD`, default to `-3` (the first spatial dim)
            max_frames: if plot 3D RGB image as video in TensorBoardX, set the FPS to `max_frames`.
        """
        super().__init__(summary_writer=summary_writer, log_dir=log_dir)
        self.interval = interval
        self.epoch_level = epoch_level
        self.batch_transform = batch_transform
        self.output_transform = output_transform
        self.global_iter_transform = global_iter_transform
        self.index = index
        self.frame_dim = frame_dim
        self.max_frames = max_frames
        self.max_channels = max_channels

    def attach(self, engine: Engine) -> None:
        """
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        if self.epoch_level:
            engine.add_event_handler(Events.EPOCH_COMPLETED(every=self.interval), self)
        else:
            engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.interval), self)

    def _plot_image(self, data, tag: str, step: int, source_desc: str) -> None:
        """
        Validate and plot one batch element as a 2D/3D image; a `None` value is silently skipped.

        Args:
            data: image data for one batch element; a torch tensor is detached and moved to cpu numpy first.
            tag: tag name used in TensorBoard.
            step: global step value to record.
            source_desc: human-readable description of where `data` came from, used in the error message.

        Raises:
            TypeError: when `data` is not None and not a numpy array or torch tensor.
        """
        if isinstance(data, torch.Tensor):
            data = data.detach().cpu().numpy()
        if data is None:
            return
        if not isinstance(data, np.ndarray):
            raise TypeError(
                f"{source_desc} must be None or one of (numpy.ndarray, torch.Tensor) but is {type(data).__name__}."
            )
        plot_2d_or_3d_image(
            # add batch dim and plot the first item
            data=data[None],
            step=step,
            writer=self._writer,
            index=0,
            max_channels=self.max_channels,
            frame_dim=self.frame_dim,
            max_frames=self.max_frames,
            tag=tag,
        )

    def __call__(self, engine: Engine) -> None:
        """
        Plot the image, label and model output for batch element `self.index` at the current step.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.

        Raises:
            TypeError: When ``batch_transform(engine.state.batch)[0]`` type is not in
                ``Optional[Union[numpy.ndarray, torch.Tensor]]``.
            TypeError: When ``batch_transform(engine.state.batch)[1]`` type is not in
                ``Optional[Union[numpy.ndarray, torch.Tensor]]``.
            TypeError: When ``output_transform(engine.state.output)`` type is not in
                ``Optional[Union[numpy.ndarray, torch.Tensor]]``.

        """
        step = self.global_iter_transform(engine.state.epoch if self.epoch_level else engine.state.iteration)
        # bug fix: the original error message for this first plot wrongly referenced
        # `output_transform(engine.state.output)[0]`; the value actually comes from `batch_transform`.
        self._plot_image(
            self.batch_transform(engine.state.batch)[0][self.index],
            tag="input_0",
            step=step,
            source_desc="batch_transform(engine.state.batch)[0]",
        )
        self._plot_image(
            self.batch_transform(engine.state.batch)[1][self.index],
            tag="input_1",
            step=step,
            source_desc="batch_transform(engine.state.batch)[1]",
        )
        self._plot_image(
            self.output_transform(engine.state.output)[self.index],
            tag="output",
            step=step,
            source_desc="output_transform(engine.state.output)",
        )
        self._writer.flush()
source_code/SegMamba/monai/inferers/inferer.py ADDED
@@ -0,0 +1,754 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import warnings
15
+ from abc import ABC, abstractmethod
16
+ from collections.abc import Callable, Iterable, Iterator, Mapping, Sequence
17
+ from pydoc import locate
18
+ from typing import Any
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+
23
+ from monai.apps.utils import get_logger
24
+ from monai.data.meta_tensor import MetaTensor
25
+ from monai.data.thread_buffer import ThreadBuffer
26
+ from monai.inferers.merger import AvgMerger, Merger
27
+ from monai.inferers.splitter import Splitter
28
+ from monai.inferers.utils import compute_importance_map, sliding_window_inference
29
+ from monai.utils import BlendMode, PatchKeys, PytorchPadMode, ensure_tuple, optional_import
30
+ from monai.visualize import CAM, GradCAM, GradCAMpp
31
+
32
# module-level logger, named after this module
logger = get_logger(__name__)

# public API of this module
__all__ = [
    "Inferer",
    "PatchInferer",
    "SimpleInferer",
    "SlidingWindowInferer",
    "SaliencyInferer",
    "SliceInferer",
    "SlidingWindowInfererAdapt",
]
43
+
44
+
45
class Inferer(ABC):
    """
    Abstract base class defining the model-inference interface.

    Subclasses implement ``__call__`` to run a model over inputs with a specific
    strategy, e.g. a sliding window method.

    Example code::

        device = torch.device("cuda:0")
        transform = Compose([ToTensor(), LoadImage(image_only=True)])
        data = transform(img_path).to(device)
        model = UNet(...).to(device)
        inferer = SlidingWindowInferer(...)

        model.eval()
        with torch.no_grad():
            pred = inferer(inputs=data, network=model)
        ...

    """

    @abstractmethod
    def __call__(self, inputs: torch.Tensor, network: Callable, *args: Any, **kwargs: Any) -> Any:
        """
        Run inference on `inputs` with the `network` model.

        Args:
            inputs: input of the model inference.
            network: model for inference.
            args: optional args to be passed to ``network``.
            kwargs: optional keyword args to be passed to ``network``.

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
81
+
82
+
83
class PatchInferer(Inferer):
    """
    Inference on patches instead of the whole image based on Splitter and Merger.
    This splits the input image into patches and then merge the resulted patches.

    Args:
        splitter: a `Splitter` object that split the inputs into patches. Defaults to None.
            If not provided or None, the inputs are considered to be already split into patches.
            In this case, the output `merged_shape` and the optional `cropped_shape` cannot be inferred
            and should be explicitly provided.
        merger_cls: a `Merger` subclass that can be instantiated to merges patch outputs.
            It can also be a string that matches the name of a class inherited from `Merger` class.
            Defaults to `AvgMerger`.
        batch_size: batch size for patches. If the input tensor is already batched [BxCxWxH],
            this adds additional batching [(Bp*B)xCxWpxHp] for inference on patches.
            Defaults to 1.
        preprocessing: a callable that process patches before the being fed to the network.
            Defaults to None.
        postprocessing: a callable that process the output of the network.
            Defaults to None.
        output_keys: if the network output is a dictionary, this defines the keys of
            the output dictionary to be used for merging.
            Defaults to None, where all the keys are used.
        match_spatial_shape: whether to crop the output to match the input shape. Defaults to True.
        buffer_size: number of patches to be held in the buffer with a separate thread for batch sampling. Defaults to 0.
        merger_kwargs: arguments to be passed to `merger_cls` for instantiation.
            `merged_shape` is calculated automatically based on the input shape and
            the output patch shape unless it is passed here.
    """

    def __init__(
        self,
        splitter: Splitter | None = None,
        merger_cls: type[Merger] | str = AvgMerger,
        batch_size: int = 1,
        preprocessing: Callable | None = None,
        postprocessing: Callable | None = None,
        output_keys: Sequence | None = None,
        match_spatial_shape: bool = True,
        buffer_size: int = 0,
        **merger_kwargs: Any,
    ) -> None:
        Inferer.__init__(self)
        # splitter: must be a Splitter instance or None (None means inputs arrive pre-split).
        # simplified from the original nested double isinstance check, whose inner test was redundant.
        if splitter is not None and not isinstance(splitter, Splitter):
            raise TypeError(
                f"'splitter' should be a `Splitter` object that returns: "
                "an iterable of pairs of (patch, location) or a MetaTensor that has `PatchKeys.LOCATION` metadata)."
                f"{type(splitter)} is given."
            )
        self.splitter = splitter

        # merger: resolve a string to a Merger subclass, then validate
        if isinstance(merger_cls, str):
            valid_merger_cls: type[Merger]
            # search amongst implemented mergers in MONAI
            valid_merger_cls, merger_found = optional_import("monai.inferers.merger", name=merger_cls)
            if not merger_found:
                # try to locate the requested merger class (with dotted path)
                valid_merger_cls = locate(merger_cls)  # type: ignore
            if valid_merger_cls is None:
                raise ValueError(f"The requested `merger_cls` ['{merger_cls}'] does not exist.")
            merger_cls = valid_merger_cls
        if not issubclass(merger_cls, Merger):
            raise TypeError(f"'merger' should be a subclass of `Merger`, {merger_cls} is given.")
        self.merger_cls = merger_cls
        self.merger_kwargs = merger_kwargs

        # pre-processor (process patch before the network)
        if preprocessing is not None and not callable(preprocessing):
            raise TypeError(f"'preprocessing' should be a callable object, {type(preprocessing)} is given.")
        self.preprocessing = preprocessing

        # post-processor (process the output of the network)
        if postprocessing is not None and not callable(postprocessing):
            raise TypeError(f"'postprocessing' should be a callable object, {type(postprocessing)} is given.")
        self.postprocessing = postprocessing

        # batch size for patches
        if batch_size < 1:
            raise ValueError(f"`batch_size` must be a positive number, {batch_size} is given.")
        self.batch_size = batch_size

        # model output keys
        self.output_keys = output_keys

        # whether to crop the output to match the input shape
        self.match_spatial_shape = match_spatial_shape

        # buffer size for multithreaded batch sampling
        self.buffer_size = buffer_size

    def _batch_sampler(
        self, patches: Iterable[tuple[torch.Tensor, Sequence[int]]] | MetaTensor
    ) -> Iterator[tuple[torch.Tensor, Sequence, int]]:
        """Generate batch of patches and locations

        Args:
            patches: a tensor or list of tensors

        Yields:
            A batch of patches (torch.Tensor or MetaTensor), a sequence of location tuples, and the batch size
        """
        if isinstance(patches, MetaTensor):
            total_size = len(patches)
            for i in range(0, total_size, self.batch_size):
                batch_size = min(self.batch_size, total_size - i)
                # compute the slice once and reuse it for both the data and its location metadata
                batch = patches[i : i + batch_size]
                yield batch, batch.meta[PatchKeys.LOCATION], batch_size  # type: ignore
        else:
            buffer: Iterable | ThreadBuffer
            if self.buffer_size > 0:
                # Use multi-threading to sample patches with a buffer
                buffer = ThreadBuffer(patches, buffer_size=self.buffer_size, timeout=0.1)
            else:
                buffer = patches
            patch_batch: list[Any] = [None] * self.batch_size
            location_batch: list[Any] = [None] * self.batch_size
            idx_in_batch = 0
            for sample in buffer:
                patch_batch[idx_in_batch] = sample[0]
                location_batch[idx_in_batch] = sample[1]
                idx_in_batch += 1
                if idx_in_batch == self.batch_size:
                    # concatenate batch of patches to create a tensor
                    yield torch.cat(patch_batch), location_batch, idx_in_batch
                    patch_batch = [None] * self.batch_size
                    location_batch = [None] * self.batch_size
                    idx_in_batch = 0
            if idx_in_batch > 0:
                # concatenate batch of patches to create a tensor (last, partially-filled batch)
                yield torch.cat(patch_batch[:idx_in_batch]), location_batch, idx_in_batch

    def _ensure_tuple_outputs(self, outputs: Any) -> tuple:
        """Wrap network output(s) in a tuple; for dict outputs, order the values by `self.output_keys`."""
        if isinstance(outputs, dict):
            if self.output_keys is None:
                self.output_keys = list(outputs.keys())  # model's output keys
            return tuple(outputs[k] for k in self.output_keys)
        return ensure_tuple(outputs, wrap_array=True)

    def _run_inference(self, network: Callable, patch: torch.Tensor, *args: Any, **kwargs: Any) -> tuple:
        """Run pre-processing, the network, and post-processing on one batch of patches."""
        # pre-process
        if self.preprocessing:
            patch = self.preprocessing(patch)
        # inference
        outputs = network(patch, *args, **kwargs)
        # post-process
        if self.postprocessing:
            outputs = self.postprocessing(outputs)
        # ensure we have a tuple of model outputs to support multiple outputs
        return self._ensure_tuple_outputs(outputs)

    def _initialize_mergers(self, inputs, outputs, patches, batch_size):
        """Create one merger per network output and compute per-output spatial scale ratios."""
        in_patch = torch.chunk(patches, batch_size)[0]
        mergers = []
        ratios = []
        for out_patch_batch in outputs:
            out_patch = torch.chunk(out_patch_batch, batch_size)[0]
            # calculate the ratio of input and output patch sizes
            ratio = tuple(op / ip for ip, op in zip(in_patch.shape[2:], out_patch.shape[2:]))

            # calculate merged_shape and cropped_shape
            merger_kwargs = self.merger_kwargs.copy()
            cropped_shape, merged_shape = self._get_merged_shapes(inputs, out_patch, ratio)
            if "merged_shape" not in merger_kwargs:
                merger_kwargs["merged_shape"] = merged_shape
                if merger_kwargs["merged_shape"] is None:
                    raise ValueError("`merged_shape` cannot be `None`.")
            if "cropped_shape" not in merger_kwargs:
                merger_kwargs["cropped_shape"] = cropped_shape

            # initialize the merger
            merger = self.merger_cls(**merger_kwargs)

            # store mergers and input/output ratios
            mergers.append(merger)
            ratios.append(ratio)

        return mergers, ratios

    def _aggregate(self, outputs, locations, batch_size, mergers, ratios):
        """Feed each output patch into its merger at the (ratio-scaled) patch location."""
        for output_patches, merger, ratio in zip(outputs, mergers, ratios):
            # split batched output into individual patches and then aggregate
            for in_loc, out_patch in zip(locations, torch.chunk(output_patches, batch_size)):
                out_loc = [round(l * r) for l, r in zip(in_loc, ratio)]
                merger.aggregate(out_patch, out_loc)

    def _get_merged_shapes(self, inputs, out_patch, ratio):
        """Define the shape of merged tensors (non-padded and padded)"""
        if self.splitter is None:
            # without a splitter the shapes cannot be inferred; merger kwargs must supply them
            return None, None

        # input spatial shapes
        original_spatial_shape = self.splitter.get_input_shape(inputs)
        padded_spatial_shape = self.splitter.get_padded_shape(inputs)

        # output spatial shapes
        output_spatial_shape = tuple(round(s * r) for s, r in zip(original_spatial_shape, ratio))
        padded_output_spatial_shape = tuple(round(s * r) for s, r in zip(padded_spatial_shape, ratio))

        # output shapes
        cropped_shape = out_patch.shape[:2] + output_spatial_shape
        merged_shape = out_patch.shape[:2] + padded_output_spatial_shape

        if not self.match_spatial_shape:
            cropped_shape = merged_shape

        return cropped_shape, merged_shape

    def __call__(
        self,
        inputs: torch.Tensor,
        network: Callable[..., torch.Tensor | Sequence[torch.Tensor] | dict[Any, torch.Tensor]],
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """
        Args:
            inputs: input data for inference, a torch.Tensor, representing an image or batch of images.
                However if the data is already split, it can be fed by providing a list of tuple (patch, location),
                or a MetaTensor that has metadata for `PatchKeys.LOCATION`. In both cases no splitter should be provided.
            network: target model to execute inference.
                supports callables such as ``lambda x: my_torch_model(x, additional_config)``
            args: optional args to be passed to ``network``.
            kwargs: optional keyword args to be passed to ``network``.

        """
        patches_locations: Iterable[tuple[torch.Tensor, Sequence[int]]] | MetaTensor
        if self.splitter is None:
            # handle situations where the splitter is not provided
            if isinstance(inputs, torch.Tensor):
                if isinstance(inputs, MetaTensor):
                    if PatchKeys.LOCATION not in inputs.meta:
                        raise ValueError(
                            "`PatchKey.LOCATION` does not exists in `inputs.meta`. "
                            "If the inputs are already split into patches, the location of patches needs to be "
                            "provided as `PatchKey.LOCATION` metadata in a MetaTensor. "
                            "If the input is not already split, please provide `splitter`."
                        )
            else:
                raise ValueError(
                    "`splitter` should be set if the input is not already split into patches. "
                    "For inputs that are split, the location of patches needs to be provided as "
                    "(image, location) pairs, or as `PatchKey.LOCATION` metadata in a MetaTensor. "
                    f"The provided inputs type is {type(inputs)}."
                )
            patches_locations = inputs
        else:
            # apply splitter
            patches_locations = self.splitter(inputs)

        # each ratio is a tuple of per-dimension output/input scale factors (annotation corrected)
        ratios: list[tuple[float, ...]] = []
        mergers: list[Merger] = []
        for patches, locations, batch_size in self._batch_sampler(patches_locations):
            # run inference
            outputs = self._run_inference(network, patches, *args, **kwargs)
            # initialize the mergers on the first batch only
            if not mergers:
                mergers, ratios = self._initialize_mergers(inputs, outputs, patches, batch_size)
            # aggregate outputs
            self._aggregate(outputs, locations, batch_size, mergers, ratios)

        # finalize the mergers and get the results
        merged_outputs = [merger.finalize() for merger in mergers]

        # return according to the model output
        if self.output_keys:
            return dict(zip(self.output_keys, merged_outputs))
        if len(merged_outputs) == 1:
            return merged_outputs[0]
        return merged_outputs
354
+
355
+
356
class SimpleInferer(Inferer):
    """
    Plain inference strategy: invoke the model's forward pass directly on the inputs.
    Usage example can be found in the :py:class:`monai.inferers.Inferer` base class.

    """

    def __init__(self) -> None:
        super().__init__()

    def __call__(
        self, inputs: torch.Tensor, network: Callable[..., torch.Tensor], *args: Any, **kwargs: Any
    ) -> torch.Tensor:
        """Unified callable function API of Inferers.

        Args:
            inputs: model input data for inference.
            network: target model to execute inference.
                supports callables such as ``lambda x: my_torch_model(x, additional_config)``
            args: optional args to be passed to ``network``.
            kwargs: optional keyword args to be passed to ``network``.

        """
        # no windowing or patching: a single direct forward call
        return network(inputs, *args, **kwargs)
380
+
381
+
382
class SlidingWindowInferer(Inferer):
    """
    Sliding window method for model inference,
    with `sw_batch_size` windows for every model.forward().
    Usage example can be found in the :py:class:`monai.inferers.Inferer` base class.

    Args:
        roi_size: the window size to execute SlidingWindow evaluation.
            If it has non-positive components, the corresponding `inputs` size will be used.
            if the components of the `roi_size` are non-positive values, the transform will use the
            corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted
            to `(32, 64)` if the second spatial dimension size of img is `64`.
        sw_batch_size: the batch size to run window slices.
        overlap: Amount of overlap between scans along each spatial dimension, defaults to ``0.25``.
        mode: {``"constant"``, ``"gaussian"``}
            How to blend output of overlapping windows. Defaults to ``"constant"``.

            - ``"constant``": gives equal weight to all predictions.
            - ``"gaussian``": gives less weight to predictions on edges of windows.

        sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``"gaussian"``.
            Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.
            When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding
            spatial dimensions.
        padding_mode: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}
            Padding mode when ``roi_size`` is larger than inputs. Defaults to ``"constant"``
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        cval: fill value for 'constant' padding mode. Default: 0
        sw_device: device for the window data.
            By default the device (and accordingly the memory) of the `inputs` is used.
            Normally `sw_device` should be consistent with the device where `predictor` is defined.
        device: device for the stitched output prediction.
            By default the device (and accordingly the memory) of the `inputs` is used. If for example
            set to device=torch.device('cpu') the gpu memory consumption is less and independent of the
            `inputs` and `roi_size`. Output is on the `device`.
        progress: whether to print a tqdm progress bar.
        cache_roi_weight_map: whether to precompute the ROI weight map.
        cpu_thresh: when provided, dynamically switch to stitching on cpu (to save gpu memory)
            when input image volume is larger than this threshold (in pixels/voxels).
            Otherwise use ``"device"``. Thus, the output may end-up on either cpu or gpu.
        buffer_steps: the number of sliding window iterations along the ``buffer_dim``
            to be buffered on ``sw_device`` before writing to ``device``.
            (Typically, ``sw_device`` is ``cuda`` and ``device`` is ``cpu``.)
            default is None, no buffering. For the buffer dim, when spatial size is divisible by buffer_steps*roi_size,
            (i.e. no overlapping among the buffers) non_blocking copy may be automatically enabled for efficiency.
        buffer_dim: the spatial dimension along which the buffers are created.
            0 indicates the first spatial dimension. Default is -1, the last spatial dimension.
        with_coord: whether to pass the window coordinates to ``network``. Defaults to False.
            If True, the ``network``'s 2nd input argument should accept the window coordinates.

    Note:
        ``sw_batch_size`` denotes the max number of windows per network inference iteration,
        not the batch size of inputs.

    """

    def __init__(
        self,
        roi_size: Sequence[int] | int,
        sw_batch_size: int = 1,
        overlap: Sequence[float] | float = 0.25,
        mode: BlendMode | str = BlendMode.CONSTANT,
        sigma_scale: Sequence[float] | float = 0.125,
        padding_mode: PytorchPadMode | str = PytorchPadMode.CONSTANT,
        cval: float = 0.0,
        sw_device: torch.device | str | None = None,
        device: torch.device | str | None = None,
        progress: bool = False,
        cache_roi_weight_map: bool = False,
        cpu_thresh: int | None = None,
        buffer_steps: int | None = None,
        buffer_dim: int = -1,
        with_coord: bool = False,
    ) -> None:
        super().__init__()
        # All options are stored as-is and forwarded to `sliding_window_inference` in __call__.
        self.roi_size = roi_size
        self.sw_batch_size = sw_batch_size
        self.overlap = overlap
        self.mode: BlendMode = BlendMode(mode)  # validate/normalize the blend mode eagerly
        self.sigma_scale = sigma_scale
        self.padding_mode = padding_mode
        self.cval = cval
        self.sw_device = sw_device
        self.device = device
        self.progress = progress
        self.cpu_thresh = cpu_thresh
        self.buffer_steps = buffer_steps
        self.buffer_dim = buffer_dim
        self.with_coord = with_coord

        # compute_importance_map takes long time when computing on cpu. We thus
        # compute it once if it's static and then save it for future usage
        self.roi_weight_map = None
        try:
            if cache_roi_weight_map and isinstance(roi_size, Sequence) and min(roi_size) > 0:  # non-dynamic roi size
                if device is None:
                    device = "cpu"
                self.roi_weight_map = compute_importance_map(
                    ensure_tuple(self.roi_size), mode=mode, sigma_scale=sigma_scale, device=device
                )
            if cache_roi_weight_map and self.roi_weight_map is None:
                # caching was requested but the roi size is dynamic (non-positive or scalar), so no map was built
                warnings.warn("cache_roi_weight_map=True, but cache is not created. (dynamic roi_size?)")
        except BaseException as e:
            # deliberately broad: any failure while precomputing the weight map (commonly OOM for
            # large gaussian maps) is surfaced with actionable configuration hints
            raise RuntimeError(
                f"roi size {self.roi_size}, mode={mode}, sigma_scale={sigma_scale}, device={device}\n"
                "Seems to be OOM. Please try smaller patch size or mode='constant' instead of mode='gaussian'."
            ) from e

    def __call__(
        self,
        inputs: torch.Tensor,
        network: Callable[..., torch.Tensor | Sequence[torch.Tensor] | dict[Any, torch.Tensor]],
        *args: Any,
        **kwargs: Any,
    ) -> torch.Tensor | tuple[torch.Tensor, ...] | dict[Any, torch.Tensor]:
        """
        Run sliding-window inference of ``network`` over ``inputs``.

        Args:
            inputs: model input data for inference.
            network: target model to execute inference.
                supports callables such as ``lambda x: my_torch_model(x, additional_config)``
            args: optional args to be passed to ``network``.
            kwargs: optional keyword args to be passed to ``network``.

        """

        # per-call keyword overrides take precedence over the constructor defaults;
        # they are popped so they are not forwarded to `network`
        device = kwargs.pop("device", self.device)
        buffer_steps = kwargs.pop("buffer_steps", self.buffer_steps)
        buffer_dim = kwargs.pop("buffer_dim", self.buffer_dim)

        if device is None and self.cpu_thresh is not None and inputs.shape[2:].numel() > self.cpu_thresh:
            device = "cpu"  # stitch in cpu memory if image is too large

        return sliding_window_inference(
            inputs,
            self.roi_size,
            self.sw_batch_size,
            network,
            self.overlap,
            self.mode,
            self.sigma_scale,
            self.padding_mode,
            self.cval,
            self.sw_device,
            device,
            self.progress,
            self.roi_weight_map,
            None,
            buffer_steps,
            buffer_dim,
            self.with_coord,
            *args,
            **kwargs,
        )
536
+
537
+
538
class SlidingWindowInfererAdapt(SlidingWindowInferer):
    """
    SlidingWindowInfererAdapt extends SlidingWindowInferer to automatically switch to buffered and then to CPU stitching,
    when OOM on GPU. It also records a size of such large images to automatically
    try CPU stitching for the next large image of a similar size. If the stitching 'device' input parameter is provided,
    automatic adaptation won't be attempted, please keep the default option device = None for adaptive behavior.
    Note: the output might be on CPU (even if the input was on GPU), if the GPU memory was not sufficient.

    """

    def __call__(
        self,
        inputs: torch.Tensor,
        network: Callable[..., torch.Tensor | Sequence[torch.Tensor] | dict[Any, torch.Tensor]],
        *args: Any,
        **kwargs: Any,
    ) -> torch.Tensor | tuple[torch.Tensor, ...] | dict[Any, torch.Tensor]:
        """
        Run sliding-window inference, retrying with progressively cheaper stitching strategies on GPU OOM.

        Args:
            inputs: model input data for inference.
            network: target model to execute inference.
                supports callables such as ``lambda x: my_torch_model(x, additional_config)``
            args: optional args to be passed to ``network``.
            kwargs: optional keyword args to be passed to ``network``.

        """

        # if device is provided, use without any adaptations
        if self.device is not None:
            return super().__call__(inputs, network, *args, **kwargs)

        # buffer_steps <= 0 means the user opted out of buffered stitching entirely
        skip_buffer = self.buffer_steps is not None and self.buffer_steps <= 0
        # cpu_thresh may have been lowered by a previous OOM on a similarly-sized image
        cpu_cond = self.cpu_thresh is not None and inputs.shape[2:].numel() > self.cpu_thresh
        gpu_stitching = inputs.is_cuda and not cpu_cond
        buffered_stitching = inputs.is_cuda and cpu_cond and not skip_buffer
        buffer_steps = max(1, self.buffer_steps) if self.buffer_steps is not None else 1
        buffer_dim = -1

        # prefer buffering along the dominant spatial dimension when it is at least
        # twice as long as the last one; otherwise keep the default last dimension
        sh = list(inputs.shape[2:])
        max_dim = sh.index(max(sh))
        if inputs.shape[max_dim + 2] / inputs.shape[-1] >= 2:
            buffer_dim = max_dim

        for _ in range(10):  # at most 10 trials
            try:
                return super().__call__(
                    inputs,
                    network,
                    *args,
                    device=inputs.device if gpu_stitching else torch.device("cpu"),
                    buffer_steps=buffer_steps if buffered_stitching else None,
                    buffer_dim=buffer_dim,
                    **kwargs,
                )
            except RuntimeError as e:
                # only adapt on CUDA OOM while a GPU strategy is active; anything else is re-raised
                if not gpu_stitching and not buffered_stitching or "OutOfMemoryError" not in str(type(e).__name__):
                    raise e

                logger.info(e)

                if gpu_stitching:  # if failed on gpu
                    gpu_stitching = False
                    self.cpu_thresh = inputs.shape[2:].numel() - 1  # update thresh

                    if skip_buffer:
                        buffered_stitching = False
                        logger.warning(f"GPU stitching failed, attempting on CPU, image dim {inputs.shape}.")

                    else:
                        buffered_stitching = True
                        self.buffer_steps = buffer_steps
                        logger.warning(
                            f"GPU stitching failed, buffer {buffer_steps} dim {buffer_dim}, image dim {inputs.shape}."
                        )
                elif buffer_steps > 1:
                    # shrink the buffer geometrically before giving up on buffered stitching
                    buffer_steps = max(1, buffer_steps // 2)
                    self.buffer_steps = buffer_steps
                    logger.warning(
                        f"GPU buffered stitching failed, image dim {inputs.shape} reducing buffer to {buffer_steps}."
                    )
                else:
                    buffered_stitching = False
                    logger.warning(f"GPU buffered stitching failed, attempting on CPU, image dim {inputs.shape}.")
        raise RuntimeError(  # not possible to finish after the trials
            f"SlidingWindowInfererAdapt {skip_buffer} {cpu_cond} {gpu_stitching} {buffered_stitching} {buffer_steps}"
        )
625
+
626
+
627
class SaliencyInferer(Inferer):
    """
    Inference strategy that produces class-activation (saliency) maps instead of raw predictions.

    Args:
        cam_name: expected CAM method name, should be: "CAM", "GradCAM" or "GradCAMpp".
        target_layers: name of the model layer to generate the feature map.
        class_idx: index of the class to be visualized. if None, default to argmax(logits).
        args: other optional args to be passed to the `__init__` of cam.
        kwargs: other optional keyword args to be passed to `__init__` of cam.

    """

    def __init__(
        self, cam_name: str, target_layers: str, class_idx: int | None = None, *args: Any, **kwargs: Any
    ) -> None:
        Inferer.__init__(self)
        name = cam_name.lower()
        if name not in ("cam", "gradcam", "gradcampp"):
            raise ValueError("cam_name should be: 'CAM', 'GradCAM' or 'GradCAMpp'.")
        self.cam_name = name
        self.target_layers = target_layers
        self.class_idx = class_idx
        self.args = args
        self.kwargs = kwargs

    def __call__(self, inputs: torch.Tensor, network: nn.Module, *args: Any, **kwargs: Any):  # type: ignore
        """Build the configured CAM visualizer around ``network`` and apply it to ``inputs``.

        Args:
            inputs: model input data for inference.
            network: target model to execute inference.
                supports callables such as ``lambda x: my_torch_model(x, additional_config)``
            args: other optional args to be passed to the `__call__` of cam.
            kwargs: other optional keyword args to be passed to `__call__` of cam.

        """
        cam: CAM | GradCAM | GradCAMpp
        # dispatch on the (validated) lower-cased name; anything not CAM/GradCAM is GradCAMpp
        cam_cls = {"cam": CAM, "gradcam": GradCAM}.get(self.cam_name, GradCAMpp)
        cam = cam_cls(network, self.target_layers, *self.args, **self.kwargs)

        return cam(inputs, self.class_idx, *args, **kwargs)
672
+
673
+
674
class SliceInferer(SlidingWindowInferer):
    """
    SliceInferer extends SlidingWindowInferer to provide slice-by-slice (2D) inference when provided a 3D volume.
    A typical use case could be a 2D model (like 2D segmentation UNet) operates on the slices from a 3D volume,
    and the output is a 3D volume with 2D slices aggregated. Example::

        # sliding over the `spatial_dim`
        inferer = SliceInferer(roi_size=(64, 256), sw_batch_size=1, spatial_dim=1)
        output = inferer(input_volume, net)

    Args:
        spatial_dim: Spatial dimension over which the slice-by-slice inference runs on the 3D volume.
            For example ``0`` could slide over axial slices. ``1`` over coronal slices and ``2`` over sagittal slices.
        args: other optional args to be passed to the `__init__` of base class SlidingWindowInferer.
        kwargs: other optional keyword args to be passed to `__init__` of base class SlidingWindowInferer.

    Note:
        ``roi_size`` in SliceInferer is expected to be a 2D tuple when a 3D volume is provided. This allows
        sliding across slices along the 3D volume using a selected ``spatial_dim``.

    """

    def __init__(self, spatial_dim: int = 0, *args: Any, **kwargs: Any) -> None:
        self.spatial_dim = spatial_dim
        super().__init__(*args, **kwargs)
        # remember the user-provided (2D) roi_size; self.roi_size is expanded to 3D on each call
        self.orig_roi_size = ensure_tuple(self.roi_size)

    def __call__(
        self,
        inputs: torch.Tensor,
        network: Callable[..., torch.Tensor | Sequence[torch.Tensor] | dict[Any, torch.Tensor]],
        *args: Any,
        **kwargs: Any,
    ) -> torch.Tensor | tuple[torch.Tensor, ...] | dict[Any, torch.Tensor]:
        """
        Args:
            inputs: 3D input for inference
            network: 2D model to execute inference on slices in the 3D input
            args: optional args to be passed to ``network``.
            kwargs: optional keyword args to be passed to ``network``.
        """
        if self.spatial_dim > 2:
            raise ValueError("`spatial_dim` can only be `0, 1, 2` with `[H, W, D]` respectively.")

        # Check if ``roi_size`` tuple is 2D and ``inputs`` tensor is 3D
        self.roi_size = ensure_tuple(self.roi_size)
        if len(self.orig_roi_size) == 2 and len(inputs.shape[2:]) == 3:
            # expand the 2D window to a 3D one with a singleton extent along the slicing dimension
            self.roi_size = list(self.orig_roi_size)
            self.roi_size.insert(self.spatial_dim, 1)
        else:
            raise RuntimeError(
                f"Currently, only 2D `roi_size` ({self.orig_roi_size}) with 3D `inputs` tensor (shape={inputs.shape}) is supported."
            )

        # wrap the 2D network so each extracted window is squeezed/unsqueezed around the slicing dim
        return super().__call__(inputs=inputs, network=lambda x: self.network_wrapper(network, x, *args, **kwargs))

    def network_wrapper(
        self,
        network: Callable[..., torch.Tensor | Sequence[torch.Tensor] | dict[Any, torch.Tensor]],
        x: torch.Tensor,
        *args: Any,
        **kwargs: Any,
    ) -> torch.Tensor | tuple[torch.Tensor, ...] | dict[Any, torch.Tensor]:
        """
        Wrapper handles inference for 2D models over 3D volume inputs.
        """
        # Pass 4D input [N, C, H, W]/[N, C, D, W]/[N, C, D, H] to the model as it is 2D.
        x = x.squeeze(dim=self.spatial_dim + 2)
        out = network(x, *args, **kwargs)

        # Unsqueeze the network output so it is [N, C, D, H, W] as expected by
        # the default SlidingWindowInferer class
        if isinstance(out, torch.Tensor):
            return out.unsqueeze(dim=self.spatial_dim + 2)

        if isinstance(out, Mapping):
            # dict-valued outputs: unsqueeze every entry in place
            for k in out.keys():
                out[k] = out[k].unsqueeze(dim=self.spatial_dim + 2)
            return out

        # sequence-valued outputs: return a tuple of unsqueezed tensors
        return tuple(out_i.unsqueeze(dim=self.spatial_dim + 2) for out_i in out)
source_code/SegMamba/monai/losses/adversarial_loss.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import warnings
15
+
16
+ import torch
17
+ from torch.nn.modules.loss import _Loss
18
+
19
+ from monai.networks.layers.utils import get_act_layer
20
+ from monai.utils import LossReduction
21
+ from monai.utils.enums import StrEnum
22
+
23
+
24
class AdversarialCriterions(StrEnum):
    # Criterion identifiers accepted by PatchAdversarialLoss.
    BCE = "bce"  # binary cross-entropy (with sigmoid activation)
    HINGE = "hinge"  # hinge loss (with tanh activation)
    LEAST_SQUARE = "least_squares"  # least-squares / LSGAN-style MSE loss
28
+
29
+
30
class PatchAdversarialLoss(_Loss):
    """
    Calculates an adversarial loss on a Patch Discriminator or a Multi-scale Patch Discriminator.
    Warning: due to the possibility of using different criterions, the output of the discrimination
    mustn't be passed to a final activation layer. That is taken care of internally within the loss.

    Args:
        reduction: {``"none"``, ``"mean"``, ``"sum"``}
            Specifies the reduction to apply to the output. Defaults to ``"mean"``.

            - ``"none"``: no reduction will be applied.
            - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
            - ``"sum"``: the output will be summed.

        criterion: which criterion (hinge, least_squares or bce) you want to use on the discriminators outputs.
            Depending on the criterion, a different activation layer will be used. Make sure you don't run the outputs
            through an activation layer prior to calling the loss. Case-insensitive.
        no_activation_leastsq: if True, the activation layer in the case of least-squares is removed.
    """

    def __init__(
        self,
        reduction: LossReduction | str = LossReduction.MEAN,
        criterion: str = AdversarialCriterions.LEAST_SQUARE,
        no_activation_leastsq: bool = False,
    ) -> None:
        super().__init__(reduction=LossReduction(reduction))

        if criterion.lower() not in list(AdversarialCriterions):
            raise ValueError(
                "Unrecognised criterion entered for Adversarial Loss. Must be one in: %s"
                % ", ".join(AdversarialCriterions)
            )
        # Normalize once so mixed-case inputs (e.g. "BCE") select the proper branch below.
        # Previously such inputs passed the (lower-cased) validation but matched no branch,
        # leaving `self.activation`/`self.loss_fct` unset and failing later in forward().
        criterion = AdversarialCriterions(criterion.lower())

        # Depending on the criterion, a different activation layer is used.
        self.real_label = 1.0
        self.fake_label = 0.0
        self.loss_fct: _Loss
        if criterion == AdversarialCriterions.BCE:
            self.activation = get_act_layer("SIGMOID")
            self.loss_fct = torch.nn.BCELoss(reduction=reduction)
        elif criterion == AdversarialCriterions.HINGE:
            # hinge loss is computed directly in _forward_single; no loss_fct is needed
            self.activation = get_act_layer("TANH")
            self.fake_label = -1.0
        elif criterion == AdversarialCriterions.LEAST_SQUARE:
            if no_activation_leastsq:
                self.activation = None
            else:
                self.activation = get_act_layer(name=("LEAKYRELU", {"negative_slope": 0.05}))
            self.loss_fct = torch.nn.MSELoss(reduction=reduction)

        self.criterion = criterion
        self.reduction = reduction

    def get_target_tensor(self, input: torch.Tensor, target_is_real: bool) -> torch.Tensor:
        """
        Gets the ground truth tensor for the discriminator depending on whether the input is real or fake.

        Args:
            input: input tensor from the discriminator (output of discriminator, or output of one of the multi-scale
                discriminator). This is used to match the shape.
            target_is_real: whether the input is real or wannabe-real (1s) or fake (0s).
        Returns:
            a constant tensor (``real_label`` or ``fake_label``) expanded to ``input``'s shape.
        """
        filling_label = self.real_label if target_is_real else self.fake_label
        label_tensor = torch.tensor(1).fill_(filling_label).type(input.type()).to(input[0].device)
        label_tensor.requires_grad_(False)
        return label_tensor.expand_as(input)

    def get_zero_tensor(self, input: torch.Tensor) -> torch.Tensor:
        """
        Gets a zero tensor.

        Args:
            input: tensor which shape you want the zeros tensor to correspond to.
        Returns:
            a zeros tensor matching ``input``'s shape, detached from the graph.
        """

        zero_label_tensor = torch.tensor(0).type(input[0].type()).to(input[0].device)
        zero_label_tensor.requires_grad_(False)
        return zero_label_tensor.expand_as(input)

    def forward(
        self, input: torch.Tensor | list, target_is_real: bool, for_discriminator: bool
    ) -> torch.Tensor | list[torch.Tensor]:
        """

        Args:
            input: output of Multi-Scale Patch Discriminator or Patch Discriminator; being a list of tensors
                or a tensor; they shouldn't have gone through an activation layer.
            target_is_real: whereas the input corresponds to discriminator output for real or fake images
            for_discriminator: whereas this is being calculated for discriminator or generator loss. In the last
                case, target_is_real is set to True, as the generator wants the input to be dimmed as real.
        Returns: if reduction is None, returns a list with the loss tensors of each discriminator if multi-scale
            discriminator is active, or the loss tensor if there is just one discriminator. Otherwise, it returns the
            summed or mean loss over the tensor and discriminator/s.

        """

        if not for_discriminator and not target_is_real:
            target_is_real = True  # With generator, we always want this to be true!
            warnings.warn(
                "Variable target_is_real has been set to False, but for_discriminator is set "
                "to False. To optimise a generator, target_is_real must be set to True."
            )

        # normalize single-discriminator input to the multi-scale (list) form
        if not isinstance(input, list):
            input = [input]
        target_ = []
        for _, disc_out in enumerate(input):
            if self.criterion != AdversarialCriterions.HINGE:
                target_.append(self.get_target_tensor(disc_out, target_is_real))
            else:
                # hinge loss compares against zeros; the sign is handled below
                target_.append(self.get_zero_tensor(disc_out))

        # Loss calculation
        loss_list = []
        for disc_ind, disc_out in enumerate(input):
            if self.activation is not None:
                disc_out = self.activation(disc_out)
            if self.criterion == AdversarialCriterions.HINGE and not target_is_real:
                # fake samples: hinge term is max(0, 1 + D(x)), implemented by negating the logits
                loss_ = self._forward_single(-disc_out, target_[disc_ind])
            else:
                loss_ = self._forward_single(disc_out, target_[disc_ind])
            loss_list.append(loss_)

        # aggregate across discriminators according to the configured reduction
        loss: torch.Tensor | list[torch.Tensor]
        if self.reduction == LossReduction.MEAN:
            loss = torch.mean(torch.stack(loss_list))
        elif self.reduction == LossReduction.SUM:
            loss = torch.sum(torch.stack(loss_list))
        else:
            loss = loss_list
        return loss

    def _forward_single(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # compute the per-discriminator loss for one (activated) output tensor
        forward: torch.Tensor
        if self.criterion == AdversarialCriterions.BCE or self.criterion == AdversarialCriterions.LEAST_SQUARE:
            forward = self.loss_fct(input, target)
        elif self.criterion == AdversarialCriterions.HINGE:
            # hinge: mean(max(0, 1 - input)), expressed via min(input - 1, 0)
            minval = torch.min(input - 1, self.get_zero_tensor(input))
            forward = -torch.mean(minval)
        return forward
source_code/SegMamba/monai/losses/contrastive.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ from warnings import warn
15
+
16
+ import torch
17
+ from torch.nn import functional as F
18
+ from torch.nn.modules.loss import _Loss
19
+
20
+
21
class ContrastiveLoss(_Loss):
    """
    Compute the Contrastive loss defined in:

    Chen, Ting, et al. "A simple framework for contrastive learning of visual representations." International
    conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)

    Adapted from:
    https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5

    """

    def __init__(self, temperature: float = 0.5, batch_size: int = -1) -> None:
        """
        Args:
            temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.

        Raises:
            ValueError: When an input of dimension length > 2 is passed
            ValueError: When input and target are of different shapes

        """
        super().__init__()
        self.temperature = temperature

        # batch size is inferred from the input at forward time; the parameter is kept
        # only for backward compatibility and triggers a deprecation-style warning
        if batch_size != -1:
            warn("batch_size is no longer required to be set. It will be estimated dynamically in the forward call")

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be B[F].
            target: the shape should be B[F].
        """
        if len(target.shape) > 2 or len(input.shape) > 2:
            raise ValueError(
                f"Either target or input has dimensions greater than 2 where target "
                f"shape is ({target.shape}) and input shape is ({input.shape})"
            )

        if target.shape != input.shape:
            raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")

        temp = torch.as_tensor(self.temperature).to(input.device)
        n = input.shape[0]

        # stack the two views and compute the full (2n x 2n) cosine-similarity matrix
        stacked = torch.cat([input, target], dim=0)
        sim = F.cosine_similarity(stacked.unsqueeze(1), stacked.unsqueeze(0), dim=2)

        # mask that zeroes out each sample's similarity with itself (the diagonal)
        off_diag = ~torch.eye(n * 2, n * 2, dtype=torch.bool)
        off_diag = torch.clone(off_diag.type(torch.float)).to(input.device)

        # positive pairs sit on the +n / -n diagonals (input_i vs target_i and vice versa)
        pos = torch.cat([torch.diag(sim, n), torch.diag(sim, -n)], dim=0)
        numerator = torch.exp(pos / temp)
        denominator = off_diag * torch.exp(sim / temp)

        per_sample_loss = -torch.log(numerator / torch.sum(denominator, dim=1))

        # average over the 2n rows (both views of every sample)
        return torch.sum(per_sample_loss) / (2 * n)
source_code/SegMamba/monai/losses/deform.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import torch
15
+ from torch.nn.modules.loss import _Loss
16
+
17
+ from monai.utils import LossReduction
18
+
19
+
20
def spatial_gradient(x: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Calculate gradients on single dimension of a tensor using central finite difference.
    It moves the tensor along the dimension to calculate the approximate gradient
    dx[i] = (x[i+1] - x[i-1]) / 2.
    Adapted from:
        DeepReg (https://github.com/DeepRegNet/DeepReg)

    Args:
        x: the shape should be BCH(WD).
        dim: dimension to calculate gradient along.
    Returns:
        gradient_dx: the shape should be BCH(WD)
    """
    # keep batch/channel dims intact, take the interior (1:-1) of every spatial dim,
    # then widen the target dim to the forward (2:) and backward (:-2) shifts
    idx_fwd = [slice(None), slice(None)] + [slice(1, -1)] * (x.ndim - 2)
    idx_bwd = list(idx_fwd)
    idx_fwd[dim] = slice(2, None)
    idx_bwd[dim] = slice(None, -2)
    return (x[tuple(idx_fwd)] - x[tuple(idx_bwd)]) / 2.0
45
+
46
+
47
class BendingEnergyLoss(_Loss):
    """
    Calculate the bending energy based on second-order differentiation of ``pred`` using central finite difference.

    For more information,
    see https://github.com/Project-MONAI/tutorials/blob/main/modules/bending_energy_diffusion_loss_notes.ipynb.

    Adapted from:
        DeepReg (https://github.com/DeepRegNet/DeepReg)
    """

    def __init__(self, normalize: bool = False, reduction: LossReduction | str = LossReduction.MEAN) -> None:
        """
        Args:
            normalize:
                Whether to divide out spatial sizes in order to make the computation roughly
                invariant to image scale (i.e. vector field sampling resolution). Defaults to False.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.
        """
        super().__init__(reduction=LossReduction(reduction).value)
        self.normalize = normalize

    def forward(self, pred: torch.Tensor) -> torch.Tensor:
        """
        Args:
            pred: the shape should be BCH(WD)

        Raises:
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
            ValueError: When ``pred`` is not 3-d, 4-d or 5-d.
            ValueError: When any spatial dimension of ``pred`` has size less than or equal to 4.
            ValueError: When the number of channels of ``pred`` does not match the number of spatial dimensions.

        """
        if pred.ndim not in [3, 4, 5]:
            raise ValueError(f"Expecting 3-d, 4-d or 5-d pred, instead got pred of shape {pred.shape}")
        for i in range(pred.ndim - 2):
            # two nested central differences each shrink a spatial dim by 2, so size must exceed 4
            if pred.shape[-i - 1] <= 4:
                raise ValueError(f"All spatial dimensions must be > 4, got spatial dimensions {pred.shape[2:]}")
        if pred.shape[1] != pred.ndim - 2:
            raise ValueError(
                f"Number of vector components, i.e. number of channels of the input DDF, {pred.shape[1]}, "
                f"does not match number of spatial dimensions, {pred.ndim - 2}"
            )

        # first order gradient
        first_order_gradient = [spatial_gradient(pred, dim) for dim in range(2, pred.ndim)]

        # spatial dimensions in a shape suited for broadcasting below
        if self.normalize:
            spatial_dims = torch.tensor(pred.shape, device=pred.device)[2:].reshape((1, -1) + (pred.ndim - 2) * (1,))

        # accumulate squared second-order derivatives; mixed partials are counted twice
        energy = torch.tensor(0)
        for dim_1, g in enumerate(first_order_gradient):
            dim_1 += 2  # offset list index to the corresponding tensor dim
            if self.normalize:
                g *= pred.shape[dim_1] / spatial_dims
                energy = energy + (spatial_gradient(g, dim_1) * pred.shape[dim_1]) ** 2
            else:
                energy = energy + spatial_gradient(g, dim_1) ** 2
            for dim_2 in range(dim_1 + 1, pred.ndim):
                if self.normalize:
                    energy = energy + 2 * (spatial_gradient(g, dim_2) * pred.shape[dim_2]) ** 2
                else:
                    energy = energy + 2 * spatial_gradient(g, dim_2) ** 2

        if self.reduction == LossReduction.MEAN.value:
            energy = torch.mean(energy)  # the batch and channel average
        elif self.reduction == LossReduction.SUM.value:
            energy = torch.sum(energy)  # sum over the batch and channel dims
        elif self.reduction != LossReduction.NONE.value:
            raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')

        return energy
126
+
127
+
128
class DiffusionLoss(_Loss):
    """
    First-order smoothness (diffusion) regularizer of a dense displacement field,
    computed from central finite differences of ``pred``.

    Original paper:
        VoxelMorph: A Learning Framework for Deformable Medical Image Registration,
        Guha Balakrishnan, Amy Zhao, Mert R. Sabuncu, John Guttag, Adrian V. Dalca
        IEEE TMI: Transactions on Medical Imaging. 2019. eprint arXiv:1809.05231.

    For more information,
    see https://github.com/Project-MONAI/tutorials/blob/main/modules/bending_energy_diffusion_loss_notes.ipynb.

    Adapted from:
        VoxelMorph (https://github.com/voxelmorph/voxelmorph)
    """

    def __init__(self, normalize: bool = False, reduction: LossReduction | str = LossReduction.MEAN) -> None:
        """
        Args:
            normalize:
                Whether to divide out spatial sizes so that the computation is roughly
                invariant to image scale (i.e. vector field sampling resolution).
                Defaults to False.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.
        """
        super().__init__(reduction=LossReduction(reduction).value)
        self.normalize = normalize

    def forward(self, pred: torch.Tensor) -> torch.Tensor:
        """
        Args:
            pred:
                Predicted dense displacement field (DDF) with shape BCH[WD], where C is
                the number of spatial dimensions. Every spatial size of the DDF must be
                greater than 2 for the loss to be computable.

        Raises:
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
            ValueError: When ``pred`` is not 3-d, 4-d or 5-d.
            ValueError: When any spatial dimension of ``pred`` has size less than or equal to 2.
            ValueError: When the number of channels of ``pred`` does not match the number of spatial dimensions.
        """
        if pred.ndim not in (3, 4, 5):
            raise ValueError(f"Expecting 3-d, 4-d or 5-d pred, instead got pred of shape {pred.shape}")
        if any(size <= 2 for size in pred.shape[2:]):
            raise ValueError(f"All spatial dimensions must be > 2, got spatial dimensions {pred.shape[2:]}")
        if pred.shape[1] != pred.ndim - 2:
            raise ValueError(
                f"Number of vector components, i.e. number of channels of the input DDF, {pred.shape[1]}, "
                f"does not match number of spatial dimensions, {pred.ndim - 2}"
            )

        # one first-order gradient per spatial axis (tensor axes 2 .. ndim-1)
        gradients = [spatial_gradient(pred, axis) for axis in range(2, pred.ndim)]

        if self.normalize:
            # spatial sizes reshaped so they broadcast over (batch, channel, *spatial)
            broadcast_shape = (1, -1) + (1,) * (pred.ndim - 2)
            sizes = torch.tensor(pred.shape, device=pred.device)[2:].reshape(broadcast_shape)

        # 0-dim integer seed; the first broadcasting add promotes dtype/device
        diffusion = torch.tensor(0)
        for axis, grad in enumerate(gradients, start=2):
            if self.normalize:
                # Divide the partial derivative of each vector component by the spatial
                # size corresponding to that component, relative to the size of the axis
                # along which the derivative is taken.
                grad = grad * (pred.shape[axis] / sizes)
            diffusion = diffusion + grad**2

        if self.reduction == LossReduction.MEAN.value:
            return torch.mean(diffusion)  # the batch and channel average
        if self.reduction == LossReduction.SUM.value:
            return torch.sum(diffusion)  # sum over the batch and channel dims
        if self.reduction == LossReduction.NONE.value:
            return diffusion
        raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
source_code/SegMamba/monai/losses/giou_loss.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import torch
15
+ from torch.nn.modules.loss import _Loss
16
+
17
+ from monai.data.box_utils import COMPUTE_DTYPE, box_pair_giou
18
+ from monai.utils import LossReduction
19
+
20
+
21
class BoxGIoULoss(_Loss):
    """
    Generalized intersection over union (GIoU) loss between paired boxes.

    Both inputs must have the same shape; the loss is ``1.0 - giou``.
    GIoU lies in (-1.0, 1.0], so the loss lies in [0.0, 2.0).

    Args:
        reduction: {``"none"``, ``"mean"``, ``"sum"``}
            Specifies the reduction to apply to the output. Defaults to ``"mean"``.
            - ``"none"``: no reduction will be applied.
            - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
            - ``"sum"``: the output will be summed.
    """

    def __init__(self, reduction: LossReduction | str = LossReduction.MEAN) -> None:
        super().__init__(reduction=LossReduction(reduction).value)

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: predicted bounding boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``
            target: GT bounding boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``

        Raises:
            ValueError: When the two inputs have different shape.
        """
        if input.shape != target.shape:
            raise ValueError(f"ground truth has different shape ({target.shape}) from input ({input.shape})")

        original_dtype = input.dtype
        # GIoU is evaluated in a wider compute dtype; the loss is cast back on return
        pairwise_giou: torch.Tensor = box_pair_giou(  # type: ignore
            target.to(dtype=COMPUTE_DTYPE), input.to(dtype=COMPUTE_DTYPE)
        )
        loss = 1.0 - pairwise_giou

        if self.reduction == LossReduction.MEAN.value:
            return loss.mean().to(original_dtype)
        if self.reduction == LossReduction.SUM.value:
            return loss.sum().to(original_dtype)
        if self.reduction == LossReduction.NONE.value:
            return loss.to(original_dtype)
        raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')


giou = BoxGIoULoss
source_code/SegMamba/monai/losses/tversky.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ from __future__ import annotations
13
+
14
+ import warnings
15
+ from collections.abc import Callable
16
+
17
+ import torch
18
+ from torch.nn.modules.loss import _Loss
19
+
20
+ from monai.networks import one_hot
21
+ from monai.utils import LossReduction
22
+
23
+
24
class TverskyLoss(_Loss):
    """
    Tversky loss for segmentation: a generalization of the Dice loss with separate
    weights for false positives (``alpha``) and false negatives (``beta``).

    Defined in:

        Sadegh et al. (2017) Tversky loss function for image segmentation
        using 3D fully convolutional deep networks. (https://arxiv.org/abs/1706.05721)

    Adapted from:
        https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L631
    """

    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Callable | None = None,
        alpha: float = 0.5,
        beta: float = 0.5,
        reduction: LossReduction | str = LossReduction.MEAN,
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
    ) -> None:
        """
        Args:
            include_background: If False channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            sigmoid: If True, apply a sigmoid function to the prediction.
            softmax: If True, apply a softmax function to the prediction.
            other_act: callable applied to the prediction when neither `sigmoid` nor `softmax`
                is wanted, e.g. ``other_act = torch.tanh``. Defaults to ``None``.
            alpha: weight of false positives
            beta: weight of false negatives
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.

            smooth_nr: a small constant added to the numerator to avoid zero.
            smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension
                before the division. Defaults to False, i.e. a loss value is computed
                independently from each item in the batch before any `reduction`.

        Raises:
            TypeError: When ``other_act`` is not an ``Optional[Callable]``.
            ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
                Incompatible values.
        """
        super().__init__(reduction=LossReduction(reduction).value)
        if other_act is not None and not callable(other_act):
            raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
        if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
            raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        self.sigmoid = sigmoid
        self.softmax = softmax
        self.other_act = other_act
        self.alpha = alpha
        self.beta = beta
        self.smooth_nr = float(smooth_nr)
        self.smooth_dr = float(smooth_dr)
        self.batch = batch

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD].
            target: the shape should be BNH[WD].

        Raises:
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
        """
        if self.sigmoid:
            input = torch.sigmoid(input)

        # channel count is read before any channel slicing; it drives the warnings below
        n_pred_ch = input.shape[1]
        if self.softmax:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `softmax=True` ignored.")
            else:
                input = torch.softmax(input, 1)

        if self.other_act is not None:
            input = self.other_act(input)

        if self.to_onehot_y:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
            else:
                target = one_hot(target, num_classes=n_pred_ch)

        if not self.include_background:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `include_background=False` ignored.")
            else:
                # drop the background channel from both tensors
                target = target[:, 1:]
                input = input[:, 1:]

        if target.shape != input.shape:
            raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")

        fg_pred = input
        bg_pred = 1 - fg_pred
        fg_truth = target
        bg_truth = 1 - fg_truth

        # sum over spatial dims only, or spatial + batch when `self.batch` is set
        sum_axes: list[int] = list(range(2, len(input.shape)))
        if self.batch:
            sum_axes = [0, *sum_axes]

        true_pos = torch.sum(fg_pred * fg_truth, sum_axes)
        false_pos = self.alpha * torch.sum(fg_pred * bg_truth, sum_axes)
        false_neg = self.beta * torch.sum(bg_pred * fg_truth, sum_axes)
        numerator = true_pos + self.smooth_nr
        denominator = true_pos + false_pos + false_neg + self.smooth_dr

        score: torch.Tensor = 1.0 - numerator / denominator

        if self.reduction == LossReduction.MEAN.value:
            return torch.mean(score)
        if self.reduction == LossReduction.SUM.value:
            return torch.sum(score)  # sum over the batch and channel dims
        if self.reduction == LossReduction.NONE.value:
            return score  # returns [N, num_classes] losses
        raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')