ChipYTY committed on
Commit
fe8202e
·
verified ·
1 Parent(s): 34a4bcb

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. source_code/SegMamba/.DS_Store +0 -0
  2. source_code/SegMamba/.gitignore +160 -0
  3. source_code/SegMamba/0_inference.py +20 -0
  4. source_code/SegMamba/1_rename_mri_data.py +47 -0
  5. source_code/SegMamba/2_preprocessing_mri.py +85 -0
  6. source_code/SegMamba/3_train.py +225 -0
  7. source_code/SegMamba/4_predict.py +261 -0
  8. source_code/SegMamba/5_compute_metrics.py +175 -0
  9. source_code/SegMamba/6_visualize_predictions.py +219 -0
  10. source_code/SegMamba/README.md +132 -0
  11. source_code/SegMamba/causal-conv1d/.DS_Store +0 -0
  12. source_code/SegMamba/causal-conv1d/AUTHORS +1 -0
  13. source_code/SegMamba/causal-conv1d/LICENSE +29 -0
  14. source_code/SegMamba/causal-conv1d/README.md +1 -0
  15. source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py +3 -0
  16. source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py +104 -0
  17. source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/.ninja_log +5 -0
  18. source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/build.ninja +40 -0
  19. source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO +29 -0
  20. source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt +16 -0
  21. source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt +1 -0
  22. source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt +3 -0
  23. source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt +2 -0
  24. source_code/SegMamba/causal-conv1d/causal_conv1d/__init__.py +3 -0
  25. source_code/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py +104 -0
  26. source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp +333 -0
  27. source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.h +53 -0
  28. source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu +525 -0
  29. source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h +64 -0
  30. source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu +350 -0
  31. source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu +96 -0
  32. source_code/SegMamba/causal-conv1d/csrc/static_switch.h +25 -0
  33. source_code/SegMamba/causal-conv1d/setup.py +264 -0
  34. source_code/SegMamba/causal-conv1d/tests/test_causal_conv1d.py +173 -0
  35. source_code/SegMamba/light_training/.DS_Store +0 -0
  36. source_code/SegMamba/light_training/augment/multi_processor.py +10 -0
  37. source_code/SegMamba/light_training/augment/train_augment.py +279 -0
  38. source_code/SegMamba/light_training/dataloading/__init__.py +0 -0
  39. source_code/SegMamba/light_training/dataloading/base_data_loader.py +213 -0
  40. source_code/SegMamba/light_training/dataloading/dataset.py +318 -0
  41. source_code/SegMamba/light_training/dataloading/dataset_sdm_edge.py +331 -0
  42. source_code/SegMamba/light_training/dataloading/get_train_val_test_datalist.py +36 -0
  43. source_code/SegMamba/light_training/dataloading/utils.py +25 -0
  44. source_code/SegMamba/light_training/dataloading_global/__init__.py +0 -0
  45. source_code/SegMamba/light_training/dataloading_global/dataset.py +329 -0
  46. source_code/SegMamba/light_training/dataloading_global/utils.py +27 -0
  47. source_code/SegMamba/light_training/evaluation/metric.py +406 -0
  48. source_code/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py +27 -0
  49. source_code/SegMamba/light_training/examples/2_preprocessing_AIIB23.py +130 -0
  50. source_code/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py +94 -0
source_code/SegMamba/.DS_Store ADDED
Binary file (8.2 kB). View file
 
source_code/SegMamba/.gitignore ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea/
source_code/SegMamba/0_inference.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Smoke test: run one SegMamba forward pass on a random BraTS-shaped volume."""

import torch
from model_segmamba.segmamba import SegMamba


def main():
    """Build SegMamba and print the output shape of a single forward pass."""
    # Fall back to CPU so the smoke test still runs on machines without CUDA
    # (the original crashed with a hard .cuda() call at import time).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # (batch, modalities, D, H, W): 4 MRI modalities, one 128^3 patch.
    t1 = torch.rand(1, 4, 128, 128, 128, device=device)

    model = SegMamba(in_chans=4,
                     out_chans=4,
                     depths=[2, 2, 2, 2],
                     feat_size=[48, 96, 192, 384]).to(device)
    model.eval()

    # Inference only: no autograd graph needed.
    with torch.no_grad():
        out = model(t1)

    print(out.shape)


if __name__ == "__main__":
    main()
source_code/SegMamba/1_rename_mri_data.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os


def main():
    """Strip the long BraTS2023 case prefix from every file in each case folder.

    ``BraTS-GLI-00000-000-t1c.nii.gz`` becomes ``t1c.nii.gz``; files without a
    ``-`` in their name, or whose short name already exists, are left alone.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="Rename BraTS2023 case files to short names (t1c/t1n/t2f/t2w/seg.nii.gz)."
    )
    parser.add_argument(
        "--data_dir",
        type=str,
        default="./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/",
        help="BraTS2023 directory that contains case folders.",
    )
    root = parser.parse_args().data_dir

    if not os.path.isdir(root):
        raise FileNotFoundError(f"data_dir not found: {root}")

    for case in sorted(os.listdir(root)):
        case_path = os.path.join(root, case)
        if not os.path.isdir(case_path):
            continue

        for fname in os.listdir(case_path):
            if "-" not in fname:
                # Already a short name; nothing to do.
                continue

            short = fname.rsplit("-", 1)[-1]
            src = os.path.join(case_path, fname)
            dst = os.path.join(case_path, short)

            if os.path.exists(dst):
                # already renamed (or conflict). Skip to be safe.
                continue

            os.rename(src, dst)
            print(f"{dst} 命名成功")


if __name__ == "__main__":
    main()
source_code/SegMamba/2_preprocessing_mri.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor
3
+ import argparse
4
+
# Modality volumes expected inside every case folder, in the channel order
# the preprocessor stacks them.
data_filename = [
    "t2w.nii.gz",
    "t2f.nii.gz",
    "t1n.nii.gz",
    "t1c.nii.gz",
]
# Ground-truth segmentation file name inside each case folder.
seg_filename = "seg.nii.gz"
10
+
11
+ def _parse_spacing(s: str):
12
+ parts = [p.strip() for p in s.split(",") if p.strip()]
13
+ if len(parts) != 3:
14
+ raise ValueError(f"output_spacing should be like '1,1,1', got: {s}")
15
+ return [float(parts[0]), float(parts[1]), float(parts[2])]
16
+
17
+
18
def main():
    """CLI entry point: optionally plan, then run BraTS2023 MRI preprocessing."""
    parser = argparse.ArgumentParser(description="BraTS2023 preprocessing (resample/normalization/cropping).")
    parser.add_argument(
        "--base_dir",
        type=str,
        default="./data/raw_data/BraTS2023/",
        help="Base directory that contains the BraTS2023 image_dir folder.",
    )
    parser.add_argument(
        "--image_dir",
        type=str,
        default="ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData",
        help="Folder name under base_dir.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="./data/fullres/train/",
        help="Output directory for preprocessed npz/npy/pkl files.",
    )
    parser.add_argument(
        "--output_spacing",
        type=str,
        default="1,1,1",
        help="Target spacing, e.g. '1,1,1'.",
    )
    parser.add_argument(
        "--num_processes",
        type=int,
        default=8,
        help="Number of worker processes for preprocessing.",
    )
    parser.add_argument(
        "--only_plan",
        action="store_true",
        help="Only run planning (statistics) and exit.",
    )
    parser.add_argument(
        "--skip_plan",
        action="store_true",
        help="Skip planning step.",
    )
    args = parser.parse_args()

    # Module-level data_filename / seg_filename define the per-case file layout.
    preprocessor = MultiModalityPreprocessor(
        base_dir=args.base_dir,
        image_dir=args.image_dir,
        data_filenames=data_filename,
        seg_filename=seg_filename,
    )

    # Planning gathers dataset statistics; run() presumably consumes them —
    # see MultiModalityPreprocessor for the exact contract.
    if not args.skip_plan:
        preprocessor.run_plan()
        if args.only_plan:
            return

    out_spacing = _parse_spacing(args.output_spacing)
    # BraTS2023 foreground labels are {1, 2, 3}; background 0 is implicit.
    preprocessor.run(
        output_spacing=out_spacing,
        output_dir=args.output_dir,
        all_labels=[1, 2, 3],
        num_processes=args.num_processes,
    )


if __name__ == "__main__":
    main()
source_code/SegMamba/3_train.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from light_training.dataloading.dataset import get_train_val_test_loader_from_train
3
+ import torch
4
+ import torch.nn as nn
5
+ from monai.inferers import SlidingWindowInferer
6
+ from light_training.evaluation.metric import dice
7
+ from light_training.trainer import Trainer
8
+ from monai.utils import set_determinism
9
+ from light_training.utils.files_helper import save_new_model_and_delete_last
10
+ from monai.losses.dice import DiceLoss
11
+ set_determinism(123)
12
+ import os
13
+ import argparse
14
+
15
def func(m, epochs):
    """Gaussian ramp weight: rises from exp(-10) at m=0 to 1.0 at m=epochs."""
    remaining = 1 - m / epochs
    return np.exp(-10 * remaining ** 2)
17
+
18
class BraTSTrainer(Trainer):
    """light_training Trainer specialisation that trains SegMamba on BraTS2023.

    Optimises 4-class cross-entropy and validates with per-region Dice
    (TC/WT/ET), checkpointing the model with the best mean Dice.
    """

    def __init__(
        self,
        env_type,
        max_epochs,
        batch_size,
        device="cpu",
        val_every=1,
        num_gpus=1,
        logdir="./logs/",
        roi_size=(128, 128, 128),
        augmentation=True,
        train_process=18,
        master_ip='localhost',
        master_port=17750,
        training_script="train.py",
    ):
        # Positional argument order must match light_training.trainer.Trainer.
        super().__init__(
            env_type,
            max_epochs,
            batch_size,
            device,
            val_every,
            num_gpus,
            logdir,
            master_ip,
            master_port,
            training_script,
            train_process=train_process,
        )
        # Sliding-window inference covers the full volume during validation.
        self.window_infer = SlidingWindowInferer(roi_size=list(roi_size), sw_batch_size=1, overlap=0.5)
        self.augmentation = augmentation
        # Imported lazily so importing this module does not require the
        # compiled mamba/causal-conv1d extensions.
        from model_segmamba.segmamba import SegMamba

        self.model = SegMamba(in_chans=4,
                              out_chans=4,
                              depths=[2,2,2,2],
                              feat_size=[48, 96, 192, 384])

        self.patch_size = list(roi_size)
        self.best_mean_dice = 0.0
        self.ce = nn.CrossEntropyLoss()
        self.mse = nn.MSELoss()
        self.train_process = train_process
        self.model_save_path = os.path.join(logdir, "model")
        # nnU-Net-style SGD: high Nesterov momentum, paired with poly LR decay.
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5,
                                         momentum=0.99, nesterov=True)

        self.scheduler_type = "poly"
        self.cross = nn.CrossEntropyLoss()

    def training_step(self, batch):
        """One optimisation step: forward pass + cross-entropy on integer labels."""
        image, label = self.get_input(batch)

        pred = self.model(image)

        loss = self.cross(pred, label)

        self.log("training_loss", loss, step=self.global_step)

        return loss

    def convert_labels(self, labels):
        ## TC, WT and ET
        # Map BraTS integer labels to the three overlapping evaluation regions:
        # TC = {1, 3}, WT = {1, 2, 3}, ET = {3}; stacked along dim 1 (channels).
        result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

        return torch.cat(result, dim=1).float()


    def get_input(self, batch):
        """Split a loader batch into (image, label); label becomes long (B, D, H, W)."""
        image = batch["data"]
        label = batch["seg"]

        # Drop the singleton channel axis and cast for CrossEntropyLoss.
        label = label[:, 0].long()
        return image, label

    def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]):
        """Binary Dice for one region; the constant 50 is an HD95 placeholder.

        NOTE(review): mutable default for voxel_spacing is never mutated here,
        and the spacing is unused — kept as-is for signature compatibility.
        """
        if pred.sum() > 0 and gt.sum() > 0:
            d = dice(pred, gt)
            return np.array([d, 50])

        elif gt.sum() == 0 and pred.sum() == 0:
            # Both masks empty: treat as a perfect match.
            return np.array([1.0, 50])

        else:
            return np.array([0.0, 50])

    def validation_step(self, batch):
        """Forward one validation case and return [TC, WT, ET] Dice scores."""
        image, label = self.get_input(batch)

        output = self.model(image)

        output = output.argmax(dim=1)

        # Re-add a channel axis so convert_labels can stack regions on dim 1.
        output = output[:, None]
        output = self.convert_labels(output)

        label = label[:, None]
        label = self.convert_labels(label)

        output = output.cpu().numpy()
        target = label.cpu().numpy()

        dices = []

        c = 3
        for i in range(0, c):
            pred_c = output[:, i]
            target_c = target[:, i]

            cal_dice, _ = self.cal_metric(target_c, pred_c)
            dices.append(cal_dice)

        return dices

    def validation_end(self, val_outputs):
        """Aggregate per-case Dice, log TC/WT/ET, and checkpoint best/final models."""
        dices = val_outputs

        # NOTE(review): assumes the Trainer base aggregates validation_step
        # outputs so that dices[0..2] are arrays of per-case TC/WT/ET scores —
        # confirm against light_training.trainer.Trainer.
        tc, wt, et = dices[0].mean(), dices[1].mean(), dices[2].mean()

        print(f"dices is {tc, wt, et}")

        mean_dice = (tc + wt + et) / 3

        self.log("tc", tc, step=self.epoch)
        self.log("wt", wt, step=self.epoch)
        self.log("et", et, step=self.epoch)

        self.log("mean_dice", mean_dice, step=self.epoch)

        # Keep only the single best checkpoint (helper deletes older "best_model" files).
        if mean_dice > self.best_mean_dice:
            self.best_mean_dice = mean_dice
            save_new_model_and_delete_last(self.model,
                                           os.path.join(self.model_save_path,
                                                        f"best_model_{mean_dice:.4f}.pt"),
                                           delete_symbol="best_model")

        # Always refresh the rolling "final" checkpoint.
        save_new_model_and_delete_last(self.model,
                                       os.path.join(self.model_save_path,
                                                    f"final_model_{mean_dice:.4f}.pt"),
                                       delete_symbol="final_model")


        # Periodic safety snapshot every 100 epochs (never deleted).
        if (self.epoch + 1) % 100 == 0:
            torch.save(self.model.state_dict(), os.path.join(self.model_save_path, f"tmp_model_ep{self.epoch}_{mean_dice:.4f}.pt"))

        print(f"mean_dice is {mean_dice}")
165
+
166
+ def _parse_csv_ints(s: str, n: int):
167
+ parts = [p.strip() for p in s.split(",") if p.strip()]
168
+ if len(parts) != n:
169
+ raise ValueError(f"expect {n} integers like '128,128,128', got: {s}")
170
+ return [int(x) for x in parts]
171
+
172
+
173
+ def _parse_augmentation(s: str):
174
+ s = str(s).strip().lower()
175
+ if s in {"true", "1", "yes", "y"}:
176
+ return True
177
+ if s in {"false", "0", "no", "n"}:
178
+ return False
179
+ # allow special modes used by Trainer.get_multi_processor_loader
180
+ # e.g. nomirror / onlymirror / onlyspatial
181
+ return s
182
+
183
+
184
def main():
    """Parse CLI options, construct the BraTSTrainer, and start training."""
    parser = argparse.ArgumentParser(description="SegMamba BraTS2023 training.")
    parser.add_argument("--data_dir", type=str, default="./data/fullres/train", help="Preprocessed data directory (contains *.npz).")
    parser.add_argument("--logdir", type=str, default="./logs/segmamba", help="Log/checkpoint directory.")
    parser.add_argument("--env", type=str, default="pytorch", choices=["pytorch", "DDP", "ddp"], help="Training environment.")
    parser.add_argument("--max_epoch", type=int, default=1000)
    parser.add_argument("--batch_size", type=int, default=2)
    parser.add_argument("--val_every", type=int, default=2)
    parser.add_argument("--num_gpus", type=int, default=1)
    parser.add_argument("--device", type=str, default="cuda:0", help="Device for single GPU; DDP will use LOCAL_RANK.")
    parser.add_argument("--roi_size", type=str, default="128,128,128", help="Patch/ROI size, e.g. '128,128,128'.")
    parser.add_argument("--augmentation", type=str, default="true", help="true/false/nomirror/onlymirror/onlyspatial")
    parser.add_argument("--train_process", type=int, default=18, help="Number of augmentation worker processes (per rank).")
    parser.add_argument("--master_port", type=int, default=17759)
    # torchrun launcher will append this; ignore it here (Trainer will read it too)
    parser.add_argument("--not_call_launch", action="store_true", help=argparse.SUPPRESS)
    # parse_known_args tolerates extra launcher-injected flags instead of erroring.
    args, _ = parser.parse_known_args()

    roi_size = _parse_csv_ints(args.roi_size, 3)
    augmentation = _parse_augmentation(args.augmentation)

    trainer = BraTSTrainer(
        env_type=args.env,
        max_epochs=args.max_epoch,
        batch_size=args.batch_size,
        device=args.device,
        logdir=args.logdir,
        val_every=args.val_every,
        num_gpus=args.num_gpus,
        master_port=args.master_port,
        training_script=__file__,
        roi_size=roi_size,
        augmentation=augmentation,
        train_process=args.train_process,
    )

    # Split the preprocessed cases into train/val/test (split defaults live in
    # the dataset helper) and launch the training loop.
    train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(args.data_dir)
    trainer.train(train_dataset=train_ds, val_dataset=val_ds)


if __name__ == "__main__":
    main()
source_code/SegMamba/4_predict.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import glob
3
+ import os
4
+ import re
5
+ import sys
6
+
7
+ import numpy as np
8
+ import torch
9
+ import SimpleITK as sitk
10
+
11
+ # Prefer pip-installed MONAI over the local monai/ folder.
12
+ os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1")
13
+ _repo_root = os.path.abspath(os.path.dirname(__file__))
14
+ if "" in sys.path:
15
+ sys.path.remove("")
16
+ if _repo_root in sys.path:
17
+ sys.path.remove(_repo_root)
18
+ import monai # noqa: E402
19
+ sys.path.insert(0, _repo_root)
20
+
21
+ from monai.inferers import SlidingWindowInferer
22
+ from monai.utils import set_determinism
23
+
24
+ from light_training.dataloading.dataset import MedicalDataset, get_train_val_test_loader_from_train
25
+ from light_training.evaluation.metric import dice
26
+ from light_training.prediction import Predictor
27
+ from light_training.trainer import Trainer
28
+
29
+ set_determinism(123)
30
+
31
+
32
+ def _parse_csv_ints(s: str, n: int):
33
+ parts = [p.strip() for p in str(s).split(",") if p.strip()]
34
+ if len(parts) != n:
35
+ raise ValueError(f"expect {n} integers like '128,128,128', got: {s}")
36
+ return [int(x) for x in parts]
37
+
38
+
39
+ def _parse_csv_floats(s: str, n: int):
40
+ parts = [p.strip() for p in str(s).split(",") if p.strip()]
41
+ if len(parts) != n:
42
+ raise ValueError(f"expect {n} floats like '1,1,1', got: {s}")
43
+ return [float(x) for x in parts]
44
+
45
+
46
+ def _find_ckpt_from_logdir(logdir: str, prefer: str = "best") -> str:
47
+ model_dir = os.path.join(logdir, "model")
48
+ if not os.path.isdir(model_dir):
49
+ raise FileNotFoundError(f"model dir not found: {model_dir}")
50
+
51
+ best = sorted(glob.glob(os.path.join(model_dir, "best_model_*.pt")))
52
+ final = sorted(glob.glob(os.path.join(model_dir, "final_model_*.pt")))
53
+ tmp = sorted(glob.glob(os.path.join(model_dir, "tmp_model_ep*.pt")))
54
+ any_pt = sorted(glob.glob(os.path.join(model_dir, "*.pt")))
55
+
56
+ def pick_by_score(paths):
57
+ # filenames like best_model_0.9038.pt / final_model_0.9038.pt
58
+ scored = []
59
+ for p in paths:
60
+ m = re.search(r"_(\d+\\.?\\d*)\\.pt$", os.path.basename(p))
61
+ if m is None:
62
+ continue
63
+ try:
64
+ scored.append((float(m.group(1)), p))
65
+ except ValueError:
66
+ continue
67
+ if scored:
68
+ scored.sort(key=lambda x: x[0], reverse=True)
69
+ return scored[0][1]
70
+ return None
71
+
72
+ if prefer == "best":
73
+ picked = pick_by_score(best) or (best[-1] if best else None)
74
+ if picked:
75
+ return picked
76
+ if prefer in {"best", "final"}:
77
+ picked = pick_by_score(final) or (final[-1] if final else None)
78
+ if picked:
79
+ return picked
80
+ if prefer in {"best", "final", "latest"}:
81
+ if tmp:
82
+ tmp.sort(key=lambda p: os.path.getmtime(p), reverse=True)
83
+ return tmp[0]
84
+ if any_pt:
85
+ any_pt.sort(key=lambda p: os.path.getmtime(p), reverse=True)
86
+ return any_pt[0]
87
+
88
+ raise FileNotFoundError(f"no checkpoint found under: {model_dir}")
89
+
90
+
91
class BraTSTrainer(Trainer):
    """Inference-only Trainer: loads a SegMamba checkpoint and writes predictions.

    Reuses the light_training Trainer plumbing (device handling, validation
    loop) without training; each ``validation_step`` runs sliding-window,
    optionally mirrored, inference and saves a 3-channel (TC/WT/ET) NIfTI.
    """

    def __init__(
        self,
        ckpt_path: str,
        save_path: str,
        patch_size,
        sw_batch_size: int = 2,
        overlap: float = 0.5,
        mirror_axes=(0, 1, 2),
        raw_spacing=(1.0, 1.0, 1.0),
        device="cuda:0",
        print_dice: bool = False,
    ):
        # Single-process setup; epochs/batch size are irrelevant for inference.
        super().__init__(
            env_type="pytorch",
            max_epochs=1,
            batch_size=1,
            device=device,
            val_every=1,
            num_gpus=1,
            logdir="",
            master_port=17751,
            training_script=__file__,
        )

        self.patch_size = patch_size
        self.augmentation = False
        self.print_dice = print_dice
        self.save_path = save_path
        self.raw_spacing = raw_spacing

        # Imported lazily so the compiled extensions are only needed at run time.
        from model_segmamba.segmamba import SegMamba

        self.model = SegMamba(
            in_chans=4,
            out_chans=4,
            depths=[2, 2, 2, 2],
            feat_size=[48, 96, 192, 384],
        )
        # strict=True: fail loudly on any architecture/checkpoint mismatch.
        self.load_state_dict(ckpt_path, strict=True)
        self.model.eval()

        # Gaussian weighting smooths seams between overlapping patches.
        window_infer = SlidingWindowInferer(
            roi_size=patch_size,
            sw_batch_size=sw_batch_size,
            overlap=overlap,
            progress=True,
            mode="gaussian",
        )

        self.predictor = Predictor(
            window_infer=window_infer,
            mirror_axes=list(mirror_axes) if mirror_axes is not None else None,
        )

        os.makedirs(self.save_path, exist_ok=True)

    def convert_labels(self, labels):
        ## TC, WT and ET
        # TC = {1, 3}, WT = {1, 2, 3}, ET = {3}; stacked along dim 1 (channels).
        result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

        return torch.cat(result, dim=1).float()

    def get_input(self, batch):
        """Unpack a loader batch; seg is converted to 3-region binary channels."""
        image = batch["data"]
        label = batch["seg"]
        properties = batch["properties"]
        label = self.convert_labels(label)

        return image, label, properties

    def validation_step(self, batch):
        """Predict one case with (optional) mirror TTA and save it as a 4D NIfTI."""
        image, label, properties = self.get_input(batch)
        # The preprocessed datasets used in many setups (including /data/yty/brats23_processed)
        # do NOT contain cropping/resample metadata (shape_before_cropping, bbox_used_for_cropping, ...),
        # so we directly save predictions in the same (D,H,W) space as the inputs.
        #
        # We save as a TRUE 4D NIfTI (t,z,y,x) with t=3 (TC/WT/ET) so that
        # `sitk.GetArrayFromImage` returns shape (3, D, H, W), matching `5_compute_metrics.py`.

        logits = self.predictor.maybe_mirror_and_predict(image, self.model, device=self.device)  # (1,4,D,H,W) on CPU
        pred_lbl = logits.argmax(dim=1)  # (1,D,H,W)
        pred_3c = self.convert_labels(pred_lbl[:, None])[0].cpu().numpy().astype(np.uint8)  # (3,D,H,W)

        # Optional sanity check against the preprocessed segmentation.
        if self.print_dice:
            gt_3c = label[0].cpu().numpy()
            dices = [dice(pred_3c[i], gt_3c[i]) for i in range(3)]
            print(dices)

        # Collated batches may wrap the case name in a list/tuple; unwrap it.
        case_name = properties.get("name", "")
        if isinstance(case_name, (list, tuple)) and len(case_name) > 0:
            case_name = case_name[0]

        out_path = os.path.join(self.save_path, f"{case_name}.nii.gz")
        pred_itk = sitk.GetImageFromArray(pred_3c, isVector=False)
        # 4-tuple spacing: (x, y, z, t) — unit spacing on the channel axis.
        pred_itk.SetSpacing((float(self.raw_spacing[0]), float(self.raw_spacing[1]), float(self.raw_spacing[2]), 1.0))
        sitk.WriteImage(pred_itk, out_path)
        print(f"saved: {out_path}")

        return 0

    def convert_labels_dim0(self, labels):
        ## TC, WT and ET
        # Same region mapping as convert_labels, but stacked along dim 0
        # (for unbatched label volumes).
        result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

        return torch.cat(result, dim=0).float()
197
+
198
+
199
def main():
    """CLI entry point: resolve a checkpoint, build the inference trainer, predict a split."""
    parser = argparse.ArgumentParser(description="SegMamba inference/prediction for BraTS2023.")
    parser.add_argument("--data_dir", type=str, default="./data/fullres/train", help="Preprocessed data directory (contains *.npz).")
    parser.add_argument("--split", type=str, default="test", choices=["train", "val", "test", "all"])
    parser.add_argument("--train_rate", type=float, default=0.7)
    parser.add_argument("--val_rate", type=float, default=0.1)
    parser.add_argument("--test_rate", type=float, default=0.2)
    parser.add_argument("--seed", type=int, default=42)

    parser.add_argument("--ckpt", type=str, default="", help="Checkpoint path (*.pt). If empty, will search under --logdir/model.")
    parser.add_argument("--logdir", type=str, default="./logs/segmamba", help="Training logdir to locate checkpoints when --ckpt is empty.")
    parser.add_argument("--ckpt_prefer", type=str, default="best", choices=["best", "final", "latest"])

    parser.add_argument("--save_dir", type=str, default="./prediction_results/segmamba", help="Directory to save prediction nii.gz.")
    parser.add_argument("--device", type=str, default="cuda:0")
    parser.add_argument("--patch_size", type=str, default="128,128,128")
    parser.add_argument("--sw_batch_size", type=int, default=2)
    parser.add_argument("--overlap", type=float, default=0.5)
    parser.add_argument("--raw_spacing", type=str, default="1,1,1", help="Spacing used when saving NIfTI, e.g. '1,1,1'.")
    parser.add_argument("--no_mirror", action="store_true", help="Disable mirror TTA.")
    parser.add_argument("--print_dice", action="store_true", help="Print dice against preprocessed seg (if available).")
    args = parser.parse_args()

    patch_size = _parse_csv_ints(args.patch_size, 3)
    raw_spacing = _parse_csv_floats(args.raw_spacing, 3)

    # Explicit --ckpt wins; otherwise search the training logdir.
    ckpt_path = args.ckpt.strip()
    if ckpt_path == "":
        ckpt_path = _find_ckpt_from_logdir(args.logdir, prefer=args.ckpt_prefer)
    if not os.path.isfile(ckpt_path):
        raise FileNotFoundError(f"checkpoint not found: {ckpt_path}")
    print(f"Using checkpoint: {ckpt_path}")

    trainer = BraTSTrainer(
        ckpt_path=ckpt_path,
        save_path=args.save_dir,
        patch_size=patch_size,
        sw_batch_size=args.sw_batch_size,
        overlap=args.overlap,
        mirror_axes=None if args.no_mirror else (0, 1, 2),
        raw_spacing=raw_spacing,
        device=args.device,
        print_dice=args.print_dice,
    )

    if args.split == "all":
        # "all" bypasses the split helper and predicts every preprocessed case.
        all_paths = sorted(glob.glob(os.path.join(args.data_dir, "*.npz")))
        ds = MedicalDataset(all_paths, test=False)
    else:
        train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(
            args.data_dir,
            train_rate=args.train_rate,
            val_rate=args.val_rate,
            test_rate=args.test_rate,
            seed=args.seed,
        )
        ds = {"train": train_ds, "val": val_ds, "test": test_ds}[args.split]

    # Reuse the Trainer's validation loop to iterate the dataset and save predictions.
    trainer.validation_single_gpu(ds)


if __name__ == "__main__":
    main()
source_code/SegMamba/5_compute_metrics.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import glob
3
+ import json
4
+ import os
5
+ import sys
6
+
7
+ import numpy as np
8
+ import SimpleITK as sitk
9
+ import torch
10
+ from medpy import metric
11
+
12
# Prefer pip-installed MONAI over the local monai/ folder.
os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1")
_repo_root = os.path.abspath(os.path.dirname(__file__))
# Temporarily remove the implicit "" (cwd) entry and the repo root from
# sys.path so that `import monai` below resolves to the site-packages
# install rather than a vendored monai/ directory next to this script.
if "" in sys.path:
    sys.path.remove("")
if _repo_root in sys.path:
    sys.path.remove(_repo_root)
import monai  # noqa: E402
# Restore the repo root so project-local packages (light_training, ...) import normally.
sys.path.insert(0, _repo_root)

from monai.utils import set_determinism
from tqdm import tqdm

from light_training.dataloading.dataset import MedicalDataset, get_train_val_test_loader_from_train

# Fixed seed so dataset splits / iteration order are reproducible.
set_determinism(123)
28
+
29
def cal_metric(gt, pred, voxel_spacing):
    """Return ``np.array([dice, hd95])`` for one binary segmentation class.

    Parameters
    ----------
    gt, pred : binary ndarrays of the same shape (ground truth / prediction).
    voxel_spacing : sequence of floats passed to medpy's HD95 computation.
    """
    if pred.sum() > 0 and gt.sum() > 0:
        dice = metric.binary.dc(pred, gt)
        hd95 = metric.binary.hd95(pred, gt, voxelspacing=voxel_spacing)
        return np.array([dice, hd95])
    if pred.sum() == 0 and gt.sum() == 0:
        # Bug fix: both masks empty means the class is absent and the model
        # correctly predicted nothing -- score it as perfect rather than as a
        # failure (the old code returned [0.0, 50] here as well).
        return np.array([1.0, 0.0])
    # Exactly one mask is empty: Dice is 0 and HD95 is undefined; keep the
    # original fixed penalty of 50 for backward-compatible aggregation.
    return np.array([0.0, 50])
36
+
37
def each_cases_metric(gt, pred, voxel_spacing):
    """Compute a (3, 2) matrix of [dice, hd95] per class (TC/WT/ET) for one case."""
    num_classes = 3
    scores = np.zeros((num_classes, 2))
    for class_idx in range(num_classes):
        scores[class_idx, ...] = cal_metric(pred[class_idx], gt[class_idx], voxel_spacing)
    print(scores)
    return scores
44
+
45
def convert_labels(labels):
    """Convert a BraTS integer label map into TC, WT and ET channel masks.

    A channel axis of size 3 is prepended; channels are float {0, 1} masks:
    TC = labels in {1, 3}, WT = labels in {1, 2, 3}, ET = label 3.
    """
    labels = labels.unsqueeze(dim=0)
    tumor_core = (labels == 1) | (labels == 3)
    whole_tumor = tumor_core | (labels == 2)
    enhancing = labels == 3
    return torch.cat([tumor_core, whole_tumor, enhancing], dim=0).float()
52
+
53
+
54
+ if __name__ == "__main__":
55
+ parser = argparse.ArgumentParser(description="Compute Dice/HD95 for BraTS2023 (TC/WT/ET) from saved predictions.")
56
+ parser.add_argument("--pred_name", required=True, type=str, help="Prediction folder name under results_root.")
57
+ parser.add_argument("--results_root", type=str, default="prediction_results")
58
+ parser.add_argument("--data_dir", type=str, default="./data/fullres/train", help="Preprocessed data directory (contains *.npz).")
59
+ parser.add_argument(
60
+ "--gt_source",
61
+ type=str,
62
+ default="processed",
63
+ choices=["processed", "raw"],
64
+ help="GT source. 'processed' uses *_seg.npy from preprocessed dataset (recommended for /data/yty/brats23_processed). "
65
+ "'raw' uses seg.nii.gz from --raw_data_dir.",
66
+ )
67
+ parser.add_argument(
68
+ "--raw_data_dir",
69
+ type=str,
70
+ default="./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/",
71
+ help="Raw BraTS2023 training data directory that contains case folders with seg.nii.gz.",
72
+ )
73
+ parser.add_argument("--split", type=str, default="test", choices=["train", "val", "test", "all"])
74
+ parser.add_argument("--train_rate", type=float, default=0.7)
75
+ parser.add_argument("--val_rate", type=float, default=0.1)
76
+ parser.add_argument("--test_rate", type=float, default=0.2)
77
+ parser.add_argument("--seed", type=int, default=42)
78
+ parser.add_argument("--voxel_spacing", type=str, default="1,1,1", help="Voxel spacing for HD95, e.g. '1,1,1'.")
79
+ args = parser.parse_args()
80
+
81
+ voxel_spacing = [float(x) for x in args.voxel_spacing.split(",")]
82
+
83
+ if args.split == "all":
84
+ all_paths = sorted(glob.glob(os.path.join(args.data_dir, "*.npz")))
85
+ ds = MedicalDataset(all_paths, test=False)
86
+ else:
87
+ train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(
88
+ args.data_dir,
89
+ train_rate=args.train_rate,
90
+ val_rate=args.val_rate,
91
+ test_rate=args.test_rate,
92
+ seed=args.seed,
93
+ )
94
+ ds = {"train": train_ds, "val": val_ds, "test": test_ds}[args.split]
95
+
96
+ print(f"Evaluating {len(ds)} cases from split={args.split}")
97
+
98
+ all_results = np.zeros((len(ds), 3, 2), dtype=np.float32)
99
+
100
+ for ind, batch in enumerate(tqdm(ds, total=len(ds))):
101
+ properties = batch["properties"]
102
+ case_name = properties["name"]
103
+ pred_path = os.path.join(args.results_root, args.pred_name, f"{case_name}.nii.gz")
104
+ if not os.path.isfile(pred_path):
105
+ raise FileNotFoundError(f"Prediction not found: {pred_path}")
106
+
107
+ if args.gt_source == "raw":
108
+ gt_path = os.path.join(args.raw_data_dir, case_name, "seg.nii.gz")
109
+ if not os.path.isfile(gt_path):
110
+ raise FileNotFoundError(f"GT not found: {gt_path}")
111
+ gt_itk = sitk.ReadImage(gt_path)
112
+ gt_array = sitk.GetArrayFromImage(gt_itk).astype(np.int32)
113
+ gt_array = torch.from_numpy(gt_array)
114
+ gt_array = convert_labels(gt_array).numpy()
115
+ else:
116
+ # preprocessed GT (same space as saved predictions from 4_predict.py)
117
+ if "seg" not in batch:
118
+ raise KeyError("gt_source=processed requires 'seg' in dataset samples, but it's missing.")
119
+ seg = batch["seg"] # expected shape: (1, D, H, W)
120
+ if isinstance(seg, np.ndarray):
121
+ seg_t = torch.from_numpy(seg)
122
+ else:
123
+ # np.memmap is also an ndarray subclass, keep it generic
124
+ seg_t = torch.from_numpy(np.asarray(seg))
125
+ if seg_t.ndim == 4 and seg_t.shape[0] == 1:
126
+ seg_t = seg_t[0]
127
+ gt_array = convert_labels(seg_t).numpy()
128
+
129
+ pred_itk = sitk.ReadImage(pred_path)
130
+ pred_array = sitk.GetArrayFromImage(pred_itk)
131
+
132
+ m = each_cases_metric(gt_array, pred_array, voxel_spacing)
133
+ all_results[ind, ...] = m
134
+
135
+ out_dir = os.path.join(args.results_root, "result_metrics")
136
+ os.makedirs(out_dir, exist_ok=True)
137
+ out_path = os.path.join(out_dir, f"{args.pred_name}.npy")
138
+ np.save(out_path, all_results)
139
+
140
+ result = np.load(out_path)
141
+ mean_per_class = result.mean(axis=0)
142
+ std_per_class = result.std(axis=0)
143
+ mean_dice = float(mean_per_class[:, 0].mean())
144
+ mean_hd95 = float(mean_per_class[:, 1].mean())
145
+
146
+ summary = {
147
+ "pred_name": args.pred_name,
148
+ "results_root": args.results_root,
149
+ "data_dir": args.data_dir,
150
+ "split": args.split,
151
+ "gt_source": args.gt_source,
152
+ "raw_data_dir": args.raw_data_dir if args.gt_source == "raw" else None,
153
+ "voxel_spacing": voxel_spacing,
154
+ "num_cases": int(result.shape[0]),
155
+ "mean_per_class": mean_per_class.tolist(), # [TC, WT, ET] x [dice, hd95]
156
+ "std_per_class": std_per_class.tolist(),
157
+ "mean_dice": mean_dice,
158
+ "mean_hd95": mean_hd95,
159
+ }
160
+ summary_path = os.path.join(out_dir, f"{args.pred_name}_summary.json")
161
+ with open(summary_path, "w") as f:
162
+ json.dump(summary, f, indent=2)
163
+
164
+ print("saved:", out_path)
165
+ print("summary:", summary_path)
166
+ print(result.shape)
167
+ print("mean(TC/WT/ET) [dice, hd95]:")
168
+ print(mean_per_class)
169
+ print("std(TC/WT/ET) [dice, hd95]:")
170
+ print(std_per_class)
171
+ print("mean dice:", mean_dice)
172
+ print("mean hd95:", mean_hd95)
173
+
174
+
175
+
source_code/SegMamba/6_visualize_predictions.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import sys
4
+
5
+ import numpy as np
6
+ import SimpleITK as sitk
7
+
8
+ import matplotlib
9
+ matplotlib.use("Agg")
10
+ import matplotlib.pyplot as plt
11
+ from matplotlib.patches import Patch
12
+
13
+
14
# Channel index of each MRI modality inside the preprocessed per-case array
# (case_name.npy is 4D and indexed by this value in _load_processed_image;
# assumed layout is modality-first -- TODO confirm against the preprocessing step).
MODALITY_MAP = {
    "t2w": 0,
    "t2f": 1,
    "t1n": 2,
    "t1c": 3,
}
20
+
21
+
22
+ def _parse_cases(s: str):
23
+ if not s:
24
+ return []
25
+ return [p.strip() for p in s.split(",") if p.strip()]
26
+
27
+
28
+ def _normalize_slice(img2d: np.ndarray) -> np.ndarray:
29
+ p1, p99 = np.percentile(img2d, (1, 99))
30
+ if p99 <= p1:
31
+ return np.zeros_like(img2d, dtype=np.float32)
32
+ img = (img2d - p1) / (p99 - p1)
33
+ return np.clip(img, 0.0, 1.0).astype(np.float32)
34
+
35
+
36
+ def _pred_to_three_channels(pred: np.ndarray) -> np.ndarray:
37
+ # Accept either 4D (3, D, H, W) or 3D label map (D, H, W) with labels 0-3.
38
+ if pred.ndim == 4:
39
+ return pred
40
+ if pred.ndim != 3:
41
+ raise ValueError(f"unexpected pred shape: {pred.shape}")
42
+ # label -> TC/WT/ET
43
+ labels = pred
44
+ tc = (labels == 1) | (labels == 3)
45
+ wt = (labels == 1) | (labels == 2) | (labels == 3)
46
+ et = labels == 3
47
+ return np.stack([tc, wt, et], axis=0).astype(np.uint8)
48
+
49
+
50
+ def _pick_slices(mask_3c: np.ndarray, num_slices: int) -> list[int]:
51
+ # mask_3c: (3, D, H, W)
52
+ mask_sum = mask_3c.sum(axis=0) # (D, H, W)
53
+ per_slice = mask_sum.reshape(mask_sum.shape[0], -1).sum(axis=1)
54
+ if per_slice.max() == 0:
55
+ # fallback: evenly spaced slices
56
+ return sorted(set(np.linspace(0, mask_sum.shape[0] - 1, num_slices, dtype=int).tolist()))
57
+ idx = np.argsort(per_slice)[::-1]
58
+ chosen = []
59
+ for i in idx:
60
+ if len(chosen) >= num_slices:
61
+ break
62
+ chosen.append(int(i))
63
+ return sorted(chosen)
64
+
65
+
66
+ def _overlay_mask(gray: np.ndarray, masks: list[np.ndarray]) -> np.ndarray:
67
+ # gray: (H, W), masks: [tc, wt, et]
68
+ rgb = np.stack([gray, gray, gray], axis=-1)
69
+ colors = [
70
+ (1.0, 0.0, 0.0), # TC - red
71
+ (0.0, 1.0, 0.0), # WT - green
72
+ (1.0, 1.0, 0.0), # ET - yellow
73
+ ]
74
+ alphas = [0.5, 0.25, 0.5]
75
+ for mask, color, alpha in zip(masks, colors, alphas):
76
+ m = mask.astype(bool)
77
+ if m.any():
78
+ rgb[m] = rgb[m] * (1.0 - alpha) + np.array(color) * alpha
79
+ return rgb
80
+
81
+
82
+ def _load_processed_image(processed_dir: str, case_name: str, modality: int) -> np.ndarray:
83
+ img_path = os.path.join(processed_dir, f"{case_name}.npy")
84
+ if not os.path.isfile(img_path):
85
+ raise FileNotFoundError(f"processed image not found: {img_path}")
86
+ arr = np.load(img_path, mmap_mode="r")
87
+ if arr.ndim != 4:
88
+ raise ValueError(f"unexpected image shape: {arr.shape}")
89
+ return np.asarray(arr[modality], dtype=np.float32) # (D, H, W)
90
+
91
+
92
def _load_prediction(pred_dir: str, case_name: str) -> np.ndarray:
    """Read ``case_name.nii.gz`` and return it as a (3, D, H, W) TC/WT/ET stack."""
    path = os.path.join(pred_dir, f"{case_name}.nii.gz")
    if not os.path.isfile(path):
        raise FileNotFoundError(f"prediction not found: {path}")
    volume = sitk.GetArrayFromImage(sitk.ReadImage(path))
    return _pred_to_three_channels(np.asarray(volume))
99
+
100
def _load_gt(processed_dir: str, case_name: str) -> np.ndarray:
    """Load the preprocessed GT label map and return a (3, D, H, W) TC/WT/ET stack."""
    path = os.path.join(processed_dir, f"{case_name}_seg.npy")
    if not os.path.isfile(path):
        raise FileNotFoundError(f"gt seg not found: {path}")
    seg = np.asarray(np.load(path, mmap_mode="r"))
    if seg.ndim == 4 and seg.shape[0] == 1:
        # Drop the singleton channel axis so it is treated as a 3D label map.
        seg = seg[0]
    return _pred_to_three_channels(seg)
109
+
110
+
111
def visualize_case(case_name: str, pred_dir: str, processed_dir: str, modality: int, num_slices: int, out_dir: str, show_gt: bool = True):
    """Render per-slice overlays (raw / prediction / optional GT) for one case.

    Picks the ``num_slices`` axial slices with the most predicted foreground,
    blends the TC/WT/ET masks onto the chosen modality, and saves a single
    PNG grid under ``out_dir``.  Returns the path of the saved PNG.
    """
    img = _load_processed_image(processed_dir, case_name, modality)  # (D, H, W)
    pred = _load_prediction(pred_dir, case_name)  # (3, D, H, W)
    gt = None
    if show_gt:
        # GT is optional: silently fall back to prediction-only columns.
        try:
            gt = _load_gt(processed_dir, case_name)
        except FileNotFoundError:
            gt = None

    # Fail fast when prediction/GT were produced in a different space.
    if pred.shape[1:] != img.shape:
        raise ValueError(f"shape mismatch for {case_name}: img={img.shape}, pred={pred.shape}")
    if gt is not None and gt.shape[1:] != img.shape:
        raise ValueError(f"shape mismatch for {case_name}: img={img.shape}, gt={gt.shape}")

    slice_ids = _pick_slices(pred, num_slices)

    # One row per slice; columns: raw, prediction overlay, (optional) GT overlay.
    ncols = 3 if gt is not None else 2
    fig, axes = plt.subplots(nrows=len(slice_ids), ncols=ncols, figsize=(4 * ncols, 3 * len(slice_ids)))
    # Normalize `axes` to a 2D (row, col) array regardless of the grid shape
    # plt.subplots returned (it squeezes singleton dimensions).
    if len(slice_ids) == 1:
        axes = np.array([axes])
    if ncols == 2 and axes.ndim == 1:
        axes = axes[None, :]

    for row, z in enumerate(slice_ids):
        img2d = img[z]
        gray = _normalize_slice(img2d)
        tc = pred[0, z]
        wt = pred[1, z]
        et = pred[2, z]

        # Column 0: the raw (normalized) modality slice.
        axes[row, 0].imshow(gray, cmap="gray")
        axes[row, 0].set_title(f"{case_name} z={z} (raw)")
        axes[row, 0].axis("off")

        # Column 1: prediction overlay.
        overlay = _overlay_mask(gray, [tc, wt, et])
        axes[row, 1].imshow(overlay)
        axes[row, 1].set_title(f"{case_name} z={z} (pred)")
        axes[row, 1].axis("off")

        # Column 2 (only when GT is available): ground-truth overlay.
        if gt is not None:
            gt_tc = gt[0, z]
            gt_wt = gt[1, z]
            gt_et = gt[2, z]
            gt_overlay = _overlay_mask(gray, [gt_tc, gt_wt, gt_et])
            axes[row, 2].imshow(gt_overlay)
            axes[row, 2].set_title(f"{case_name} z={z} (gt)")
            axes[row, 2].axis("off")

    # Shared color legend; colors match _overlay_mask.
    legend = [
        Patch(color=(1.0, 0.0, 0.0), label="TC"),
        Patch(color=(0.0, 1.0, 0.0), label="WT"),
        Patch(color=(1.0, 1.0, 0.0), label="ET"),
    ]
    fig.legend(handles=legend, loc="lower center", ncol=3)
    # Reserve the bottom 5% of the figure for the legend.
    fig.tight_layout(rect=[0, 0.05, 1, 1])

    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, f"{case_name}_overlay.png")
    fig.savefig(out_path, dpi=150)
    plt.close(fig)

    return out_path
174
+
175
+
176
def main():
    """CLI entry point: choose cases, render overlay PNGs, print saved paths."""
    parser = argparse.ArgumentParser(description="Visualize SegMamba predictions (overlay on processed images).")
    parser.add_argument("--pred_dir", type=str, required=True, help="Prediction folder containing case_name.nii.gz.")
    parser.add_argument("--processed_dir", type=str, required=True, help="Processed data dir containing case_name.npy.")
    parser.add_argument("--out_dir", type=str, default="./prediction_results/visualizations")
    parser.add_argument("--modality", type=str, default="t2f", help="t2w|t2f|t1n|t1c or an int index.")
    parser.add_argument("--num_cases", type=int, default=5)
    parser.add_argument("--num_slices", type=int, default=3)
    parser.add_argument("--cases", type=str, default="", help="Comma-separated case names to visualize.")
    parser.add_argument("--no_gt", action="store_true", help="Disable GT overlay (prediction only).")
    args = parser.parse_args()

    # Modality may be a bare index string ("0".."3") or a name in MODALITY_MAP;
    # note an unknown name silently falls back to index 1 (t2f).
    if args.modality.isdigit():
        modality = int(args.modality)
    else:
        modality = MODALITY_MAP.get(args.modality.lower(), 1)
    if modality < 0 or modality > 3:
        raise ValueError("modality index must be 0..3")

    cases = _parse_cases(args.cases)
    if not cases:
        # Default case list: the first `num_cases` predictions found on disk.
        pred_files = sorted([f for f in os.listdir(args.pred_dir) if f.endswith(".nii.gz")])
        # splitext twice to strip both extensions of "name.nii.gz".
        cases = [os.path.splitext(os.path.splitext(f)[0])[0] for f in pred_files][: args.num_cases]

    if not cases:
        print("No cases found.")
        sys.exit(0)

    print(f"Visualizing {len(cases)} cases, modality={modality}")
    for case_name in cases:
        out_path = visualize_case(
            case_name=case_name,
            pred_dir=args.pred_dir,
            processed_dir=args.processed_dir,
            modality=modality,
            num_slices=args.num_slices,
            out_dir=args.out_dir,
            show_gt=not args.no_gt,
        )
        print(f"saved: {out_path}")


if __name__ == "__main__":
    main()
source_code/SegMamba/README.md ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SegMamba
2
+
3
+ **Recent news: If you are interested in the research about vision language models, please refers to the latest work: https://github.com/MrGiovanni/RadGPT (ICCV2025)**
4
+
5
+ **Now we have open-sourced the pre-processing, training, inference, and metrics computation codes.**
6
+
7
+ SegMamba: Long-range Sequential Modeling Mamba For 3D Medical Image Segmentation
8
+
9
+ [https://arxiv.org/abs/2401.13560](https://arxiv.org/abs/2401.13560)
10
+
11
+ ![](images/method_figure.jpg)
12
+
13
+ ![](images/modules.jpg)
14
+
15
+ Our advantage in speed and memory.
16
+ ![](images/segmamba_ablation.jpg)
17
+
18
+ ## Contact
19
+ If you have any questions about our project, please feel free to contact us by email at zxing565@connect.hkust-gz.edu.cn or via WeChat at 18340097191. Furthermore, the data underlying this article will be shared on reasonable request to gaof57@mail.sysu.edu.cn.
20
+
21
+ ## Environment install
22
+ Clone this repository and navigate to the root directory of the project.
23
+
24
+ ```bash
25
+ git clone https://github.com/ge-xing/SegMamba.git
26
+
27
+ cd SegMamba
28
+ ```
29
+ ### Install causal-conv1d
30
+
31
+ ```bash
32
+ cd causal-conv1d
33
+
34
+ python setup.py install
35
+ ```
36
+
37
+ ### Install mamba
38
+
39
+ ```bash
40
+ cd mamba
41
+
42
+ python setup.py install
43
+ ```
44
+
45
+ ### Install monai
46
+
47
+ ```bash
48
+ pip install monai
49
+ ```
50
+
51
+ ## Simple test
52
+
53
+ ```bash
54
+ python 0_inference.py
55
+ ```
56
+
57
+ ## Preprocessing, training, testing, inference, and metrics computation
58
+
59
+ ### Data downloading
60
+
61
+ Data is from [https://arxiv.org/abs/2305.17033](https://arxiv.org/abs/2305.17033)
62
+
63
+ Download from Baidu Disk [https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22提取码ty22](https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22)
64
+
65
+ Download from OneDrive [https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B](https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B)
66
+
67
+ ### Preprocessing
68
+ In my setting, the data directory of BraTS2023 is : "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"
69
+
70
+ First, we need to run the rename process.
71
+
72
+ ```bash
73
+ python 1_rename_mri_data.py
74
+ ```
75
+
76
+ Then, we need to run the pre-processing code to do resample, normalization, and crop processes.
77
+
78
+ ```bash
79
+ python 2_preprocessing_mri.py
80
+ ```
81
+
82
+ After pre-processing, the data structure will be in this format:
83
+
84
+ ![](images/data_structure.jpg)
85
+ ### Training
86
+
87
+ When the pre-processing process is done, we can train our model.
88
+
89
+ We mainly use the pre-processde data from last step: **data_dir = "./data/fullres/train"**
90
+
91
+
92
+ ```bash
93
+ python 3_train.py
94
+ ```
95
+
96
+ The training logs and checkpoints are saved in:
97
+ **logdir = f"./logs/segmamba"**
98
+
99
+
100
+
101
+
102
+ ### Inference
103
+
104
+ When we have trained our models, we can inference all the data in testing set.
105
+
106
+ ```bash
107
+ python 4_predict.py
108
+ ```
109
+
110
+ When this process is done, the prediction cases will be put in this path:
111
+ **save_path = "./prediction_results/segmamba"**
112
+
113
+ ### Metrics computation
114
+ We can obtain the Dice score and HD95 on each segmentation target (WT, TC, ET for BraTS2023 dataset) using this code:
115
+
116
+ ```bash
117
+ python 5_compute_metrics.py --pred_name="segmamba"
118
+ ```
119
+
120
+
121
+
122
+ ## Acknowledgement
123
+ Many thanks for these repos for their great contribution!
124
+
125
+ [https://github.com/MIC-DKFZ/nnUNet](https://github.com/MIC-DKFZ/nnUNet)
126
+
127
+ [https://github.com/Project-MONAI/MONAI](https://github.com/Project-MONAI/MONAI)
128
+
129
+ [https://github.com/hustvl/Vim](https://github.com/hustvl/Vim)
130
+
131
+ [https://github.com/bowang-lab/U-Mamba](https://github.com/bowang-lab/U-Mamba)
132
+
source_code/SegMamba/causal-conv1d/.DS_Store ADDED
Binary file (6.15 kB). View file
 
source_code/SegMamba/causal-conv1d/AUTHORS ADDED
@@ -0,0 +1 @@
 
 
1
+ Tri Dao, tri@tridao.me
source_code/SegMamba/causal-conv1d/LICENSE ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ BSD 3-Clause License
2
+
3
+ Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file.
4
+ All rights reserved.
5
+
6
+ Redistribution and use in source and binary forms, with or without
7
+ modification, are permitted provided that the following conditions are met:
8
+
9
+ * Redistributions of source code must retain the above copyright notice, this
10
+ list of conditions and the following disclaimer.
11
+
12
+ * Redistributions in binary form must reproduce the above copyright notice,
13
+ this list of conditions and the following disclaimer in the documentation
14
+ and/or other materials provided with the distribution.
15
+
16
+ * Neither the name of the copyright holder nor the names of its
17
+ contributors may be used to endorse or promote products derived from
18
+ this software without specific prior written permission.
19
+
20
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
source_code/SegMamba/causal-conv1d/README.md ADDED
@@ -0,0 +1 @@
 
 
1
+ # Causal depthwise conv1d in CUDA with a PyTorch interface
source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ __version__ = "1.0.0"
2
+
3
+ from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update
source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2023, Tri Dao.
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+
6
+
7
+ import causal_conv1d_cuda
8
+
9
+
10
class CausalConv1dFn(torch.autograd.Function):
    """Autograd wrapper around the CUDA causal depthwise conv1d kernels."""

    @staticmethod
    def forward(ctx, x, weight, bias=None, activation=None):
        # x: (batch, dim, seqlen); weight: (dim, width); bias: (dim,) or None.
        if activation not in [None, "silu", "swish"]:
            raise NotImplementedError("activation must be None, silu, or swish")
        # The kernel requires one of the last two strides to be 1 (channel-last
        # or seq-last layout); otherwise make the tensor contiguous.
        if x.stride(2) != 1 and x.stride(1) != 1:
            x = x.contiguous()
        bias = bias.contiguous() if bias is not None else None
        ctx.save_for_backward(x, weight, bias)
        # Stored as a bool: whether the kernel should fuse a SiLU activation.
        ctx.activation = activation in ["silu", "swish"]
        out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation)
        return out

    @staticmethod
    def backward(ctx, dout):
        x, weight, bias = ctx.saved_tensors
        if dout.stride(2) != 1 and dout.stride(1) != 1:
            dout = dout.contiguous()
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        # Here we just pass in None and dx will be allocated in the C++ code.
        dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, weight, bias, dout, None, ctx.activation
        )
        # One gradient per forward input (x, weight, bias, activation);
        # activation is non-differentiable, and dbias is None when no bias was given.
        return dx, dweight, dbias if bias is not None else None, None
+
36
+
37
+ def causal_conv1d_fn(x, weight, bias=None, activation=None):
38
+ """
39
+ x: (batch, dim, seqlen)
40
+ weight: (dim, width)
41
+ bias: (dim,)
42
+ activation: either None or "silu" or "swish"
43
+
44
+ out: (batch, dim, seqlen)
45
+ """
46
+ return CausalConv1dFn.apply(x, weight, bias, activation)
47
+
48
+
49
+ def causal_conv1d_ref(x, weight, bias=None, activation=None):
50
+ """
51
+ x: (batch, dim, seqlen)
52
+ weight: (dim, width)
53
+ bias: (dim,)
54
+
55
+ out: (batch, dim, seqlen)
56
+ """
57
+ if activation not in [None, "silu", "swish"]:
58
+ raise NotImplementedError("activation must be None, silu, or swish")
59
+ dtype_in = x.dtype
60
+ x = x.to(weight.dtype)
61
+ seqlen = x.shape[-1]
62
+ dim, width = weight.shape
63
+ out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim)
64
+ out = out[..., :seqlen]
65
+ return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
66
+
67
+
68
+ def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None):
69
+ """
70
+ x: (batch, dim)
71
+ conv_state: (batch, dim, width)
72
+ weight: (dim, width)
73
+ bias: (dim,)
74
+
75
+ out: (batch, dim)
76
+ """
77
+ if activation not in [None, "silu", "swish"]:
78
+ raise NotImplementedError("activation must be None, silu, or swish")
79
+ activation = activation in ["silu", "swish"]
80
+ return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation)
81
+
82
+
83
+ def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None):
84
+ """
85
+ x: (batch, dim)
86
+ conv_state: (batch, dim, width)
87
+ weight: (dim, width)
88
+ bias: (dim,)
89
+
90
+ out: (batch, dim)
91
+ """
92
+ if activation not in [None, "silu", "swish"]:
93
+ raise NotImplementedError("activation must be None, silu, or swish")
94
+ dtype_in = x.dtype
95
+ batch, dim = x.shape
96
+ width = weight.shape[1]
97
+ assert conv_state.shape == (batch, dim, width)
98
+ assert weight.shape == (dim, width)
99
+ conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W)
100
+ conv_state[:, :, -1] = x
101
+ out = torch.sum(conv_state * weight, dim=-1) # (B D)
102
+ if bias is not None:
103
+ out += bias
104
+ return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/.ninja_log ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # ninja log v5
2
+ 1 3925 1769349295059886191 /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o 3402387f7700f2cb
3
+ 0 8815 1769349299946937322 /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o f8d3256741ca6581
4
+ 0 19513 1769349310651049313 /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o 9aac860c790009d8
5
+ 0 21969 1769349313087074800 /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o 2a543079906b9d85
source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/build.ninja ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ninja_required_version = 1.3
2
+ cxx = c++
3
+ nvcc = /usr/local/cuda/bin/nvcc
4
+
5
+ cflags = -pthread -B /root/miniforge/compiler_compat -fno-strict-overflow -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /root/miniforge/include -fPIC -O2 -isystem /root/miniforge/include -fPIC -I/root/githubs/SegMamba/causal-conv1d -I/root/miniforge/lib/python3.12/site-packages/torch/include -I/root/miniforge/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/root/miniforge/include/python3.12 -c
6
+ post_cflags = -O3 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=causal_conv1d_cuda -std=c++17
7
+ cuda_cflags = -I/root/githubs/SegMamba/causal-conv1d -I/root/miniforge/lib/python3.12/site-packages/torch/include -I/root/miniforge/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/root/miniforge/include/python3.12 -c
8
+ cuda_post_cflags = -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options ''"'"'-fPIC'"'"'' -O3 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_BFLOAT16_OPERATORS__ -U__CUDA_NO_BFLOAT16_CONVERSIONS__ -U__CUDA_NO_BFLOAT162_OPERATORS__ -U__CUDA_NO_BFLOAT162_CONVERSIONS__ --expt-relaxed-constexpr --expt-extended-lambda --use_fast_math --ptxas-options=-v -lineinfo -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_90,code=sm_90 --threads 4 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=causal_conv1d_cuda -std=c++17
9
+ cuda_dlink_post_cflags =
10
+ sycl_dlink_post_cflags =
11
+ ldflags =
12
+
13
+ rule compile
14
+ command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags
15
+ depfile = $out.d
16
+ deps = gcc
17
+
18
+ rule cuda_compile
19
+ depfile = $out.d
20
+ deps = gcc
21
+ command = $nvcc --generate-dependencies-with-compile --dependency-output $out.d $cuda_cflags -c $in -o $out $cuda_post_cflags
22
+
23
+
24
+
25
+
26
+
27
+
28
+
29
+ build /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o: compile /root/githubs/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp
30
+ build /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o: cuda_compile /root/githubs/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu
31
+ build /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o: cuda_compile /root/githubs/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu
32
+ build /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o: cuda_compile /root/githubs/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu
33
+
34
+
35
+
36
+
37
+
38
+
39
+
40
+
source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: causal_conv1d
3
+ Version: 1.0.0
4
+ Summary: Causal depthwise conv1d in CUDA, with a PyTorch interface
5
+ Home-page: https://github.com/Dao-AILab/causal-conv1d
6
+ Author: Tri Dao
7
+ Author-email: tri@tridao.me
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: License :: OSI Approved :: BSD License
10
+ Classifier: Operating System :: Unix
11
+ Requires-Python: >=3.7
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+ License-File: AUTHORS
15
+ Requires-Dist: torch
16
+ Requires-Dist: packaging
17
+ Requires-Dist: ninja
18
+ Dynamic: author
19
+ Dynamic: author-email
20
+ Dynamic: classifier
21
+ Dynamic: description
22
+ Dynamic: description-content-type
23
+ Dynamic: home-page
24
+ Dynamic: license-file
25
+ Dynamic: requires-dist
26
+ Dynamic: requires-python
27
+ Dynamic: summary
28
+
29
+ # Causal depthwise conv1d in CUDA with a PyTorch interface
source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ AUTHORS
2
+ LICENSE
3
+ README.md
4
+ setup.py
5
+ causal_conv1d/__init__.py
6
+ causal_conv1d/causal_conv1d_interface.py
7
+ causal_conv1d.egg-info/PKG-INFO
8
+ causal_conv1d.egg-info/SOURCES.txt
9
+ causal_conv1d.egg-info/dependency_links.txt
10
+ causal_conv1d.egg-info/requires.txt
11
+ causal_conv1d.egg-info/top_level.txt
12
+ csrc/causal_conv1d.cpp
13
+ csrc/causal_conv1d_bwd.cu
14
+ csrc/causal_conv1d_fwd.cu
15
+ csrc/causal_conv1d_update.cu
16
+ tests/test_causal_conv1d.py
source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch
2
+ packaging
3
+ ninja
source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ causal_conv1d
2
+ causal_conv1d_cuda
source_code/SegMamba/causal-conv1d/causal_conv1d/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ __version__ = "1.0.0"
2
+
3
+ from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update
source_code/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2023, Tri Dao.
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+
6
+
7
+ import causal_conv1d_cuda
8
+
9
+
10
+ class CausalConv1dFn(torch.autograd.Function):
11
+ @staticmethod
12
+ def forward(ctx, x, weight, bias=None, activation=None):
13
+ if activation not in [None, "silu", "swish"]:
14
+ raise NotImplementedError("activation must be None, silu, or swish")
15
+ if x.stride(2) != 1 and x.stride(1) != 1:
16
+ x = x.contiguous()
17
+ bias = bias.contiguous() if bias is not None else None
18
+ ctx.save_for_backward(x, weight, bias)
19
+ ctx.activation = activation in ["silu", "swish"]
20
+ out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation)
21
+ return out
22
+
23
+ @staticmethod
24
+ def backward(ctx, dout):
25
+ x, weight, bias = ctx.saved_tensors
26
+ if dout.stride(2) != 1 and dout.stride(1) != 1:
27
+ dout = dout.contiguous()
28
+ # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
29
+ # backward of conv1d with the backward of chunk).
30
+ # Here we just pass in None and dx will be allocated in the C++ code.
31
+ dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd(
32
+ x, weight, bias, dout, None, ctx.activation
33
+ )
34
+ return dx, dweight, dbias if bias is not None else None, None
35
+
36
+
37
+ def causal_conv1d_fn(x, weight, bias=None, activation=None):
38
+ """
39
+ x: (batch, dim, seqlen)
40
+ weight: (dim, width)
41
+ bias: (dim,)
42
+ activation: either None or "silu" or "swish"
43
+
44
+ out: (batch, dim, seqlen)
45
+ """
46
+ return CausalConv1dFn.apply(x, weight, bias, activation)
47
+
48
+
49
+ def causal_conv1d_ref(x, weight, bias=None, activation=None):
50
+ """
51
+ x: (batch, dim, seqlen)
52
+ weight: (dim, width)
53
+ bias: (dim,)
54
+
55
+ out: (batch, dim, seqlen)
56
+ """
57
+ if activation not in [None, "silu", "swish"]:
58
+ raise NotImplementedError("activation must be None, silu, or swish")
59
+ dtype_in = x.dtype
60
+ x = x.to(weight.dtype)
61
+ seqlen = x.shape[-1]
62
+ dim, width = weight.shape
63
+ out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim)
64
+ out = out[..., :seqlen]
65
+ return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
66
+
67
+
68
+ def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None):
69
+ """
70
+ x: (batch, dim)
71
+ conv_state: (batch, dim, width)
72
+ weight: (dim, width)
73
+ bias: (dim,)
74
+
75
+ out: (batch, dim)
76
+ """
77
+ if activation not in [None, "silu", "swish"]:
78
+ raise NotImplementedError("activation must be None, silu, or swish")
79
+ activation = activation in ["silu", "swish"]
80
+ return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation)
81
+
82
+
83
+ def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None):
84
+ """
85
+ x: (batch, dim)
86
+ conv_state: (batch, dim, width)
87
+ weight: (dim, width)
88
+ bias: (dim,)
89
+
90
+ out: (batch, dim)
91
+ """
92
+ if activation not in [None, "silu", "swish"]:
93
+ raise NotImplementedError("activation must be None, silu, or swish")
94
+ dtype_in = x.dtype
95
+ batch, dim = x.shape
96
+ width = weight.shape[1]
97
+ assert conv_state.shape == (batch, dim, width)
98
+ assert weight.shape == (dim, width)
99
+ conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W)
100
+ conv_state[:, :, -1] = x
101
+ out = torch.sum(conv_state * weight, dim=-1) # (B D)
102
+ if bias is not None:
103
+ out += bias
104
+ return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #include <ATen/cuda/CUDAContext.h>
6
+ #include <c10/cuda/CUDAGuard.h>
7
+ #include <torch/extension.h>
8
+ #include <vector>
9
+
10
+ #include "causal_conv1d.h"
11
+
12
+ #define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
13
+
14
+ #define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) \
15
+ if (ITYPE == at::ScalarType::Half) { \
16
+ using input_t = at::Half; \
17
+ __VA_ARGS__(); \
18
+ } else if (ITYPE == at::ScalarType::BFloat16) { \
19
+ using input_t = at::BFloat16; \
20
+ __VA_ARGS__(); \
21
+ } else if (ITYPE == at::ScalarType::Float) { \
22
+ using input_t = float; \
23
+ __VA_ARGS__(); \
24
+ } else { \
25
+ AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \
26
+ }
27
+
28
+ #define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) \
29
+ if (WTYPE == at::ScalarType::Half) { \
30
+ using weight_t = at::Half; \
31
+ __VA_ARGS__(); \
32
+ } else if (WTYPE == at::ScalarType::BFloat16) { \
33
+ using weight_t = at::BFloat16; \
34
+ __VA_ARGS__(); \
35
+ } else if (WTYPE == at::ScalarType::Float) { \
36
+ using weight_t = float; \
37
+ __VA_ARGS__(); \
38
+ } else { \
39
+ AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \
40
+ }
41
+
42
+ template<typename input_t, typename weight_t>
43
+ void causal_conv1d_fwd_cuda(ConvParamsBase &params, cudaStream_t stream);
44
+ template <typename input_t, typename weight_t>
45
+ void causal_conv1d_channellast_fwd_cuda(ConvParamsBase &params, cudaStream_t stream);
46
+
47
+ template<typename input_t, typename weight_t>
48
+ void causal_conv1d_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream);
49
+ template<typename input_t, typename weight_t>
50
+ void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream);
51
+
52
+ template<typename input_t, typename weight_t>
53
+ void causal_conv1d_update_cuda(ConvParamsBase &params, cudaStream_t stream);
54
+
55
+ void set_conv_params_fwd(ConvParamsBase &params,
56
+ // sizes
57
+ const size_t batch,
58
+ const size_t dim,
59
+ const size_t seqlen,
60
+ const size_t width,
61
+ // device pointers
62
+ const at::Tensor x,
63
+ const at::Tensor weight,
64
+ const at::Tensor out,
65
+ void* bias_ptr,
66
+ bool silu_activation) {
67
+
68
+ // Reset the parameters
69
+ memset(&params, 0, sizeof(params));
70
+
71
+ params.batch = batch;
72
+ params.dim = dim;
73
+ params.seqlen = seqlen;
74
+ params.width = width;
75
+
76
+ params.silu_activation = silu_activation;
77
+
78
+ // Set the pointers and strides.
79
+ params.x_ptr = x.data_ptr();
80
+ params.weight_ptr = weight.data_ptr();
81
+ params.bias_ptr = bias_ptr;
82
+ params.out_ptr = out.data_ptr();
83
+ // All stride are in elements, not bytes.
84
+ params.x_batch_stride = x.stride(0);
85
+ params.x_c_stride = x.stride(1);
86
+ params.x_l_stride = x.stride(-1);
87
+ params.weight_c_stride = weight.stride(0);
88
+ params.weight_width_stride = weight.stride(1);
89
+ params.out_batch_stride = out.stride(0);
90
+ params.out_c_stride = out.stride(1);
91
+ params.out_l_stride = out.stride(-1);
92
+ }
93
+
94
+
95
+ void set_conv_params_bwd(ConvParamsBwd &params,
96
+ // sizes
97
+ const size_t batch,
98
+ const size_t dim,
99
+ const size_t seqlen,
100
+ const size_t width,
101
+ // device pointers
102
+ const at::Tensor x,
103
+ const at::Tensor weight,
104
+ void* bias_ptr,
105
+ const at::Tensor dout,
106
+ const at::Tensor dx,
107
+ const at::Tensor dweight,
108
+ void* dbias_ptr,
109
+ bool silu_activation) {
110
+ // Pass in "dout" instead of "out", we're not gonna use "out" at all.
111
+ set_conv_params_fwd(params, batch, dim, seqlen, width,
112
+ x, weight, dout, bias_ptr, silu_activation);
113
+
114
+ // Set the pointers and strides.
115
+ params.dout_ptr = dout.data_ptr();
116
+ params.dx_ptr = dx.data_ptr();
117
+ params.dweight_ptr = dweight.data_ptr();
118
+ params.dbias_ptr = dbias_ptr;
119
+ // All stride are in elements, not bytes.
120
+ params.dout_batch_stride = dout.stride(0);
121
+ params.dout_c_stride = dout.stride(1);
122
+ params.dout_l_stride = dout.stride(2);
123
+ params.dweight_c_stride = dweight.stride(0);
124
+ params.dweight_width_stride = dweight.stride(1);
125
+ params.dx_batch_stride = dx.stride(0);
126
+ params.dx_c_stride = dx.stride(1);
127
+ params.dx_l_stride = dx.stride(2);
128
+ }
129
+
130
+ at::Tensor
131
+ causal_conv1d_fwd(const at::Tensor &x, const at::Tensor &weight,
132
+ const c10::optional<at::Tensor> &bias_,
133
+ bool silu_activation) {
134
+ auto input_type = x.scalar_type();
135
+ auto weight_type = weight.scalar_type();
136
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
137
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
138
+
139
+ TORCH_CHECK(x.is_cuda());
140
+ TORCH_CHECK(weight.is_cuda());
141
+
142
+ const auto sizes = x.sizes();
143
+ const int batch_size = sizes[0];
144
+ const int dim = sizes[1];
145
+ const int seqlen = sizes[2];
146
+ const int width = weight.size(-1);
147
+
148
+ CHECK_SHAPE(x, batch_size, dim, seqlen);
149
+ CHECK_SHAPE(weight, dim, width);
150
+
151
+ TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1);
152
+ const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1;
153
+
154
+ if (is_channel_last) {
155
+ TORCH_CHECK(dim % 8 == 0, "causal_conv1d only supports channel dimension divisible by 8 for now");
156
+ }
157
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
158
+
159
+
160
+ if (bias_.has_value()) {
161
+ auto bias = bias_.value();
162
+ TORCH_CHECK(bias.scalar_type() == weight_type);
163
+ TORCH_CHECK(bias.is_cuda());
164
+ TORCH_CHECK(bias.stride(-1) == 1);
165
+ CHECK_SHAPE(bias, dim);
166
+ }
167
+
168
+ at::Tensor out = torch::empty_like(x);
169
+
170
+ ConvParamsBase params;
171
+ set_conv_params_fwd(params, batch_size, dim, seqlen, width, x, weight, out,
172
+ bias_.has_value() ? bias_.value().data_ptr() : nullptr,
173
+ silu_activation);
174
+
175
+ // Otherwise the kernel will be launched from cuda:0 device
176
+ // Cast to char to avoid compiler warning about narrowing
177
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
178
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
179
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_fwd", [&] {
180
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_fwd", [&] {
181
+ if (!is_channel_last) {
182
+ causal_conv1d_fwd_cuda<input_t, weight_t>(params, stream);
183
+ } else {
184
+ causal_conv1d_channellast_fwd_cuda<input_t, weight_t>(params, stream);
185
+ }
186
+ });
187
+ });
188
+ return out;
189
+ }
190
+
191
+ std::vector<at::Tensor>
192
+ causal_conv1d_bwd(const at::Tensor &x, const at::Tensor &weight,
193
+ const c10::optional<at::Tensor> &bias_,
194
+ at::Tensor &dout,
195
+ c10::optional<at::Tensor> &dx_,
196
+ bool silu_activation) {
197
+ auto input_type = x.scalar_type();
198
+ auto weight_type = weight.scalar_type();
199
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
200
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
201
+
202
+ TORCH_CHECK(x.is_cuda());
203
+ TORCH_CHECK(weight.is_cuda());
204
+ TORCH_CHECK(dout.is_cuda());
205
+
206
+ const auto sizes = x.sizes();
207
+ const int batch_size = sizes[0];
208
+ const int dim = sizes[1];
209
+ const int seqlen = sizes[2];
210
+ const int width = weight.size(-1);
211
+
212
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
213
+
214
+ CHECK_SHAPE(x, batch_size, dim, seqlen);
215
+ CHECK_SHAPE(weight, dim, width);
216
+ CHECK_SHAPE(dout, batch_size, dim, seqlen);
217
+
218
+ TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1);
219
+ const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1;
220
+ if (!is_channel_last && dout.stride(2) != 1) { dout = dout.contiguous(); }
221
+ if (is_channel_last && dout.stride(1) != 1) { dout = dout.transpose(-1, -2).contiguous().transpose(-1, -2); }
222
+
223
+ if (bias_.has_value()) {
224
+ auto bias = bias_.value();
225
+ TORCH_CHECK(bias.scalar_type() == weight_type);
226
+ TORCH_CHECK(bias.is_cuda());
227
+ TORCH_CHECK(bias.stride(-1) == 1);
228
+ CHECK_SHAPE(bias, dim);
229
+ }
230
+
231
+ at::Tensor dx;
232
+ if (dx_.has_value()) {
233
+ dx = dx_.value();
234
+ TORCH_CHECK(dx.scalar_type() == input_type);
235
+ TORCH_CHECK(dx.is_cuda());
236
+ CHECK_SHAPE(dx, batch_size, dim, seqlen);
237
+ if (!is_channel_last) { TORCH_CHECK(dx.stride(2) == 1); }
238
+ if (is_channel_last) { TORCH_CHECK(dx.stride(1) == 1); }
239
+ } else {
240
+ dx = torch::empty_like(x);
241
+ }
242
+
243
+ // Otherwise the kernel will be launched from cuda:0 device
244
+ // Cast to char to avoid compiler warning about narrowing
245
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
246
+
247
+ at::Tensor dweight = torch::zeros_like(weight, weight.options().dtype(at::kFloat));
248
+ at::Tensor dbias;
249
+ if (bias_.has_value()) { dbias = torch::zeros_like(bias_.value(), bias_.value().options().dtype(at::kFloat)); }
250
+
251
+ ConvParamsBwd params;
252
+ set_conv_params_bwd(params, batch_size, dim, seqlen, width,
253
+ x, weight, bias_.has_value() ? bias_.value().data_ptr() : nullptr,
254
+ dout, dx, dweight, bias_.has_value() ? dbias.data_ptr() : nullptr,
255
+ silu_activation);
256
+
257
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
258
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_bwd", [&] {
259
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_bwd", [&] {
260
+ if (!is_channel_last) {
261
+ causal_conv1d_bwd_cuda<input_t, weight_t>(params, stream);
262
+ } else {
263
+ causal_conv1d_channellast_bwd_cuda<input_t, weight_t>(params, stream);
264
+ }
265
+ });
266
+ });
267
+ return {dx, dweight.to(weight.dtype()), bias_.has_value() ? dbias.to(bias_.value().dtype()) : dbias};
268
+ }
269
+
270
+ at::Tensor
271
+ causal_conv1d_update(const at::Tensor &x,
272
+ const at::Tensor &conv_state,
273
+ const at::Tensor &weight,
274
+ const c10::optional<at::Tensor> &bias_,
275
+ bool silu_activation) {
276
+ auto input_type = x.scalar_type();
277
+ auto weight_type = weight.scalar_type();
278
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
279
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
280
+ TORCH_CHECK(conv_state.scalar_type() == input_type);
281
+
282
+ TORCH_CHECK(x.is_cuda());
283
+ TORCH_CHECK(conv_state.is_cuda());
284
+ TORCH_CHECK(weight.is_cuda());
285
+
286
+ const auto sizes = x.sizes();
287
+ const int batch_size = sizes[0];
288
+ const int dim = sizes[1];
289
+ const int width = weight.size(-1);
290
+
291
+ CHECK_SHAPE(x, batch_size, dim);
292
+ CHECK_SHAPE(conv_state, batch_size, dim, width);
293
+ CHECK_SHAPE(weight, dim, width);
294
+
295
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
296
+
297
+ if (bias_.has_value()) {
298
+ auto bias = bias_.value();
299
+ TORCH_CHECK(bias.scalar_type() == weight_type);
300
+ TORCH_CHECK(bias.is_cuda());
301
+ TORCH_CHECK(bias.stride(-1) == 1);
302
+ CHECK_SHAPE(bias, dim);
303
+ }
304
+
305
+ at::Tensor out = torch::empty_like(x);
306
+
307
+ ConvParamsBase params;
308
+ set_conv_params_fwd(params, batch_size, dim, /*seqlen=*/1, width, x, weight, out,
309
+ bias_.has_value() ? bias_.value().data_ptr() : nullptr,
310
+ silu_activation);
311
+ params.conv_state_ptr = conv_state.data_ptr();
312
+ // All stride are in elements, not bytes.
313
+ params.conv_state_batch_stride = conv_state.stride(0);
314
+ params.conv_state_c_stride = conv_state.stride(1);
315
+ params.conv_state_l_stride = conv_state.stride(2);
316
+
317
+ // Otherwise the kernel will be launched from cuda:0 device
318
+ // Cast to char to avoid compiler warning about narrowing
319
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
320
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
321
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_update", [&] {
322
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_update", [&] {
323
+ causal_conv1d_update_cuda<input_t, weight_t>(params, stream);
324
+ });
325
+ });
326
+ return out;
327
+ }
328
+
329
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
330
+ m.def("causal_conv1d_fwd", &causal_conv1d_fwd, "Causal conv1d forward");
331
+ m.def("causal_conv1d_bwd", &causal_conv1d_bwd, "Causal conv1d backward");
332
+ m.def("causal_conv1d_update", &causal_conv1d_update, "Causal conv1d update");
333
+ }
source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #pragma once
6
+
7
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
8
+
9
+ struct ConvParamsBase {
10
+ using index_t = uint32_t;
11
+
12
+ int batch, dim, seqlen, width;
13
+ bool silu_activation;
14
+
15
+ index_t x_batch_stride;
16
+ index_t x_c_stride;
17
+ index_t x_l_stride;
18
+ index_t weight_c_stride;
19
+ index_t weight_width_stride;
20
+ index_t out_batch_stride;
21
+ index_t out_c_stride;
22
+ index_t out_l_stride;
23
+
24
+ index_t conv_state_batch_stride;
25
+ index_t conv_state_c_stride;
26
+ index_t conv_state_l_stride;
27
+
28
+ // Common data pointers.
29
+ void *__restrict__ x_ptr;
30
+ void *__restrict__ weight_ptr;
31
+ void *__restrict__ bias_ptr;
32
+ void *__restrict__ out_ptr;
33
+
34
+ void *__restrict__ conv_state_ptr;
35
+ };
36
+
37
+ struct ConvParamsBwd: public ConvParamsBase {
38
+ index_t dx_batch_stride;
39
+ index_t dx_c_stride;
40
+ index_t dx_l_stride;
41
+ index_t dweight_c_stride;
42
+ index_t dweight_width_stride;
43
+ index_t dout_batch_stride;
44
+ index_t dout_c_stride;
45
+ index_t dout_l_stride;
46
+
47
+ // Common data pointers.
48
+ void *__restrict__ dx_ptr;
49
+ void *__restrict__ dweight_ptr;
50
+ void *__restrict__ dbias_ptr;
51
+ void *__restrict__ dout_ptr;
52
+ };
53
+
source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #include <c10/util/BFloat16.h>
6
+ #include <c10/util/Half.h>
7
+ #include <c10/cuda/CUDAException.h> // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
8
+
9
+ #include <cub/block/block_load.cuh>
10
+ #include <cub/block/block_store.cuh>
11
+ #include <cub/block/block_reduce.cuh>
12
+
13
+ #include "causal_conv1d.h"
14
+ #include "causal_conv1d_common.h"
15
+ #include "static_switch.h"
16
+
17
+ template<int kNThreads_, int kWidth_, bool kSiluAct_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
18
+ struct Causal_conv1d_bwd_kernel_traits {
19
+ using input_t = input_t_;
20
+ using weight_t = weight_t_;
21
+ static constexpr int kNThreads = kNThreads_;
22
+ static constexpr int kWidth = kWidth_;
23
+ static constexpr bool kSiluAct = kSiluAct_;
24
+ static constexpr int kNBytes = sizeof(input_t);
25
+ static_assert(kNBytes == 2 || kNBytes == 4);
26
+ static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
27
+ static_assert(kWidth <= kNElts);
28
+ // It's possible that we need to do 2 rounds of exchange if input_t is 16 bits
29
+ // (since then we'd have 8 values of float, and each round we can exchange 4 floats).
30
+ static constexpr int kNExchangeRounds = sizeof(float) / sizeof(input_t);
31
+ static constexpr bool kIsVecLoad = kIsVecLoad_;
32
+ using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
33
+ using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNElts, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
34
+ using BlockLoadVecT = cub::BlockLoad<vec_t, kNThreads, 1, cub::BLOCK_LOAD_DIRECT>;
35
+ using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNElts, cub::BLOCK_STORE_WARP_TRANSPOSE>;
36
+ using BlockStoreVecT = cub::BlockStore<vec_t, kNThreads, 1, cub::BLOCK_STORE_DIRECT>;
37
+ using BlockReduceFloatT = cub::BlockReduce<float, kNThreads>;
38
+ static constexpr int kSmemIOSize = kIsVecLoad
39
+ ? 0
40
+ : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)});
41
+ static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts * (!kSiluAct ? 1 : kNExchangeRounds + 1);
42
+ static constexpr int kSmemSize = std::max({kSmemExchangeSize,
43
+ int(sizeof(typename BlockReduceFloatT::TempStorage))}) + (kIsVecLoad ? 0 : kSmemIOSize);
44
+ };
45
+
46
+ template<typename Ktraits>
47
+ __global__ __launch_bounds__(Ktraits::kNThreads)
48
+ void causal_conv1d_bwd_kernel(ConvParamsBwd params) {
49
+ constexpr int kWidth = Ktraits::kWidth;
50
+ constexpr int kNThreads = Ktraits::kNThreads;
51
+ constexpr bool kSiluAct = Ktraits::kSiluAct;
52
+ constexpr int kNElts = Ktraits::kNElts;
53
+ constexpr int kNExchangeRounds = Ktraits::kNExchangeRounds;
54
+ constexpr bool kIsVecLoad = Ktraits::kIsVecLoad;
55
+ using input_t = typename Ktraits::input_t;
56
+ using vec_t = typename Ktraits::vec_t;
57
+ using weight_t = typename Ktraits::weight_t;
58
+
59
+ // Shared memory.
60
+ extern __shared__ char smem_[];
61
+ auto& smem_load = reinterpret_cast<typename Ktraits::BlockLoadT::TempStorage&>(smem_);
62
+ auto& smem_load_vec = reinterpret_cast<typename Ktraits::BlockLoadVecT::TempStorage&>(smem_);
63
+ auto& smem_store = reinterpret_cast<typename Ktraits::BlockStoreT::TempStorage&>(smem_);
64
+ auto& smem_store_vec = reinterpret_cast<typename Ktraits::BlockStoreVecT::TempStorage&>(smem_);
65
+ vec_t *smem_exchange = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize);
66
+ vec_t *smem_exchange_x = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize) + kNThreads * kNExchangeRounds;
67
+ auto& smem_reduce_float = *reinterpret_cast<typename Ktraits::BlockReduceFloatT::TempStorage*>(smem_ + Ktraits::kSmemIOSize);
68
+
69
+ const int tidx = threadIdx.x;
70
+ const int batch_id = blockIdx.x;
71
+ const int dim_id = blockIdx.y;
72
+ input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
73
+ + dim_id * params.x_c_stride;
74
+ weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + dim_id * params.weight_c_stride;
75
+ input_t *dout = reinterpret_cast<input_t *>(params.dout_ptr) + batch_id * params.dout_batch_stride
76
+ + dim_id * params.dout_c_stride;
77
+ input_t *dx = reinterpret_cast<input_t *>(params.dx_ptr) + batch_id * params.dx_batch_stride
78
+ + dim_id * params.dx_c_stride;
79
+ float *dweight = reinterpret_cast<float *>(params.dweight_ptr) + dim_id * params.dweight_c_stride;
80
+ float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[dim_id]);
81
+
82
+ // Thread kNThreads - 1 will load the first elements of the next chunk so we initialize those to 0.
83
+ if (tidx == 0) {
84
+ if constexpr (!kSiluAct) {
85
+ input_t zeros[kNElts] = {0};
86
+ smem_exchange[0] = reinterpret_cast<vec_t *>(zeros)[0];
87
+ } else {
88
+ float zeros[kNElts] = {0};
89
+ #pragma unroll
90
+ for (int r = 0; r < kNExchangeRounds; ++r) {
91
+ smem_exchange[r * kNThreads] = reinterpret_cast<vec_t *>(zeros)[r];
92
+ }
93
+ }
94
+ }
95
+
96
+ float weight_vals[kWidth];
97
+ #pragma unroll
98
+ for (int i = 0; i < kWidth; ++i) { weight_vals[i] = weight[i * params.weight_width_stride]; }
99
+
100
+ float dweight_vals[kWidth] = {0};
101
+ float dbias_val = 0;
102
+
103
+ constexpr int kChunkSize = kNThreads * kNElts;
104
+ const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize;
105
+ x += (n_chunks - 1) * kChunkSize;
106
+ dout += (n_chunks - 1) * kChunkSize;
107
+ dx += (n_chunks - 1) * kChunkSize;
108
+ for (int chunk = n_chunks - 1; chunk >= 0; --chunk) {
109
+ input_t x_vals_load[2 * kNElts] = {0};
110
+ input_t dout_vals_load[2 * kNElts] = {0};
111
+ if constexpr(kIsVecLoad) {
112
+ Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(x), *reinterpret_cast<vec_t (*)[1]>(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts);
113
+ Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(dout), *reinterpret_cast<vec_t (*)[1]>(&dout_vals_load[0]), (params.seqlen - chunk * kChunkSize) / kNElts);
114
+ } else {
115
+ __syncthreads();
116
+ Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast<input_t (*)[kNElts]>(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize);
117
+ __syncthreads();
118
+ Ktraits::BlockLoadT(smem_load).Load(dout, *reinterpret_cast<input_t (*)[kNElts]>(&dout_vals_load[0]), params.seqlen - chunk * kChunkSize);
119
+ }
120
+ float dout_vals[2 * kNElts], x_vals[2 * kNElts];
121
+ if constexpr (!kSiluAct) {
122
+ __syncthreads();
123
+ // Thread 0 don't write yet, so that thread kNThreads - 1 can read
124
+ // the first elements of the next chunk.
125
+ if (tidx > 0) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(dout_vals_load)[0]; }
126
+ __syncthreads();
127
+ reinterpret_cast<vec_t *>(dout_vals_load)[1] = smem_exchange[tidx < kNThreads - 1 ? tidx + 1 : 0];
128
+ __syncthreads();
129
+ // Now thread 0 can write the first elements of the current chunk.
130
+ if (tidx == 0) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(dout_vals_load)[0]; }
131
+ #pragma unroll
132
+ for (int i = 0; i < 2 * kNElts; ++i) {
133
+ dout_vals[i] = float(dout_vals_load[i]);
134
+ x_vals[i] = float(x_vals_load[i]);
135
+ }
136
+ } else {
137
+ if (tidx == 0 && chunk > 0) {
138
+ if constexpr(kIsVecLoad) {
139
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = reinterpret_cast<vec_t *>(x)[-1];
140
+ } else {
141
+ #pragma unroll
142
+ for (int i = 0; i < kNElts; ++i) {
143
+ if (chunk * kChunkSize + i < params.seqlen) { x_vals_load[i] = x[-kNElts + i]; }
144
+ }
145
+ }
146
+ }
147
+ __syncthreads();
148
+ smem_exchange_x[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1];
149
+ __syncthreads();
150
+ if (tidx > 0) { reinterpret_cast<vec_t *>(x_vals_load)[0] = smem_exchange_x[tidx - 1]; }
151
+ #pragma unroll
152
+ for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); }
153
+ // Recompute the output
154
+ #pragma unroll
155
+ for (int i = 0; i < kNElts; ++i) {
156
+ float out_val = bias_val;
157
+ #pragma unroll
158
+ for (int w = 0; w < kWidth; ++w) {
159
+ out_val += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)];
160
+ }
161
+ float out_sigmoid_val = 1.0f / (1.0f + expf(-out_val));
162
+ dout_vals[i] = float(dout_vals_load[i]) * out_sigmoid_val
163
+ * (1.0f + out_val * (1.0f - out_sigmoid_val));
164
+ }
165
+ // Exchange the dout_vals. It's possible that we need to do 2 rounds of exchange
166
+ // if input_t is 16 bits (since then we'd have 8 values of float)
167
+ __syncthreads();
168
+ // Thread 0 don't write yet, so that thread kNThreads - 1 can read
169
+ // the first elements of the next chunk.
170
+ if (tidx > 0) {
171
+ #pragma unroll
172
+ for (int r = 0; r < kNExchangeRounds; ++r) {
173
+ smem_exchange[r * kNThreads + tidx] = reinterpret_cast<vec_t *>(dout_vals)[r];
174
+ }
175
+ }
176
+ __syncthreads();
177
+ #pragma unroll
178
+ for (int r = 0; r < kNExchangeRounds; ++r) {
179
+ reinterpret_cast<vec_t *>(dout_vals)[kNExchangeRounds + r]
180
+ = smem_exchange[r * kNThreads + (tidx < kNThreads - 1 ? tidx + 1 : 0)];
181
+ }
182
+ __syncthreads();
183
+ // Now thread 0 can write the first elements of the current chunk.
184
+ if (tidx == 0) {
185
+ #pragma unroll
186
+ for (int r = 0; r < kNExchangeRounds; ++r) {
187
+ smem_exchange[r * kNThreads + tidx] = reinterpret_cast<vec_t *>(dout_vals)[r];
188
+ }
189
+ }
190
+ }
191
+ dout -= kChunkSize;
192
+ x -= kChunkSize;
193
+
194
+ #pragma unroll
195
+ for (int i = 0; i < kNElts; ++i) { dbias_val += dout_vals[i]; }
196
+
197
+ float dx_vals[kNElts] = {0};
198
+ #pragma unroll
199
+ for (int i = 0; i < kNElts; ++i) {
200
+ #pragma unroll
201
+ for (int w = 0; w < kWidth; ++w) {
202
+ dx_vals[i] += weight_vals[w] * dout_vals[i + kWidth - w - 1];
203
+ }
204
+ }
205
+
206
+ input_t dx_vals_store[kNElts];
207
+ #pragma unroll
208
+ for (int i = 0; i < kNElts; ++i) { dx_vals_store[i] = dx_vals[i]; }
209
+ if constexpr(kIsVecLoad) {
210
+ Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast<vec_t*>(dx), reinterpret_cast<vec_t (&)[1]>(dx_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts);
211
+ } else {
212
+ Ktraits::BlockStoreT(smem_store).Store(dx, dx_vals_store, params.seqlen - chunk * kChunkSize);
213
+ }
214
+ dx -= kChunkSize;
215
+
216
+ #pragma unroll
217
+ for (int w = 0; w < kWidth; ++w) {
218
+ #pragma unroll
219
+ for (int i = 0; i < kNElts; ++i) {
220
+ dweight_vals[w] += x_vals[kNElts + i] * dout_vals[i + kWidth - w - 1];
221
+ }
222
+ }
223
+ }
224
+
225
+ #pragma unroll
226
+ for (int w = 0; w < kWidth; ++w) {
227
+ __syncthreads();
228
+ dweight_vals[w] = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dweight_vals[w]);
229
+ if (tidx == 0) {
230
+ atomicAdd(&reinterpret_cast<float *>(dweight)[w * params.dweight_width_stride], dweight_vals[w]);
231
+ }
232
+ }
233
+ if (params.bias_ptr != nullptr) {
234
+ __syncthreads();
235
+ dbias_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dbias_val);
236
+ if (tidx == 0) {
237
+ atomicAdd(&reinterpret_cast<float *>(params.dbias_ptr)[dim_id], dbias_val);
238
+ }
239
+ }
240
+ }
241
+
242
// Configure and launch the channel-first backward kernel for one fixed
// (kNThreads, kWidth) combination.
//
// Dispatch logic:
//  * kIsVecLoad  — true when seqlen divides evenly into vectorized elements
//                  (kNElts per thread), enabling the vector load/store path.
//  * kSiluAct    — compile-time specialization on whether the forward pass
//                  applied a SiLU activation (its gradient must then be
//                  recomputed in the kernel).
// One block per (batch, channel) pair; shared-memory size comes from the
// kernel traits, with an explicit opt-in above the 48 KB static limit.
template<int kNThreads, int kWidth, typename input_t, typename weight_t>
void causal_conv1d_bwd_launch(ConvParamsBwd &params, cudaStream_t stream) {
    // Elements per thread per vector: 4 for 4-byte types, 8 for 2-byte types.
    static constexpr int kNElts = sizeof(input_t) == 4 ? 4 : 8;
    BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] {
        BOOL_SWITCH(params.silu_activation, kSiluAct, [&] {
            using Ktraits = Causal_conv1d_bwd_kernel_traits<kNThreads, kWidth, kSiluAct, kIsVecLoad, input_t, weight_t>;
            constexpr int kSmemSize = Ktraits::kSmemSize;
            dim3 grid(params.batch, params.dim);
            auto kernel = &causal_conv1d_bwd_kernel<Ktraits>;
            // Dynamic shared memory beyond 48 KB requires an explicit attribute.
            if (kSmemSize >= 48 * 1024) {
                C10_CUDA_CHECK(cudaFuncSetAttribute(
                    kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
            }
            kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
        });
    });
}
260
+
261
// Entry point for the channel-first backward pass: select the kernel
// instantiation matching the convolution filter width.
// Supported widths are 2, 3 and 4; any other width is a no-op here
// (callers are expected to have validated the width beforehand).
template<typename input_t, typename weight_t>
void causal_conv1d_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream) {
    switch (params.width) {
        case 2:
            causal_conv1d_bwd_launch<128, 2, input_t, weight_t>(params, stream);
            break;
        case 3:
            causal_conv1d_bwd_launch<128, 3, input_t, weight_t>(params, stream);
            break;
        case 4:
            causal_conv1d_bwd_launch<128, 4, input_t, weight_t>(params, stream);
            break;
        default:
            // Unsupported width: do nothing, matching the original if/else chain.
            break;
    }
}
271
+
272
// Compile-time configuration for the channel-last (B, L, C layout) backward
// kernel. All quantities below are derived from the thread count, filter
// width, element size, and the per-block L-chunk size.
template<int kNThreads_, int kWidth_, int kChunkSizeL_, bool kSiluAct_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
struct Causal_conv1d_channellast_bwd_kernel_traits {
    // The cache line is 128 bytes, and we try to read 16 bytes per thread.
    // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension.
    // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128
    // threads). Each load is 16 x 32|64 elements in the L x C dimensions.
    using input_t = input_t_;
    using weight_t = weight_t_;
    static constexpr bool kSiluAct = kSiluAct_;          // forward pass applied SiLU
    static constexpr int kNThreads = kNThreads_;
    static_assert(kNThreads % 32 == 0);                  // whole warps only
    static constexpr int kNWarps = kNThreads / 32;
    static constexpr int kWidth = kWidth_;
    static constexpr int kChunkSizeL = kChunkSizeL_;     // sequence positions per block
    static constexpr int kNBytes = sizeof(input_t);
    static_assert(kNBytes == 2 || kNBytes == 4);         // fp16/bf16 or fp32
    // Elements per 16-byte vectorized access.
    static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
    // Channels covered by one 128-byte cache line.
    static constexpr int kNEltsPerRow = 128 / kNBytes;
    static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts;  // Always 8 for now
    static_assert(kNThreadsPerRow * kNBytes * kNElts == 128);
    static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow;  // Always 4 for now
    static_assert(kNColsPerWarp * kNThreadsPerRow == 32);
    // L-positions loaded per cooperative load, and loads needed per chunk.
    static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps;
    static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad;
    static_assert(kNLoads * kNColsPerLoad == kChunkSizeL);
    static constexpr bool kIsVecLoad = kIsVecLoad_;
    using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
    // using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNItems, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
    // using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNItems, cub::BLOCK_STORE_WARP_TRANSPOSE>;
    // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage),
    //                                            sizeof(typename BlockStoreT::TempStorage)});
    // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes;
};
305
+
306
// Backward kernel for the channel-last causal conv1d.
//
// Grid: (batch, L-chunks, C-chunks); each block owns one
// kChunkSizeL x kChunkSizeC tile. The kernel stages dout and x tiles
// (plus (kWidth - 1) halo rows on each side) into shared memory, then each
// thread computes kLPerThread consecutive L positions of one channel row:
//  * dx       — written back to global memory,
//  * dweight  — reduced across the row's threads and accumulated into a
//               float buffer with atomicAdd,
//  * dbias    — same reduction/atomicAdd pattern, only when bias is present.
// When kSiluAct is set, the forward output is recomputed from x so the SiLU
// gradient can be folded into dout.
template<typename Ktraits>
__global__ __launch_bounds__(Ktraits::kNThreads)
void causal_conv1d_channellast_bwd_kernel(ConvParamsBwd params) {
    constexpr int kWidth = Ktraits::kWidth;
    constexpr int kNThreads = Ktraits::kNThreads;
    constexpr bool kSiluAct = Ktraits::kSiluAct;
    constexpr int kNElts = Ktraits::kNElts;
    constexpr int kNWarp = Ktraits::kNWarps;
    constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow;
    constexpr int kLPerLoad = Ktraits::kNColsPerLoad;
    constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
    constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
    using input_t = typename Ktraits::input_t;
    using vec_t = typename Ktraits::vec_t;
    using weight_t = typename Ktraits::weight_t;

    // Shared memory. The extra kNElts columns pad each row to avoid
    // bank conflicts on the transposed (per-channel) reads below.
    // dout needs a (kWidth - 1) halo on the right (future positions feed dx);
    // x needs halos on both sides (left for dweight/dx, right for SiLU recompute).
    __shared__ input_t dout_smem[kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts];
    __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts];

    const int tid = threadIdx.x;
    const int l_idx = tid / kNThreadsPerC;   // sequence position within the tile
    const int c_idx = tid % kNThreadsPerC;   // vectorized channel group within the tile
    const int batch_id = blockIdx.x;
    const int chunk_l_id = blockIdx.y;
    const int chunk_c_id = blockIdx.z;
    // Base pointers for this block's tile.
    input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
        + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
    weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr)
        + chunk_c_id * kChunkSizeC * params.weight_c_stride;
    input_t *dout = reinterpret_cast<input_t *>(params.dout_ptr) + batch_id * params.dout_batch_stride
        + (chunk_l_id * kChunkSizeL + l_idx) * params.dout_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
    input_t *dx = reinterpret_cast<input_t *>(params.dx_ptr) + batch_id * params.dx_batch_stride
        + (chunk_l_id * kChunkSizeL + l_idx) * params.dx_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
    float *dweight = reinterpret_cast<float *>(params.dweight_ptr)
        + chunk_c_id * kChunkSizeC * params.dweight_c_stride;

    // Stage the main dout / x tiles into shared memory (vectorized, with
    // out-of-bounds lanes contributing zeros).
    #pragma unroll
    for (int l = 0; l < Ktraits::kNLoads; ++l) {
        input_t dout_vals_load[kNElts] = {0};
        input_t x_vals_load[kNElts] = {0};
        if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
            reinterpret_cast<vec_t *>(dout_vals_load)[0] = *reinterpret_cast<vec_t *>(dout + l * kLPerLoad * params.dout_l_stride);
            reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + l * kLPerLoad * params.x_l_stride);
        }
        reinterpret_cast<vec_t *>(dout_smem[l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(dout_vals_load)[0];
        reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
    }
    // Load the halo elements: dout from the next chunk (right halo) and
    // x from the previous chunk (left halo), zero-filled past the array edges.
    if (l_idx < kWidth - 1) {
        input_t dout_vals_load[kNElts] = {0};
        input_t x_vals_load[kNElts] = {0};
        if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen
            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
            reinterpret_cast<vec_t *>(dout_vals_load)[0] = *reinterpret_cast<vec_t *>(dout + kChunkSizeL * params.dout_l_stride);
        }
        if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0
            && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen
            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
            reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x - (kWidth - 1) * params.x_l_stride);
        }
        reinterpret_cast<vec_t *>(dout_smem[kChunkSizeL + l_idx])[c_idx] = reinterpret_cast<vec_t *>(dout_vals_load)[0];
        reinterpret_cast<vec_t *>(x_smem[l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
    }
    // Need to load (kWidth - 1) extra x's on the right to recompute the
    // (kChunkSizeL + kWidth - 1) forward outputs for the SiLU gradient.
    if constexpr (kSiluAct) {
        if (l_idx < kWidth - 1) {
            input_t x_vals_load[kNElts] = {0};
            if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen
                && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
                reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + kChunkSizeL * params.x_l_stride);
            }
            reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + kChunkSizeL + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
        }
    }

    __syncthreads();

    // Re-partition the tile for the compute phase: each thread now owns
    // kLPerThread consecutive L positions of a single channel (row_idx).
    constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL);
    static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC);
    constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread;
    static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL);
    // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity
    static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0);
    static_assert((kLPerThread & (kLPerThread - 1)) == 0);
    static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0);
    static_assert(kNThreadsPerRow <= 32);

    const int row_idx = tid / kNThreadsPerRow;
    const int col_idx = tid % kNThreadsPerRow;

    // Per-channel bias and weights (zero outside the valid channel range).
    float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]);
    float weight_vals[kWidth] = {0};
    if (chunk_c_id * kChunkSizeC + row_idx < params.dim) {
        #pragma unroll
        for (int w = 0; w < kWidth; ++w) {
            weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride];
        }
    }
    // Pull this thread's strip (plus halos) out of shared memory as floats.
    float dout_vals[kLPerThread + kWidth - 1];
    float x_vals[kWidth - 1 + kLPerThread + kWidth - 1];
    #pragma unroll
    for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) {
        dout_vals[i] = float(dout_smem[col_idx * kLPerThread + i][row_idx]);
        x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
    }

    if constexpr (kSiluAct) { // Recompute the output and fold the SiLU gradient into dout.
        #pragma unroll
        for (int i = kWidth - 1 + kLPerThread; i < kWidth - 1 + kLPerThread + kWidth - 1; ++i) {
            x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
        }
        #pragma unroll
        for (int i = 0; i < kLPerThread + kWidth - 1; ++i) {
            float out_val = bias_val;
            #pragma unroll
            for (int w = 0; w < kWidth; ++w) { out_val += weight_vals[w] * x_vals[i + w]; }
            // d/dz silu(z) = sigmoid(z) * (1 + z * (1 - sigmoid(z)))
            float out_val_sigmoid = 1.f / (1.f + expf(-out_val));
            dout_vals[i] *= out_val_sigmoid * (1 + out_val * (1 - out_val_sigmoid));
        }
    }

    // Weight gradient: per-thread partial sums, warp-level allreduce across
    // the row's threads, then one atomicAdd per (channel, tap) from lane 0.
    float dweight_vals[kWidth] = {0};
    SumOp<float> sum_op;
    #pragma unroll
    for (int w = 0; w < kWidth; ++w) {
        #pragma unroll
        for (int i = 0; i < kLPerThread; ++i) { dweight_vals[w] += x_vals[i + w] * dout_vals[i]; }
        dweight_vals[w] = Allreduce<kNThreadsPerRow>::run(dweight_vals[w], sum_op)
        if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) {
            atomicAdd(&reinterpret_cast<float *>(dweight)[row_idx * params.dweight_c_stride + w * params.dweight_width_stride], dweight_vals[w]);
        }
    }

    // Bias gradient: same reduce-then-atomicAdd pattern, only when bias exists.
    if (params.bias_ptr != nullptr) {
        float dbias_val = 0.f;
        for (int i = 0; i < kLPerThread; ++i) { dbias_val += dout_vals[i]; }
        dbias_val = Allreduce<kNThreadsPerRow>::run(dbias_val, sum_op);
        if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) {
            atomicAdd(&reinterpret_cast<float *>(params.dbias_ptr)[chunk_c_id * kChunkSizeC + row_idx], dbias_val);
        }
    }

    // Input gradient: correlate dout with the flipped filter.
    float dx_vals[kLPerThread] = {0};
    #pragma unroll
    for (int i = 0; i < kLPerThread; ++i) {
        #pragma unroll
        for (int w = 0; w < kWidth; ++w) { dx_vals[i] += weight_vals[kWidth - 1 - w] * dout_vals[i + w]; }
    }
    // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads.
    __syncwarp();
    // Reuse x_smem to transpose dx back into the (L, C) layout for the store.
    #pragma unroll
    for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = dx_vals[i]; }
    __syncthreads();

    // Vectorized store of dx, guarded against the tile's ragged edges.
    #pragma unroll
    for (int l = 0; l < Ktraits::kNLoads; ++l) {
        input_t dx_vals_store[kNElts];
        reinterpret_cast<vec_t *>(dx_vals_store)[0] = reinterpret_cast<vec_t *>(x_smem[l * kLPerLoad + l_idx])[c_idx];
        if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
            *reinterpret_cast<vec_t *>(dx + l * kLPerLoad * params.dx_l_stride) = reinterpret_cast<vec_t *>(dx_vals_store)[0];
        }
    }

}
473
+
474
// Configure and launch the channel-last backward kernel for one fixed
// (kNThreads, kWidth) combination. The L-chunk size is fixed at 64 and the
// kernel uses only statically-declared shared memory, so no dynamic
// shared-memory size is passed. The grid tiles (batch, seqlen, dim).
template<int kNThreads, int kWidth, typename input_t, typename weight_t>
void causal_conv1d_channellast_bwd_launch(ConvParamsBwd &params, cudaStream_t stream) {
    // Specialize on whether the SiLU gradient must be recomputed.
    BOOL_SWITCH(params.silu_activation, kSiluAct, [&] {
        using Ktraits = Causal_conv1d_channellast_bwd_kernel_traits<kNThreads, kWidth, 64, kSiluAct, true, input_t, weight_t>;
        // constexpr int kSmemSize = Ktraits::kSmemSize;
        constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
        constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
        // Ceil-divide sequence length and channels into per-block tiles.
        const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL;
        const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC;
        dim3 grid(params.batch, n_chunks_L, n_chunks_C);
        dim3 block(Ktraits::kNThreads);
        auto kernel = &causal_conv1d_channellast_bwd_kernel<Ktraits>;
        // if (kSmemSize >= 48 * 1024) {
        //     C10_CUDA_CHECK(cudaFuncSetAttribute(
        //         kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
        // }
        // kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
        kernel<<<grid, Ktraits::kNThreads, 0, stream>>>(params);
        C10_CUDA_KERNEL_LAUNCH_CHECK();
    });
}
495
+
496
// Entry point for the channel-last backward pass: select the kernel
// instantiation matching the convolution filter width.
// Supported widths are 2, 3 and 4; any other width is a no-op here
// (callers are expected to have validated the width beforehand).
template<typename input_t, typename weight_t>
void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream) {
    switch (params.width) {
        case 2:
            causal_conv1d_channellast_bwd_launch<128, 2, input_t, weight_t>(params, stream);
            break;
        case 3:
            causal_conv1d_channellast_bwd_launch<128, 3, input_t, weight_t>(params, stream);
            break;
        case 4:
            causal_conv1d_channellast_bwd_launch<128, 4, input_t, weight_t>(params, stream);
            break;
        default:
            // Unsupported width: do nothing, matching the original if/else chain.
            break;
    }
}
506
+
507
// Explicit instantiations for every supported (input_t, weight_t) pairing:
// the 3x3 grid of {float, half, bfloat16} inputs x {float, half, bfloat16}
// weights, for both the channel-first and channel-last backward entry points.
// These make the template definitions in this .cu file linkable from the
// C++ binding code without exposing the kernel sources.
template void causal_conv1d_bwd_cuda<float, float>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_bwd_cuda<at::Half, float>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_bwd_cuda<at::BFloat16, float>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_bwd_cuda<float, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_bwd_cuda<at::Half, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_bwd_cuda<at::BFloat16, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_bwd_cuda<float, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_bwd_cuda<at::Half, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_bwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);

template void causal_conv1d_channellast_bwd_cuda<float, float>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_channellast_bwd_cuda<at::Half, float>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, float>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_channellast_bwd_cuda<float, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_channellast_bwd_cuda<at::Half, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_channellast_bwd_cuda<float, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_channellast_bwd_cuda<at::Half, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #pragma once
6
+
7
+ #include <cuda_bf16.h>
8
+ #include <cuda_fp16.h>
9
+
10
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
11
+
12
// Map a byte count to an unsigned integer type of exactly that size, used
// for vectorized (reinterpret_cast-based) loads and stores. The primary
// template is empty so an unsupported size fails at compile time.
template<int BYTES> struct BytesToType {};

template<> struct BytesToType<16> {
    using Type = uint4;  // 16-byte CUDA vector type
    static_assert(sizeof(Type) == 16);
};

template<> struct BytesToType<8> {
    using Type = uint64_t;
    static_assert(sizeof(Type) == 8);
};

template<> struct BytesToType<4> {
    using Type = uint32_t;
    static_assert(sizeof(Type) == 4);
};

template<> struct BytesToType<2> {
    using Type = uint16_t;
    static_assert(sizeof(Type) == 2);
};

template<> struct BytesToType<1> {
    using Type = uint8_t;
    static_assert(sizeof(Type) == 1);
};
38
+
39
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
40
+
41
// Binary addition functor for use with the Allreduce helper below.
template<typename T>
struct SumOp {
    __device__ inline T operator()(T const & x, T const & y) { return x + y; }
};
45
+
46
// Butterfly all-reduce across THREADS lanes of a warp using __shfl_xor_sync.
// Halves the exchange distance each recursion step until the Allreduce<2>
// base case; after it returns, every participating lane holds the reduced
// value. THREADS must be a power of two between 4 and 32 (2 is the
// specialized base case).
template<int THREADS>
struct Allreduce {
    static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4);
    template<typename T, typename Operator>
    static __device__ inline T run(T x, Operator &op) {
        constexpr int OFFSET = THREADS / 2;
        // Exchange with the lane OFFSET apart, combine, then recurse on the half-width.
        x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET));
        return Allreduce<OFFSET>::run(x, op);
    }
};
56
+
57
// Base case of the butterfly all-reduce: a single exchange with the
// adjacent lane (xor distance 1) terminates the recursion.
template<>
struct Allreduce<2> {
    template<typename T, typename Operator>
    static __device__ inline T run(T x, Operator &op) {
        x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1));
        return x;
    }
};
source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #include <c10/util/BFloat16.h>
6
+ #include <c10/util/Half.h>
7
+ #include <c10/cuda/CUDAException.h> // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
8
+
9
+ #include <cub/block/block_load.cuh>
10
+ #include <cub/block/block_store.cuh>
11
+
12
+ #include "causal_conv1d.h"
13
+ #include "causal_conv1d_common.h"
14
+ #include "static_switch.h"
15
+
16
// Compile-time configuration for the channel-first forward kernel: derived
// sizes, CUB load/store policies, and the shared-memory budget.
template<int kNThreads_, int kWidth_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
struct Causal_conv1d_fwd_kernel_traits {
    using input_t = input_t_;
    using weight_t = weight_t_;
    static constexpr int kNThreads = kNThreads_;
    static constexpr int kWidth = kWidth_;
    static constexpr int kNBytes = sizeof(input_t);
    static_assert(kNBytes == 2 || kNBytes == 4);   // fp16/bf16 or fp32
    // Elements per thread per chunk (one 16-byte vector's worth).
    static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
    // The kernel only keeps kNElts of left context per thread, so the
    // filter cannot be wider than that.
    static_assert(kWidth <= kNElts);
    static constexpr bool kIsVecLoad = kIsVecLoad_;
    using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
    // Scalar (warp-transposed) vs. direct vectorized CUB load/store policies.
    using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNElts, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
    using BlockLoadVecT = cub::BlockLoad<vec_t, kNThreads, 1, cub::BLOCK_LOAD_DIRECT>;
    using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNElts, cub::BLOCK_STORE_WARP_TRANSPOSE>;
    using BlockStoreVecT = cub::BlockStore<vec_t, kNThreads, 1, cub::BLOCK_STORE_DIRECT>;
    // The vectorized path needs no CUB temp storage; the scalar path reuses
    // one region for both load and store (they are separated by syncs).
    static constexpr int kSmemIOSize = kIsVecLoad
        ? 0
        : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)});
    // One vector per thread for the cross-thread boundary exchange.
    static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts;
    static constexpr int kSmemSize = kSmemIOSize + kSmemExchangeSize;
};
38
+
39
// Forward kernel for the channel-first causal conv1d, optionally fused with
// a SiLU activation.
//
// Grid: (batch, dim); each block processes one (batch, channel) sequence in
// chunks of kNThreads * kNElts elements. Each thread holds its kNElts
// current elements plus the kNElts elements immediately to its left
// (obtained from its neighbor through a shared-memory exchange), which is
// enough context because kWidth <= kNElts. Thread 0's left context for the
// first chunk is zero — that is what makes the convolution causal.
template<typename Ktraits>
__global__ __launch_bounds__(Ktraits::kNThreads)
void causal_conv1d_fwd_kernel(ConvParamsBase params) {
    constexpr int kWidth = Ktraits::kWidth;
    constexpr int kNThreads = Ktraits::kNThreads;
    constexpr int kNElts = Ktraits::kNElts;
    constexpr bool kIsVecLoad = Ktraits::kIsVecLoad;
    using input_t = typename Ktraits::input_t;
    using vec_t = typename Ktraits::vec_t;
    using weight_t = typename Ktraits::weight_t;

    // Shared memory: CUB temp storage regions alias the same buffer (their
    // uses are separated by __syncthreads), followed by the exchange area.
    extern __shared__ char smem_[];
    auto& smem_load = reinterpret_cast<typename Ktraits::BlockLoadT::TempStorage&>(smem_);
    auto& smem_load_vec = reinterpret_cast<typename Ktraits::BlockLoadVecT::TempStorage&>(smem_);
    auto& smem_store = reinterpret_cast<typename Ktraits::BlockStoreT::TempStorage&>(smem_);
    auto& smem_store_vec = reinterpret_cast<typename Ktraits::BlockStoreVecT::TempStorage&>(smem_);
    vec_t *smem_exchange = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize);

    const int tidx = threadIdx.x;
    const int batch_id = blockIdx.x;
    const int channel_id = blockIdx.y;
    // Base pointers for this (batch, channel) sequence.
    input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
        + channel_id * params.x_c_stride;
    weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + channel_id * params.weight_c_stride;
    input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
        + channel_id * params.out_c_stride;
    float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[channel_id]);

    // Thread 0 will load the last elements of the previous chunk, so we initialize those to 0.
    if (tidx == 0) {
        input_t zeros[kNElts] = {0};
        smem_exchange[kNThreads - 1] = reinterpret_cast<vec_t *>(zeros)[0];
    }

    // Load this channel's filter taps once, in float.
    float weight_vals[kWidth];
    #pragma unroll
    for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); }

    constexpr int kChunkSize = kNThreads * kNElts;
    const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize;
    for (int chunk = 0; chunk < n_chunks; ++chunk) {
        // Lower half [0, kNElts) = left-neighbor context; upper half
        // [kNElts, 2*kNElts) = this thread's elements for the chunk.
        input_t x_vals_load[2 * kNElts] = {0};
        if constexpr(kIsVecLoad) {
            Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(x), *reinterpret_cast<vec_t (*)[1]>(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts);
        } else {
            __syncthreads();
            Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast<input_t (*)[kNElts]>(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize);
        }
        x += kChunkSize;
        __syncthreads();
        // Thread kNThreads - 1 don't write yet, so that thread 0 can read
        // the last elements of the previous chunk.
        if (tidx < kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1]; }
        __syncthreads();
        // Each thread reads its left neighbor's vector; thread 0 reads slot
        // kNThreads - 1, which still holds the previous chunk's tail (zeros
        // for the first chunk).
        reinterpret_cast<vec_t *>(x_vals_load)[0] = smem_exchange[tidx > 0 ? tidx - 1 : kNThreads - 1];
        __syncthreads();
        // Now thread kNThreads - 1 can write the last elements of the current chunk.
        if (tidx == kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1]; }

        float x_vals[2 * kNElts];
        #pragma unroll
        for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); }

        // Causal convolution: output i depends on inputs i-kWidth+1 .. i.
        float out_vals[kNElts];
        #pragma unroll
        for (int i = 0; i < kNElts; ++i) {
            out_vals[i] = bias_val;
            #pragma unroll
            for (int w = 0; w < kWidth; ++w) {
                out_vals[i] += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)];
            }
        }

        // Optional fused SiLU: silu(z) = z * sigmoid(z).
        if (params.silu_activation) {
            #pragma unroll
            for (int i = 0; i < kNElts; ++i) {
                out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i]));
            }
        }

        // Convert back to the storage type and write the chunk out.
        input_t out_vals_store[kNElts];
        #pragma unroll
        for (int i = 0; i < kNElts; ++i) { out_vals_store[i] = out_vals[i]; }
        if constexpr(kIsVecLoad) {
            Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast<vec_t*>(out), reinterpret_cast<vec_t (&)[1]>(out_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts);
        } else {
            Ktraits::BlockStoreT(smem_store).Store(out, out_vals_store, params.seqlen - chunk * kChunkSize);
        }
        out += kChunkSize;
    }
}
131
+
132
// Configure and launch the channel-first forward kernel for one fixed
// (kNThreads, kWidth) combination. Chooses the vectorized load path when
// the sequence length divides evenly into per-thread vectors, and opts in
// to >48 KB dynamic shared memory when the traits require it.
template<int kNThreads, int kWidth, typename input_t, typename weight_t>
void causal_conv1d_fwd_launch(ConvParamsBase &params, cudaStream_t stream) {
    // Elements per thread per vector: 4 for 4-byte types, 8 for 2-byte types.
    static constexpr int kNElts = sizeof(input_t) == 4 ? 4 : 8;
    BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] {
        using Ktraits = Causal_conv1d_fwd_kernel_traits<kNThreads, kWidth, kIsVecLoad, input_t, weight_t>;
        constexpr int kSmemSize = Ktraits::kSmemSize;
        dim3 grid(params.batch, params.dim);
        auto kernel = &causal_conv1d_fwd_kernel<Ktraits>;
        // Dynamic shared memory beyond 48 KB requires an explicit attribute.
        if (kSmemSize >= 48 * 1024) {
            C10_CUDA_CHECK(cudaFuncSetAttribute(
                kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
        }
        kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
        C10_CUDA_KERNEL_LAUNCH_CHECK();
    });
}
148
+
149
// Entry point for the channel-first forward pass: select the kernel
// instantiation matching the convolution filter width.
// Supported widths are 2, 3 and 4; any other width is a no-op here
// (callers are expected to have validated the width beforehand).
template<typename input_t, typename weight_t>
void causal_conv1d_fwd_cuda(ConvParamsBase &params, cudaStream_t stream) {
    switch (params.width) {
        case 2:
            causal_conv1d_fwd_launch<128, 2, input_t, weight_t>(params, stream);
            break;
        case 3:
            causal_conv1d_fwd_launch<128, 3, input_t, weight_t>(params, stream);
            break;
        case 4:
            causal_conv1d_fwd_launch<128, 4, input_t, weight_t>(params, stream);
            break;
        default:
            // Unsupported width: do nothing, matching the original if/else chain.
            break;
    }
}
159
+
160
// Compile-time configuration for the channel-last (B, L, C layout) forward
// kernel. Mirrors the backward traits, minus the SiLU specialization flag
// (the forward kernel branches on params.silu_activation at runtime).
template<int kNThreads_, int kWidth_, int kChunkSizeL_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
struct Causal_conv1d_channellast_fwd_kernel_traits {
    // The cache line is 128 bytes, and we try to read 16 bytes per thread.
    // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension.
    // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128
    // threads). Each load is 16 x 32|64 elements in the L x C dimensions.
    using input_t = input_t_;
    using weight_t = weight_t_;
    static constexpr int kNThreads = kNThreads_;
    static_assert(kNThreads % 32 == 0);                  // whole warps only
    static constexpr int kNWarps = kNThreads / 32;
    static constexpr int kWidth = kWidth_;
    static constexpr int kChunkSizeL = kChunkSizeL_;     // sequence positions per block
    static constexpr int kNBytes = sizeof(input_t);
    static_assert(kNBytes == 2 || kNBytes == 4);         // fp16/bf16 or fp32
    // Elements per 16-byte vectorized access.
    static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
    // Channels covered by one 128-byte cache line.
    static constexpr int kNEltsPerRow = 128 / kNBytes;
    static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts;  // Always 8 for now
    static_assert(kNThreadsPerRow * kNBytes * kNElts == 128);
    static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow;  // Always 4 for now
    static_assert(kNColsPerWarp * kNThreadsPerRow == 32);
    // L-positions loaded per cooperative load, and loads needed per chunk.
    static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps;
    static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad;
    static_assert(kNLoads * kNColsPerLoad == kChunkSizeL);
    static constexpr bool kIsVecLoad = kIsVecLoad_;
    using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
    // using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNItems, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
    // using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNItems, cub::BLOCK_STORE_WARP_TRANSPOSE>;
    // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage),
    //                                            sizeof(typename BlockStoreT::TempStorage)});
    // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes;
};
192
+
193
+ template<typename Ktraits>
194
+ __global__ __launch_bounds__(Ktraits::kNThreads)
195
+ void causal_conv1d_channellast_fwd_kernel(ConvParamsBase params) {
196
+ constexpr int kWidth = Ktraits::kWidth;
197
+ constexpr int kNThreads = Ktraits::kNThreads;
198
+ constexpr int kNElts = Ktraits::kNElts;
199
+ constexpr int kNWarp = Ktraits::kNWarps;
200
+ constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow;
201
+ constexpr int kLPerLoad = Ktraits::kNColsPerLoad;
202
+ constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
203
+ constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
204
+ using input_t = typename Ktraits::input_t;
205
+ using vec_t = typename Ktraits::vec_t;
206
+ using weight_t = typename Ktraits::weight_t;
207
+
208
+ // Shared memory.
209
+ __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL][kChunkSizeC + kNElts];
210
+
211
+ const int tid = threadIdx.x;
212
+ const int l_idx = tid / kNThreadsPerC;
213
+ const int c_idx = tid % kNThreadsPerC;
214
+ const int batch_id = blockIdx.x;
215
+ const int chunk_l_id = blockIdx.y;
216
+ const int chunk_c_id = blockIdx.z;
217
+ input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
218
+ + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
219
+ weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr)
220
+ + chunk_c_id * kChunkSizeC * params.weight_c_stride;
221
+ input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
222
+ + (chunk_l_id * kChunkSizeL + l_idx) * params.out_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
223
+
224
+ #pragma unroll
225
+ for (int l = 0; l < Ktraits::kNLoads; ++l) {
226
+ input_t x_vals_load[kNElts] = {0};
227
+ if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
228
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
229
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + l * kLPerLoad * params.x_l_stride);
230
+ }
231
+ reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
232
+ }
233
+ // Load the elements from the previous chunk that are needed for convolution.
234
+ if (l_idx < kWidth - 1) {
235
+ input_t x_vals_load[kNElts] = {0};
236
+ if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0
237
+ && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen
238
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
239
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x - (kWidth - 1) * params.x_l_stride);
240
+ }
241
+ reinterpret_cast<vec_t *>(x_smem[l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
242
+ }
243
+
244
+ __syncthreads();
245
+
246
+ constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL);
247
+ static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC);
248
+ constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread;
249
+ static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL);
250
+ // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity
251
+ static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0);
252
+ static_assert((kLPerThread & (kLPerThread - 1)) == 0);
253
+ static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0);
254
+ static_assert(kNThreadsPerRow <= 32);
255
+
256
+ const int row_idx = tid / kNThreadsPerRow;
257
+ const int col_idx = tid % kNThreadsPerRow;
258
+
259
+ float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]);
260
+ float weight_vals[kWidth] = {0};
261
+ if (chunk_c_id + kChunkSizeC + row_idx < params.dim) {
262
+ #pragma unroll
263
+ for (int w = 0; w < kWidth; ++w) {
264
+ weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride];
265
+ }
266
+ }
267
+ float x_vals[kWidth - 1 + kLPerThread];
268
+ #pragma unroll
269
+ for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) {
270
+ x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
271
+ }
272
+
273
+ float out_vals[kLPerThread];
274
+ #pragma unroll
275
+ for (int i = 0; i < kLPerThread; ++i) {
276
+ out_vals[i] = bias_val;
277
+ #pragma unroll
278
+ for (int w = 0; w < kWidth; ++w) { out_vals[i] += weight_vals[w] * x_vals[i + w]; }
279
+ if (params.silu_activation) {out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i])); }
280
+ }
281
+
282
+ // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads.
283
+ __syncwarp();
284
+ #pragma unroll
285
+ for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = out_vals[i]; }
286
+ __syncthreads();
287
+
288
+ #pragma unroll
289
+ for (int l = 0; l < Ktraits::kNLoads; ++l) {
290
+ input_t out_vals_store[kNElts];
291
+ reinterpret_cast<vec_t *>(out_vals_store)[0] = reinterpret_cast<vec_t *>(x_smem[l * kLPerLoad + l_idx])[c_idx];
292
+ if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
293
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
294
+ *reinterpret_cast<vec_t *>(out + l * kLPerLoad * params.out_l_stride) = reinterpret_cast<vec_t *>(out_vals_store)[0];
295
+ }
296
+ }
297
+
298
+ }
299
+
300
+ template<int kNThreads, int kWidth, typename input_t, typename weight_t>
301
+ void causal_conv1d_channellast_fwd_launch(ConvParamsBase &params, cudaStream_t stream) {
302
+ using Ktraits = Causal_conv1d_channellast_fwd_kernel_traits<kNThreads, kWidth, 64, true, input_t, weight_t>;
303
+ // constexpr int kSmemSize = Ktraits::kSmemSize;
304
+ constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
305
+ constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
306
+ const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL;
307
+ const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC;
308
+ // printf("n_chunks_L: %d, n_chunks_C: %d\n", n_chunks_L, n_chunks_C);
309
+ dim3 grid(params.batch, n_chunks_L, n_chunks_C);
310
+ dim3 block(Ktraits::kNThreads);
311
+ auto kernel = &causal_conv1d_channellast_fwd_kernel<Ktraits>;
312
+ // if (kSmemSize >= 48 * 1024) {
313
+ // C10_CUDA_CHECK(cudaFuncSetAttribute(
314
+ // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
315
+ // }
316
+ // kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
317
+ kernel<<<grid, Ktraits::kNThreads, 0, stream>>>(params);
318
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
319
+ }
320
+
321
+ template<typename input_t, typename weight_t>
322
+ void causal_conv1d_channellast_fwd_cuda(ConvParamsBase &params, cudaStream_t stream) {
323
+ if (params.width == 2) {
324
+ causal_conv1d_channellast_fwd_launch<128, 2, input_t, weight_t>(params, stream);
325
+ } else if (params.width == 3) {
326
+ causal_conv1d_channellast_fwd_launch<128, 3, input_t, weight_t>(params, stream);
327
+ } else if (params.width == 4) {
328
+ causal_conv1d_channellast_fwd_launch<128, 4, input_t, weight_t>(params, stream);
329
+ }
330
+ }
331
+
332
// Explicit template instantiations: all 3x3 combinations of input element type
// and weight element type (fp32 / fp16 / bf16) reachable from the runtime
// dtype dispatch in causal_conv1d.cpp.
template void causal_conv1d_fwd_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_fwd_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_fwd_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_fwd_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_fwd_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_fwd_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_fwd_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_fwd_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_fwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);

template void causal_conv1d_channellast_fwd_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_channellast_fwd_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_channellast_fwd_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_channellast_fwd_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_channellast_fwd_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_channellast_fwd_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #include <c10/util/BFloat16.h>
6
+ #include <c10/util/Half.h>
7
+ #include <c10/cuda/CUDAException.h> // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
8
+
9
+ #include <cub/block/block_load.cuh>
10
+ #include <cub/block/block_store.cuh>
11
+
12
+ #include "causal_conv1d.h"
13
+ #include "causal_conv1d_common.h"
14
+ #include "static_switch.h"
15
+
16
// Compile-time configuration bundle for the single-timestep update kernel.
template<int kNThreads_, int kWidth_, typename input_t_, typename weight_t_>
struct Causal_conv1d_update_kernel_traits {
    using input_t = input_t_;    // element type of x / conv_state / out
    using weight_t = weight_t_;  // element type of weight / bias
    static constexpr int kNThreads = kNThreads_;  // threads per block; the kernel maps one thread to one channel
    static constexpr int kWidth = kWidth_;        // convolution filter width
    static constexpr int kNBytes = sizeof(input_t);
    // Only 2-byte (fp16/bf16) or 4-byte (fp32) input element types are supported.
    static_assert(kNBytes == 2 || kNBytes == 4);
};
25
+
26
+ template<typename Ktraits>
27
+ __global__ __launch_bounds__(Ktraits::kNThreads)
28
+ void causal_conv1d_update_kernel(ConvParamsBase params) {
29
+ constexpr int kWidth = Ktraits::kWidth;
30
+ constexpr int kNThreads = Ktraits::kNThreads;
31
+ using input_t = typename Ktraits::input_t;
32
+ using weight_t = typename Ktraits::weight_t;
33
+
34
+ const int tidx = threadIdx.x;
35
+ const int batch_id = blockIdx.x;
36
+ const int channel_id = blockIdx.y * kNThreads + tidx;
37
+ input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
38
+ + channel_id * params.x_c_stride;
39
+ input_t *conv_state = reinterpret_cast<input_t *>(params.conv_state_ptr) + batch_id * params.conv_state_batch_stride
40
+ + channel_id * params.conv_state_c_stride;
41
+ weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + channel_id * params.weight_c_stride;
42
+ input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
43
+ + channel_id * params.out_c_stride;
44
+ float bias_val = params.bias_ptr == nullptr || channel_id >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[channel_id]);
45
+
46
+ float weight_vals[kWidth] = {0};
47
+ if (channel_id < params.dim) {
48
+ #pragma unroll
49
+ for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); }
50
+ }
51
+
52
+ float x_vals[kWidth] = {0};
53
+ if (channel_id < params.dim) {
54
+ #pragma unroll
55
+ for (int i = 0; i < kWidth - 1; ++i) { x_vals[i] = float(conv_state[(i + 1) * params.conv_state_l_stride]); }
56
+ x_vals[kWidth - 1] = float(x[0]);
57
+ #pragma unroll
58
+ for (int i = 0; i < kWidth; ++i) { conv_state[i * params.conv_state_l_stride] = input_t(x_vals[i]); }
59
+ }
60
+
61
+ float out_val = bias_val;
62
+ #pragma unroll
63
+ for (int i = 0; i < kWidth; ++i) { out_val += weight_vals[i] * x_vals[i]; }
64
+ if (params.silu_activation) { out_val = out_val / (1 + expf(-out_val)); }
65
+ if (channel_id < params.dim) { out[0] = input_t(out_val); }
66
+ }
67
+
68
// Host-side launcher for the update kernel.
// Grid: x = batch, y = ceil(dim / kNThreads) so that every channel is covered
// by exactly one thread (the kernel bounds-checks the tail block).
template<int kNThreads, int kWidth, typename input_t, typename weight_t>
void causal_conv1d_update_launch(ConvParamsBase &params, cudaStream_t stream) {
    using Ktraits = Causal_conv1d_update_kernel_traits<kNThreads, kWidth, input_t, weight_t>;
    dim3 grid(params.batch, (params.dim + kNThreads - 1) / kNThreads);
    auto kernel = &causal_conv1d_update_kernel<Ktraits>;
    kernel<<<grid, Ktraits::kNThreads, 0, stream>>>(params);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
}
76
+
77
+ template<typename input_t, typename weight_t>
78
+ void causal_conv1d_update_cuda(ConvParamsBase &params, cudaStream_t stream) {
79
+ if (params.width == 2) {
80
+ causal_conv1d_update_launch<64, 2, input_t, weight_t>(params, stream);
81
+ } else if (params.width == 3) {
82
+ causal_conv1d_update_launch<64, 3, input_t, weight_t>(params, stream);
83
+ } else if (params.width == 4) {
84
+ causal_conv1d_update_launch<64, 4, input_t, weight_t>(params, stream);
85
+ }
86
+ }
87
+
88
// Explicit template instantiations: all 3x3 combinations of input element type
// and weight element type (fp32 / fp16 / bf16) reachable from the runtime
// dtype dispatch in causal_conv1d.cpp.
template void causal_conv1d_update_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_update_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_update_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_update_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_update_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_update_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_update_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_update_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
template void causal_conv1d_update_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
source_code/SegMamba/causal-conv1d/csrc/static_switch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Inspired by https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h

#pragma once

/// @param COND - a boolean expression to switch by
/// @param CONST_NAME - a name given for the constexpr bool variable.
/// @param ... - code to execute for true and false
///
/// Usage:
/// ```
/// BOOL_SWITCH(flag, BoolConst, [&] {
///     some_function<BoolConst>(...);
/// });
/// ```
// Both branches instantiate the callable with CONST_NAME bound to a genuine
// compile-time constant, so the body may use CONST_NAME as a template argument.
// The immediately-invoked lambda lets the macro be used in expression position
// and propagate the callable's return value.
#define BOOL_SWITCH(COND, CONST_NAME, ...)                                           \
    [&] {                                                                            \
        if (COND) {                                                                  \
            static constexpr bool CONST_NAME = true;                                 \
            return __VA_ARGS__();                                                    \
        } else {                                                                     \
            static constexpr bool CONST_NAME = false;                                \
            return __VA_ARGS__();                                                    \
        }                                                                            \
    }()
source_code/SegMamba/causal-conv1d/setup.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2023, Tri Dao.
2
+ import sys
3
+ import warnings
4
+ import os
5
+ import re
6
+ import ast
7
+ from pathlib import Path
8
+ from packaging.version import parse, Version
9
+ import platform
10
+
11
+ from setuptools import setup, find_packages
12
+ import subprocess
13
+
14
+ import urllib.request
15
+ import urllib.error
16
+ from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
17
+
18
+ import torch
19
+ from torch.utils.cpp_extension import (
20
+ BuildExtension,
21
+ CppExtension,
22
+ CUDAExtension,
23
+ CUDA_HOME,
24
+ )
25
+
26
+
27
+ with open("README.md", "r", encoding="utf-8") as fh:
28
+ long_description = fh.read()
29
+
30
+
31
+ # ninja build does not work unless include_dirs are abs path
32
+ this_dir = os.path.dirname(os.path.abspath(__file__))
33
+
34
+ PACKAGE_NAME = "causal_conv1d"
35
+
36
+ BASE_WHEEL_URL = "https://github.com/Dao-AILab/causal-conv1d/releases/download/{tag_name}/{wheel_name}"
37
+
38
+ # FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels
39
+ # SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation
40
+ FORCE_BUILD = os.getenv("CAUSAL_CONV1D_FORCE_BUILD", "FALSE") == "TRUE"
41
+ SKIP_CUDA_BUILD = os.getenv("CAUSAL_CONV1D_SKIP_CUDA_BUILD", "FALSE") == "TRUE"
42
+ # For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI
43
+ FORCE_CXX11_ABI = os.getenv("CAUSAL_CONV1D_FORCE_CXX11_ABI", "FALSE") == "TRUE"
44
+
45
+
46
def get_platform():
    """Return the platform tag used in wheel filenames for the current OS.

    Raises:
        ValueError: if running on a platform other than Linux, macOS, or Windows.
    """
    plat = sys.platform
    if plat.startswith("linux"):
        return "linux_x86_64"
    if plat == "darwin":
        # e.g. "13.4.1" -> "13.4"
        major_minor = ".".join(platform.mac_ver()[0].split(".")[:2])
        return f"macosx_{major_minor}_x86_64"
    if plat == "win32":
        return "win_amd64"
    raise ValueError("Unsupported platform: {}".format(plat))
59
+
60
+
61
def get_cuda_bare_metal_version(cuda_dir):
    """Run ``<cuda_dir>/bin/nvcc -V`` and return (raw output, parsed release version)."""
    raw_output = subprocess.check_output(
        [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
    )
    # nvcc prints e.g. "... release 11.8, V11.8.89"; grab the token after "release".
    tokens = raw_output.split()
    release_token = tokens[tokens.index("release") + 1]
    bare_metal_version = parse(release_token.split(",")[0])
    return raw_output, bare_metal_version
70
+
71
+
72
def check_if_cuda_home_none(global_option: str) -> None:
    """Warn (do not fail) when CUDA_HOME / nvcc cannot be located.

    A warning rather than an error is deliberate: the user may be installing a
    prebuilt wheel, in which case nvcc is never needed.
    """
    if CUDA_HOME is None:
        warnings.warn(
            f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
            "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
            "only images whose names contain 'devel' will provide nvcc."
        )
82
+
83
+
84
def append_nvcc_threads(nvcc_extra_args):
    """Return a new list of nvcc args with ``--threads 4`` appended for parallel compilation."""
    return [*nvcc_extra_args, "--threads", "4"]
86
+
87
+
88
# Build configuration assembled at import time.
# When CAUSAL_CONV1D_SKIP_CUDA_BUILD=TRUE (e.g. plain `sdist` on CI),
# ext_modules stays empty and no CUDA toolchain is required.
cmdclass = {}
ext_modules = []

if not SKIP_CUDA_BUILD:
    print("\n\ntorch.__version__  = {}\n\n".format(torch.__version__))
    TORCH_MAJOR = int(torch.__version__.split(".")[0])
    TORCH_MINOR = int(torch.__version__.split(".")[1])

    check_if_cuda_home_none("causal_conv1d")
    # Target architectures: Volta (sm_70) and Ampere (sm_80); Hopper (sm_90)
    # only when the toolkit is new enough to know about it.
    cc_flag = []
    if CUDA_HOME is not None:
        _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
        if bare_metal_version < Version("11.6"):
            raise RuntimeError(
                "causal_conv1d is only supported on CUDA 11.6 and above. "
                "Note: make sure nvcc has a supported version by running nvcc -V."
            )
        # NOTE(review): the arch flags and the 11.8 comparison below must stay
        # inside this guard — `bare_metal_version` only exists when CUDA_HOME
        # is set, so hoisting them out raises NameError on nvcc-less machines.
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_70,code=sm_70")
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_80,code=sm_80")
        if bare_metal_version >= Version("11.8"):
            cc_flag.append("-gencode")
            cc_flag.append("arch=compute_90,code=sm_90")

    # HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as
    # torch._C._GLIBCXX_USE_CXX11_ABI
    # https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920
    if FORCE_CXX11_ABI:
        torch._C._GLIBCXX_USE_CXX11_ABI = True

    ext_modules.append(
        CUDAExtension(
            name="causal_conv1d_cuda",
            sources=[
                "csrc/causal_conv1d.cpp",
                "csrc/causal_conv1d_fwd.cu",
                "csrc/causal_conv1d_bwd.cu",
                "csrc/causal_conv1d_update.cu",
            ],
            extra_compile_args={
                "cxx": ["-O3"],
                # -U__CUDA_NO_*: re-enable half/bfloat16 operators and conversions
                # that torch headers disable by default.
                "nvcc": append_nvcc_threads(
                    [
                        "-O3",
                        "-U__CUDA_NO_HALF_OPERATORS__",
                        "-U__CUDA_NO_HALF_CONVERSIONS__",
                        "-U__CUDA_NO_BFLOAT16_OPERATORS__",
                        "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
                        "-U__CUDA_NO_BFLOAT162_OPERATORS__",
                        "-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
                        "--expt-relaxed-constexpr",
                        "--expt-extended-lambda",
                        "--use_fast_math",
                        "--ptxas-options=-v",
                        "-lineinfo",
                    ]
                    + cc_flag
                ),
            },
            include_dirs=[this_dir],
        )
    )
153
+
154
+
155
def get_package_version():
    """Read ``__version__`` from causal_conv1d/__init__.py.

    If CAUSAL_CONV1D_LOCAL_VERSION is set, append it as a PEP 440 local
    version suffix (``<version>+<local>``).
    """
    init_path = Path(this_dir) / "causal_conv1d" / "__init__.py"
    with open(init_path, "r") as f:
        version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
    public_version = ast.literal_eval(version_match.group(1))
    local_version = os.environ.get("CAUSAL_CONV1D_LOCAL_VERSION")
    return f"{public_version}+{local_version}" if local_version else str(public_version)
164
+
165
+
166
def get_wheel_url():
    """Build the (url, filename) of the prebuilt wheel matching this environment."""
    # Determine the version numbers that will be used to determine the correct wheel
    # We're using the CUDA version used to build torch, not the one currently installed
    # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME)
    # NOTE(review): torch.version.cuda is None on CPU-only torch builds, which would
    # make parse() raise here — presumably only reached with a CUDA torch; confirm.
    torch_cuda_version = parse(torch.version.cuda)
    torch_version_raw = parse(torch.__version__)
    # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2
    # to save CI time. Minor versions should be compatible.
    torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2")
    python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
    platform_name = get_platform()
    causal_conv1d_version = get_package_version()
    # cuda_version = f"{cuda_version_raw.major}{cuda_version_raw.minor}"
    cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}"
    torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}"
    cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper()

    # Determine wheel URL based on CUDA version, torch version, python version and OS
    wheel_filename = f"{PACKAGE_NAME}-{causal_conv1d_version}+cu{cuda_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl"
    wheel_url = BASE_WHEEL_URL.format(
        tag_name=f"v{causal_conv1d_version}", wheel_name=wheel_filename
    )
    return wheel_url, wheel_filename
189
+
190
+
191
class CachedWheelsCommand(_bdist_wheel):
    """
    The CachedWheelsCommand plugs into the default bdist wheel, which is ran by pip when it cannot
    find an existing wheel (which is currently the case for all installs). We use
    the environment parameters to detect whether there is already a pre-built version of a compatible
    wheel available and short-circuits the standard full build pipeline.
    """

    def run(self):
        # CAUSAL_CONV1D_FORCE_BUILD=TRUE bypasses the download attempt entirely.
        if FORCE_BUILD:
            return super().run()

        wheel_url, wheel_filename = get_wheel_url()
        print("Guessing wheel URL: ", wheel_url)
        try:
            urllib.request.urlretrieve(wheel_url, wheel_filename)

            # Make the archive
            # Lifted from the root wheel processing command
            # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85
            if not os.path.exists(self.dist_dir):
                os.makedirs(self.dist_dir)

            impl_tag, abi_tag, plat_tag = self.get_tag()
            archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"

            # Move the downloaded wheel to the name/location setuptools expects.
            wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
            print("Raw wheel path", wheel_path)
            os.rename(wheel_filename, wheel_path)
        except urllib.error.HTTPError:
            print("Precompiled wheel not found. Building from source...")
            # If the wheel could not be downloaded, build from source
            super().run()
224
+
225
+
226
# Package metadata and build hooks. CachedWheelsCommand always handles
# bdist_wheel (prebuilt-wheel download with source-build fallback);
# BuildExtension is only wired in when a CUDA extension is actually built.
setup(
    name=PACKAGE_NAME,
    version=get_package_version(),
    packages=find_packages(
        exclude=(
            "build",
            "csrc",
            "include",
            "tests",
            "dist",
            "docs",
            "benchmarks",
            "causal_conv1d.egg-info",
        )
    ),
    author="Tri Dao",
    author_email="tri@tridao.me",
    description="Causal depthwise conv1d in CUDA, with a PyTorch interface",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Dao-AILab/causal-conv1d",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: Unix",
    ],
    ext_modules=ext_modules,
    cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension}
    if ext_modules
    else {
        "bdist_wheel": CachedWheelsCommand,
    },
    python_requires=">=3.7",
    install_requires=[
        "torch",
        "packaging",
        "ninja",
    ],
)
source_code/SegMamba/causal-conv1d/tests/test_causal_conv1d.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2023, Tri Dao.
2
+
3
+ import math
4
+
5
+ import torch
6
+ import pytest
7
+
8
+ from einops import rearrange
9
+
10
+ from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_ref
11
+ from causal_conv1d.causal_conv1d_interface import causal_conv1d_update, causal_conv1d_update_ref
12
+
13
+
14
@pytest.mark.parametrize("channel_last", [False, True])
# @pytest.mark.parametrize('channel_last', [True])
@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
# @pytest.mark.parametrize('itype', [torch.float16])
@pytest.mark.parametrize("silu_activation", [False, True])
# @pytest.mark.parametrize('silu_activation', [True])
@pytest.mark.parametrize("has_bias", [False, True])
# @pytest.mark.parametrize('has_bias', [True])
@pytest.mark.parametrize("width", [2, 3, 4])
# @pytest.mark.parametrize('width', [2])
@pytest.mark.parametrize(
    "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096]
)
# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 512, 784, 1024, 2048, 4096])
# @pytest.mark.parametrize('seqlen', [128])
def test_causal_conv1d(seqlen, width, has_bias, silu_activation, itype, channel_last):
    """Compare the CUDA forward and backward against the pure-PyTorch reference.

    Inputs are carved out of a larger tensor so their strides are non-contiguous,
    exercising the kernels' stride handling in both channel-first and
    channel-last memory layouts. Requires a CUDA device.
    """
    device = "cuda"
    # Looser tolerances for lower-precision input dtypes.
    rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3)
    if itype == torch.bfloat16:
        rtol, atol = 1e-2, 5e-2
    # weight/bias grads are accumulated in fp32, so they get tighter tolerances.
    rtolw, atolw = (1e-3, 1e-3)
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    # batch_size = 1
    dim = 4096 + 32  # Try dim not divisible by 64
    # dim = 64
    # Slice out of a wider tensor so x is a non-contiguous view.
    if not channel_last:
        x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_()
    else:
        x = rearrange(
            torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s"
        ).requires_grad_()
    weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
    if has_bias:
        bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
    else:
        bias = None
    # Independent leaf copies for the reference path.
    x_ref = x.detach().clone().requires_grad_()
    weight_ref = weight.detach().clone().requires_grad_()
    bias_ref = bias.detach().clone().requires_grad_() if bias is not None else None
    activation = None if not silu_activation else "silu"
    out = causal_conv1d_fn(x, weight, bias, activation=activation)
    out_ref = causal_conv1d_ref(x_ref, weight_ref, bias_ref, activation=activation)

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)

    # Backward pass: same upstream gradient through both implementations.
    g = torch.randn_like(out)
    out_ref.backward(g)
    out.backward(g)

    print(f"dx max diff: {(x.grad - x_ref.grad).abs().max().item()}")
    print(f"dweight max diff: {(weight.grad - weight_ref.grad).abs().max().item()}")
    if has_bias:
        print(f"dbias max diff: {(bias.grad - bias_ref.grad).abs().max().item()}")

    assert torch.allclose(x.grad, x_ref.grad.to(dtype=itype), rtol=rtol, atol=atol)
    assert torch.allclose(weight.grad, weight_ref.grad, rtol=rtolw, atol=atolw)
    if has_bias:
        assert torch.allclose(bias.grad, bias_ref.grad, rtol=rtolw, atol=atolw)
76
+
77
+
78
@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
# @pytest.mark.parametrize('itype', [torch.float16])
@pytest.mark.parametrize("silu_activation", [False, True])
# @pytest.mark.parametrize('silu_activation', [False])
@pytest.mark.parametrize("has_bias", [False, True])
# @pytest.mark.parametrize('has_bias', [True])
@pytest.mark.parametrize("width", [2, 3, 4])
# @pytest.mark.parametrize('width', [2])
@pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096])
# @pytest.mark.parametrize("dim", [2048])
def test_causal_conv1d_update(dim, width, has_bias, silu_activation, itype):
    """Compare the single-step update kernel against the PyTorch reference.

    Checks both the returned output and the in-place mutation of conv_state.
    Requires a CUDA device.
    """
    device = "cuda"
    # Looser tolerances for lower-precision input dtypes.
    rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3)
    if itype == torch.bfloat16:
        rtol, atol = 1e-2, 5e-2
    rtolw, atolw = (1e-3, 1e-3)
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    # batch_size = 1
    # dim = 64
    x = torch.randn(batch_size, dim, device=device, dtype=itype)
    conv_state = torch.randn(batch_size, dim, width, device=device, dtype=itype)
    weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
    if has_bias:
        bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
    else:
        bias = None
    # Separate copy: both implementations mutate their conv_state in place.
    conv_state_ref = conv_state.detach().clone()
    activation = None if not silu_activation else "silu"
    out = causal_conv1d_update(x, conv_state, weight, bias, activation=activation)
    out_ref = causal_conv1d_update_ref(x, conv_state_ref, weight, bias, activation=activation)

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    # The updated states must match exactly (same shift-and-store semantics).
    assert torch.equal(conv_state, conv_state_ref)
    assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
115
+
116
+
117
# @pytest.mark.parametrize("channel_last", [False, True])
@pytest.mark.parametrize('channel_last', [True])
# @pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize('itype', [torch.bfloat16])
# @pytest.mark.parametrize("silu_activation", [False, True])
@pytest.mark.parametrize('silu_activation', [True])
# @pytest.mark.parametrize("has_bias", [False, True])
@pytest.mark.parametrize('has_bias', [True])
# @pytest.mark.parametrize("width", [2, 3, 4])
@pytest.mark.parametrize('width', [4])
@pytest.mark.parametrize(
    # "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096]
    "seqlen", [2048]
)
# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 512, 784, 1024, 2048, 4096])
# @pytest.mark.parametrize('seqlen', [128])
def test_causal_conv1d_race_condition(seqlen, width, has_bias, silu_activation, itype, channel_last):
    """Determinism / race-condition check for forward + backward.

    Runs the identical fwd/bwd computation many times and asserts the outputs
    and gradients never drift from the first run; any flakiness indicates a
    race in the kernels. Requires a CUDA device.

    BUG FIX: the bias-gradient check used to re-assert ``dw_equal`` instead of
    ``db_equal``, so ``db`` determinism was computed but never verified.
    """
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    # batch_size = 1
    dim = 4096 + 32  # Try dim not divisible by 64
    # dim = 64
    # Slice out of a wider tensor so x is a non-contiguous view.
    if not channel_last:
        x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_()
    else:
        x = rearrange(
            torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s"
        ).requires_grad_()
    weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
    if has_bias:
        bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
    else:
        bias = None
    activation = None if not silu_activation else "silu"
    # Reference run: every subsequent iteration must reproduce these exactly
    # (outputs / dx) or within a tight tolerance (dw / db, fp32 accumulation).
    out0 = causal_conv1d_fn(x, weight, bias, activation=activation)
    g = torch.randn_like(out0)
    dx0, dw0, db0 = torch.autograd.grad(out0, (x, weight, bias), g)
    dw_atol = 1e-4
    db_atol = 1e-4

    for i in range(10000):
        out = causal_conv1d_fn(x, weight, bias, activation=activation)
        dx, dw, db = torch.autograd.grad(out, (x, weight, bias), g)
        dw_equal = torch.allclose(dw, dw0, atol=dw_atol)
        if has_bias:
            db_equal = torch.allclose(db, db0, atol=db_atol)
        assert torch.equal(out, out0)
        assert torch.equal(dx, dx0)
        assert dw_equal
        if has_bias:
            assert db_equal  # was: `assert dw_equal` (bias grad never checked)
source_code/SegMamba/light_training/.DS_Store ADDED
Binary file (8.2 kB). View file
 
source_code/SegMamba/light_training/augment/multi_processor.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter
2
+
3
+
4
class LimitedLenWrapper(NonDetMultiThreadedAugmenter):
    """NonDetMultiThreadedAugmenter whose ``len()`` is a fixed, caller-chosen value.

    Training loops that call ``len(loader)`` to size an epoch get the supplied
    number instead of anything derived from the underlying augmenter.
    """

    def __init__(self, my_imaginary_length, *args, **kwargs):
        # my_imaginary_length: value reported by __len__; all other arguments
        # are forwarded unchanged to NonDetMultiThreadedAugmenter.
        super().__init__(*args, **kwargs)
        self.len = my_imaginary_length

    def __len__(self):
        # Reported length only — presumably does not cap how many batches the
        # augmenter can actually yield; confirm against the training loop.
        return self.len
source_code/SegMamba/light_training/augment/train_augment.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import multiprocessing
3
+ import os
4
+ import shutil
5
+ import sys
6
+ import warnings
7
+ from copy import deepcopy
8
+ from datetime import datetime
9
+ from time import time, sleep
10
+ from typing import Union, Tuple, List
11
+ import numpy as np
12
+ import torch
13
+ from batchgenerators.dataloading.single_threaded_augmenter import SingleThreadedAugmenter
14
+ from batchgenerators.transforms.abstract_transforms import AbstractTransform, Compose
15
+ from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, \
16
+ ContrastAugmentationTransform, GammaTransform
17
+ from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
18
+ from batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform
19
+ from batchgenerators.transforms.spatial_transforms import SpatialTransform, MirrorTransform
20
+ from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor
21
+
22
+
23
def get_train_transforms(patch_size, mirror_axes=None):
    """Build the full nnU-Net-style training augmentation pipeline.

    The pipeline applies a spatial transform (random rotation/scaling),
    a bank of intensity transforms (noise, blur, brightness, contrast,
    simulated low resolution, two gamma variants), optional mirroring,
    then maps the ignore label -1 back to 0 and converts to tensors.

    Args:
        patch_size: spatial size of the output patch.
        mirror_axes: axes to mirror along; mirroring is skipped when
            None or empty.

    Returns:
        A batchgenerators ``Compose`` transform.
    """
    rot_range = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)

    pipeline = [
        SpatialTransform(
            patch_size, patch_center_dist_from_border=None,
            do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
            do_rotation=True, angle_x=rot_range, angle_y=rot_range, angle_z=rot_range,
            p_rot_per_axis=1,  # todo experiment with this
            do_scale=True, scale=(0.7, 1.4),
            border_mode_data="constant", border_cval_data=0, order_data=3,
            border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
            random_crop=False,  # random cropping is part of our dataloaders
            p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
            independent_scale_for_each_axis=False,  # todo experiment with this
        ),
        GaussianNoiseTransform(p_per_sample=0.1),
        GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True,
                              p_per_sample=0.2, p_per_channel=0.5),
        BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15),
        ContrastAugmentationTransform(p_per_sample=0.15),
        SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
                                       p_per_channel=0.5,
                                       order_downsample=0, order_upsample=3,
                                       p_per_sample=0.25, ignore_axes=None),
        GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1),
        GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3),
    ]

    if mirror_axes is not None and len(mirror_axes) > 0:
        pipeline.append(MirrorTransform(mirror_axes))

    pipeline.append(RemoveLabelTransform(-1, 0))
    pipeline.append(NumpyToTensor(['data', 'seg'], 'float'))

    return Compose(pipeline)
63
+
64
def get_train_transforms_nomirror(patch_size, mirror_axes=None):
    """Training augmentation pipeline with mirroring deliberately disabled.

    Identical to ``get_train_transforms`` except that no MirrorTransform is
    added; ``mirror_axes`` is accepted only for signature parity with the
    other pipeline builders and is ignored.
    """
    rot_range = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)

    pipeline = [
        SpatialTransform(
            patch_size, patch_center_dist_from_border=None,
            do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
            do_rotation=True, angle_x=rot_range, angle_y=rot_range, angle_z=rot_range,
            p_rot_per_axis=1,  # todo experiment with this
            do_scale=True, scale=(0.7, 1.4),
            border_mode_data="constant", border_cval_data=0, order_data=3,
            border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
            random_crop=False,  # random cropping is part of our dataloaders
            p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
            independent_scale_for_each_axis=False,  # todo experiment with this
        ),
        GaussianNoiseTransform(p_per_sample=0.1),
        GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True,
                              p_per_sample=0.2, p_per_channel=0.5),
        BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15),
        ContrastAugmentationTransform(p_per_sample=0.15),
        SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
                                       p_per_channel=0.5,
                                       order_downsample=0, order_upsample=3,
                                       p_per_sample=0.25, ignore_axes=None),
        GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1),
        GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3),
        RemoveLabelTransform(-1, 0),
        NumpyToTensor(['data', 'seg'], 'float'),
    ]

    return Compose(pipeline)
104
+
105
def get_train_transforms_onlymirror(patch_size, mirror_axes=None):
    """Training pipeline with intensity transforms and mirroring only.

    Unlike ``get_train_transforms``, the spatial (rotation/scaling)
    transform is deliberately omitted. ``patch_size`` is unused here and
    kept only for signature parity with the other pipeline builders.

    Args:
        patch_size: unused (signature parity).
        mirror_axes: axes to mirror along; skipped when None or empty.

    Returns:
        A batchgenerators ``Compose`` transform.
    """
    tr_transforms = []

    tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
    tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
                                               p_per_channel=0.5))
    tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
    tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
    tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
                                                        p_per_channel=0.5,
                                                        order_downsample=0, order_upsample=3, p_per_sample=0.25,
                                                        ignore_axes=None))
    tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
    tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))

    if mirror_axes is not None and len(mirror_axes) > 0:
        tr_transforms.append(MirrorTransform(mirror_axes))

    tr_transforms.append(RemoveLabelTransform(-1, 0))
    tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))

    return Compose(tr_transforms)
145
+
146
def get_train_transforms_onlyspatial(patch_size, mirror_axes=None):
    """Training pipeline with only the spatial transform (+ optional mirror).

    Intensity augmentations (noise, blur, brightness, contrast, low-res,
    gamma) are deliberately omitted relative to ``get_train_transforms``.

    Args:
        patch_size: spatial size of the output patch.
        mirror_axes: axes to mirror along; skipped when None or empty.

    Returns:
        A batchgenerators ``Compose`` transform.
    """
    tr_transforms = []
    angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)

    tr_transforms.append(SpatialTransform(
        patch_size, patch_center_dist_from_border=None,
        do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
        do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
        p_rot_per_axis=1,  # todo experiment with this
        do_scale=True, scale=(0.7, 1.4),
        border_mode_data="constant", border_cval_data=0, order_data=3,
        border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
        random_crop=False,  # random cropping is part of our dataloaders
        p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
        independent_scale_for_each_axis=False  # todo experiment with this
    ))

    if mirror_axes is not None and len(mirror_axes) > 0:
        tr_transforms.append(MirrorTransform(mirror_axes))

    tr_transforms.append(RemoveLabelTransform(-1, 0))
    tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))

    return Compose(tr_transforms)
186
+
187
def get_train_transforms_noaug(patch_size, mirror_axes=None):
    """No-augmentation pipeline: only label cleanup and tensor conversion.

    Maps the ignore label -1 back to 0 and converts 'data'/'seg' arrays to
    float tensors. ``patch_size`` and ``mirror_axes`` are unused and kept
    only so this is a drop-in replacement for the other
    ``get_train_transforms_*`` builders.

    Returns:
        A batchgenerators ``Compose`` transform.
    """
    tr_transforms = []
    tr_transforms.append(RemoveLabelTransform(-1, 0))
    tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))

    return Compose(tr_transforms)
227
+
228
def get_validation_transforms() -> AbstractTransform:
    """Minimal validation pipeline.

    Maps the ignore label -1 back to 0 and converts the 'data' and 'seg'
    arrays to float tensors; no augmentation is applied at validation time.
    """
    return Compose([
        RemoveLabelTransform(-1, 0),
        NumpyToTensor(['data', 'seg'], 'float'),
    ])
237
+
238
+ # import SimpleITK as sitk
239
+ # import matplotlib.pyplot as plt
240
+
241
+ # image = sitk.ReadImage("/Users/xingzhaohu/Documents/工作/code/medical_image_processing/SSL/BraTS20_Training_365/BraTS20_Training_365_flair.nii.gz")
242
+ # label = sitk.ReadImage("/Users/xingzhaohu/Documents/工作/code/medical_image_processing/SSL/BraTS20_Training_365/BraTS20_Training_365_seg.nii.gz")
243
+
244
+ # # image = sitk.ReadImage("./AIIB/image/AIIB23_171.nii.gz")
245
+ # # label = sitk.ReadImage("./AIIB/gt/AIIB23_171.nii.gz")
246
+
247
+ # image_arr = sitk.GetArrayFromImage(image)
248
+ # label_arr = sitk.GetArrayFromImage(label)
249
+ # intensityproperties = {}
250
+
251
+ # norm = RescaleTo01Normalization(intensityproperties=intensityproperties)
252
+ # image_arr = image_arr[0:128, 0:128, 0:128][None, None]
253
+ # label_arr = label_arr[0:128, 0:128, 0:128][None, None]
254
+
255
+
256
+ # image_arr = norm.run(image_arr, label_arr)
257
+
258
+ # print(image_arr.shape, label_arr.shape)
259
+
260
+ # tr_transforms = Compose(tr_transforms)
261
+
262
+ # trans_out = tr_transforms(data=image_arr, seg=label_arr)
263
+
264
+ # image_arr_aug = trans_out["data"]
265
+ # label_arr_aug = trans_out["seg"]
266
+
267
+ # print(image_arr_aug.shape, label_arr_aug.shape)
268
+
269
+
270
+ # for i in range(40, 128):
271
+ # plt.subplot(1, 4, 1)
272
+ # plt.imshow(image_arr[0, 0, i], cmap="gray")
273
+ # plt.subplot(1, 4, 2)
274
+ # plt.imshow(label_arr[0, 0, i], cmap="gray")
275
+ # plt.subplot(1, 4, 3)
276
+ # plt.imshow(image_arr_aug[0, 0, i], cmap="gray")
277
+ # plt.subplot(1, 4, 4)
278
+ # plt.imshow(label_arr_aug[0, 0, i], cmap="gray")
279
+ # plt.show()
source_code/SegMamba/light_training/dataloading/__init__.py ADDED
File without changes
source_code/SegMamba/light_training/dataloading/base_data_loader.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from typing import Union, Tuple
3
+ import time
4
+
5
class DataLoaderMultiProcess:
    """nnU-Net-style patch sampler for 3D medical volumes.

    Each call to ``__next__`` draws ``batch_size`` random cases from
    ``dataset``, crops a ``patch_size`` window from each (zero-padding
    where the window leaves the volume), and can force a fraction of
    samples to contain foreground voxels via class-location oversampling.
    Designed to be driven by a multi-threaded augmenter (``set_thread_id``).
    """

    def __init__(self, dataset,
                 patch_size,
                 batch_size=2,
                 oversample_foreground_percent=0.33,
                 probabilistic_oversampling=False,
                 print_time=False):
        # NOTE: a stray `pass` statement was removed from the top of this
        # method; it had no effect.
        self.dataset = dataset
        self.patch_size = patch_size
        # self.annotated_classes_key = annotated_classes_key ## (1, 2, 3 ..)
        self.batch_size = batch_size
        self.keys = [i for i in range(len(dataset))]
        self.thread_id = 0
        self.oversample_foreground_percent = oversample_foreground_percent
        # extra padding applied on top of patch_size; stays zero unless a
        # volume is smaller than the patch (handled in get_bbox)
        self.need_to_pad = (np.array([0, 0, 0])).astype(int)

        # select the oversampling policy once at construction time
        self.get_do_oversample = self._oversample_last_XX_percent if not probabilistic_oversampling \
            else self._probabilistic_oversampling
        self.data_shape = None  # lazily derived from the first case
        self.seg_shape = None
        self.print_time = print_time

    def determine_shapes(self):
        """Derive batch array shapes (B, C, *patch_size) from one sample case."""
        # load one case
        item = self.dataset.__getitem__(0)
        data, seg, properties = item["data"], item["seg"], item["properties"]
        num_color_channels = data.shape[0]
        num_output_channels = seg.shape[0]
        patch_size = self.patch_size
        data_shape = (self.batch_size, num_color_channels, patch_size[0], patch_size[1], patch_size[2])
        seg_shape = (self.batch_size, num_output_channels, patch_size[0], patch_size[1], patch_size[2])
        return data_shape, seg_shape

    def generate_train_batch(self):
        """Assemble one training batch of cropped/padded patches.

        Returns a dict with 'data', 'seg', 'properties' and 'keys'; when the
        dataset also provides 'data_global'/'seg_global' those are included
        as batched arrays as well.
        """
        selected_keys = np.random.choice(self.keys, self.batch_size, True, None)
        if self.data_shape is None:
            self.data_shape, self.seg_shape = self.determine_shapes()

        data_all = np.zeros(self.data_shape, dtype=np.float32)
        data_all_global = np.zeros(self.data_shape, dtype=np.float32)
        seg_all_global = np.zeros(self.seg_shape, dtype=np.float32)
        data_global = None
        seg_global = None
        seg_all = np.zeros(self.seg_shape, dtype=np.float32)

        case_properties = []

        for j, key in enumerate(selected_keys):

            force_fg = self.get_do_oversample(j)
            s = time.time()
            item = self.dataset.__getitem__(key)
            e = time.time()
            if self.print_time:
                print(f"read single data time is {e - s}")
            data, seg, properties = item["data"], item["seg"], item["properties"]

            if "data_global" in item:
                data_global = item["data_global"]

            if "seg_global" in item:
                seg_global = item["seg_global"]

            case_properties.append(properties)
            shape = data.shape[1:]
            dim = len(shape)

            s = time.time()
            bbox_lbs, bbox_ubs = self.get_bbox(shape, force_fg, properties['class_locations'])
            e = time.time()
            if self.print_time:
                print(f"get bbox time is {e - s}")
            # First crop to the part of the bbox that lies inside the volume:
            # the resulting smaller array is cheaper to pad afterwards.
            valid_bbox_lbs = [max(0, bbox_lbs[i]) for i in range(dim)]
            valid_bbox_ubs = [min(shape[i], bbox_ubs[i]) for i in range(dim)]

            # seg is padded with 0 here (labels were remapped upstream);
            # the -1 ignore label is handled by the augmentation pipeline.
            this_slice = tuple([slice(0, data.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)])
            data = data[this_slice]

            this_slice = tuple([slice(0, seg.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)])
            seg = seg[this_slice]

            s = time.time()
            # pad amount = how far the bbox extends past the volume on each side
            padding = [(-min(0, bbox_lbs[i]), max(bbox_ubs[i] - shape[i], 0)) for i in range(dim)]
            data_all[j] = np.pad(data, ((0, 0), *padding), 'constant', constant_values=0)
            seg_all[j] = np.pad(seg, ((0, 0), *padding), 'constant', constant_values=0)

            if data_global is not None:
                data_all_global[j] = data_global

            if seg_global is not None:
                seg_all_global[j] = seg_global

            e = time.time()
            if self.print_time:
                print(f"box is {bbox_lbs, bbox_ubs}, padding is {padding}")
                print(f"setting data value time is {e - s}")

        if data_global is None:
            return {'data': data_all,
                    'seg': seg_all, 'properties': case_properties,
                    'keys': selected_keys}

        return {'data': data_all, "data_global": data_all_global,
                "seg_global": seg_all_global,
                'seg': seg_all, 'properties': case_properties,
                'keys': selected_keys}

    def __next__(self):
        return self.generate_train_batch()

    def set_thread_id(self, thread_id):
        # called by the multi-threaded augmenter so each worker knows its id
        self.thread_id = thread_id

    def _oversample_last_XX_percent(self, sample_idx: int) -> bool:
        """
        determines whether sample sample_idx in a minibatch needs to be guaranteed foreground
        """
        return not sample_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))

    def _probabilistic_oversampling(self, sample_idx: int) -> bool:
        """Oversample each sample independently with the configured probability."""
        return np.random.uniform() < self.oversample_foreground_percent

    def get_bbox(self, data_shape: np.ndarray, force_fg: bool, class_locations: Union[dict, None],
                 overwrite_class: Union[int, Tuple[int, ...]] = None, verbose: bool = False):
        """Choose a patch bounding box (lower/upper corners) for one case.

        When ``force_fg`` is set, the box is centered (clamped to bounds) on
        a randomly chosen voxel of a randomly chosen foreground class from
        ``class_locations``; otherwise the box is sampled uniformly. Bounds
        may extend outside the volume; the caller pads accordingly.
        """
        need_to_pad = self.need_to_pad.copy()
        dim = len(data_shape)

        for d in range(dim):
            # if the volume (plus padding) is still smaller than the patch,
            # pad symmetrically up to the patch size
            if need_to_pad[d] + data_shape[d] < self.patch_size[d]:
                need_to_pad[d] = self.patch_size[d] - data_shape[d]

        # valid range for the lower corner: -need_to_pad // 2 up to
        # shape - patch_size + ceil(need_to_pad / 2)
        lbs = [- need_to_pad[i] // 2 for i in range(dim)]
        ubs = [data_shape[i] + need_to_pad[i] // 2 + need_to_pad[i] % 2 - self.patch_size[i] for i in range(dim)]

        if not force_fg:
            bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]
        else:
            assert class_locations is not None, 'if force_fg is set class_locations cannot be None'
            if overwrite_class is not None:
                assert overwrite_class in class_locations.keys(), 'desired class ("overwrite_class") does not ' \
                                                                  'have class_locations (missing key)'
            # classes that actually occur in this case (keys may be tuples)
            eligible_classes_or_regions = [i for i in class_locations.keys() if len(class_locations[i]) > 0]

            if len(eligible_classes_or_regions) == 0:
                # this only happens if some image does not contain foreground voxels at all
                selected_class = None
                if verbose:
                    print('case does not contain any foreground classes')
            else:
                selected_class = eligible_classes_or_regions[np.random.choice(len(eligible_classes_or_regions))] if \
                    (overwrite_class is None or (overwrite_class not in eligible_classes_or_regions)) else overwrite_class

            voxels_of_that_class = class_locations[selected_class] if selected_class is not None else None

            if voxels_of_that_class is not None and len(voxels_of_that_class) > 0:
                selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))]
                # selected voxel is the patch center; subtract half the patch
                # size for the lower corner and clamp into [lbs, ubs].
                # i + 1 because coordinate 0 is the channel dimension.
                bbox_lbs = [max(lbs[i], selected_voxel[i + 1] - self.patch_size[i] // 2) for i in range(dim)]
            else:
                # no foreground available: fall back to random cropping
                bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]

        bbox_ubs = [bbox_lbs[i] + self.patch_size[i] for i in range(dim)]

        return bbox_lbs, bbox_ubs
source_code/SegMamba/light_training/dataloading/dataset.py ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright 2020 - 2022 MONAI Consortium
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+ from sklearn.model_selection import KFold ## K折交叉验证
13
+ import pickle
14
+ import os
15
+ import json
16
+ import math
17
+ import numpy as np
18
+ import torch
19
+ import SimpleITK as sitk
20
+ from tqdm import tqdm
21
+ from torch.utils.data import Dataset
22
+ import glob
23
+ from light_training.dataloading.utils import unpack_dataset
24
+ import random
25
+
26
class MedicalDataset(Dataset):
    """Dataset over preprocessed cases stored as .npz + .pkl pairs.

    Each entry of ``datalist`` is the path to a case's ``.npz`` archive; a
    sibling ``.pkl`` holds per-case properties, and the unpacked ``.npy`` /
    ``_seg.npy`` files hold the image / segmentation arrays. Property dicts
    are cached eagerly at construction; arrays are memory-mapped lazily in
    ``__getitem__``.
    """

    def __init__(self, datalist, test=False) -> None:
        super().__init__()

        self.datalist = datalist
        self.test = test  # in test mode segmentations are not loaded

        # cache the (small) per-case property dicts up front
        self.data_cached = []
        for p in tqdm(self.datalist, total=len(self.datalist)):
            self.data_cached.append(self.load_pkl(p))

        # unpack each unique case folder's .npz archives into .npy once
        print(f"unpacking data ....")
        folder = []
        for p in self.datalist:
            f = os.path.dirname(p)
            if f not in folder:
                folder.append(f)
        for f in folder:
            unpack_dataset(f,
                           unpack_segmentation=True,
                           overwrite_existing=False,
                           num_processes=8)

        print(f"data length is {len(self.datalist)}")

    def load_pkl(self, data_path):
        """Load the properties pickle that sits next to ``data_path``.

        ``data_path`` ends in ``.npz``; the pickle shares its stem.
        """
        properties_path = f"{data_path[:-4]}.pkl"
        # use a context manager: the original left the file handle open
        with open(properties_path, "rb") as df:
            return pickle.load(df)

    def post(self, batch_data):
        """Hook for subclasses to post-process a batch; identity by default."""
        return batch_data

    def read_data(self, data_path):
        """Memory-map the unpacked image (and, unless test mode, seg) arrays."""
        image_path = data_path.replace(".npz", ".npy")
        seg_path = data_path.replace(".npz", "_seg.npy")
        image_data = np.load(image_path, "r+")

        seg_data = None
        if not self.test:
            seg_data = np.load(seg_path, "r+")
        return image_data, seg_data

    def __getitem__(self, i):
        """Return a dict with 'data', 'properties' and (train mode) 'seg'."""
        image, seg = self.read_data(self.datalist[i])
        properties = self.data_cached[i]

        if seg is None:
            return {
                "data": image,
                "properties": properties
            }
        return {
            "data": image,
            "seg": seg,
            "properties": properties
        }

    def __len__(self):
        return len(self.datalist)
98
+
99
def get_train_test_loader_from_test_list(data_dir, test_list):
    """Split the *.npz cases under ``data_dir`` into train/test datasets.

    ``test_list`` holds case names (optionally with a ``.nii.gz`` suffix);
    every npz whose name stem (text before the first dot) matches one of
    them goes to the test set, the rest to the train set.

    Returns:
        ``[train_ds, test_ds]`` as MedicalDataset instances.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    # normalize the requested test names by stripping the nifti suffix
    test_names = [name.replace(".nii.gz", "") for name in test_list]

    train_datalist, test_datalist = [], []
    for path in all_paths:
        stem = path.split("/")[-1].split(".")[0]
        if stem in test_names:
            test_datalist.append(path)
        else:
            train_datalist.append(path)

    print(f"training data is {len(train_datalist)}")
    print(f"test data is {len(test_datalist)}", test_datalist)

    train_ds = MedicalDataset(train_datalist)
    test_ds = MedicalDataset(test_datalist)

    return [train_ds, test_ds]
126
+
127
def get_kfold_data(data_paths, n_splits, shuffle=False):
    """Partition ``data_paths`` into ``n_splits`` cross-validation folds.

    Args:
        data_paths: list of case paths to split.
        n_splits: number of folds.
        shuffle: forwarded to sklearn's KFold.

    Returns:
        A list of ``n_splits`` dicts, each with "train_data" and "val_data"
        path lists.
    """
    X = np.arange(len(data_paths))
    kfold = KFold(n_splits=n_splits, shuffle=shuffle)
    # index folds -> path folds (comprehensions replace the manual loops)
    return [
        {"train_data": [data_paths[i] for i in train_idx],
         "val_data": [data_paths[i] for i in val_idx]}
        for train_idx, val_idx in kfold.split(X)
    ]
141
+
142
def get_kfold_loader(data_dir, fold=0, test_dir=None):
    """Build datasets for 5-fold cross-validation.

    Returns ``[train_ds, val_ds, test_ds]`` where ``test_ds`` is None
    unless ``test_dir`` is given.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")
    split = get_kfold_data(all_paths, 5)[fold]

    train_datalist = split["train_data"]
    val_datalist = split["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
165
+
166
def get_all_training_loader(data_dir, fold=0, test_dir=None):
    """Train on ALL labeled data; validation is fold ``fold``'s val split.

    Note that the validation cases are deliberately a subset of the
    training cases here.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")
    split = get_kfold_data(all_paths, 5)[fold]

    train_datalist = all_paths  # every labeled case is used for training
    val_datalist = split["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
190
+
191
def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
    """Build datasets from three separate directories of *.npz cases.

    Returns ``[train_ds, val_ds, test_ds]``; ``test_ds`` is None when no
    ``test_dir`` is supplied.
    """
    train_datalist = glob.glob(f"{train_dir}/*.npz")
    val_datalist = glob.glob(f"{val_dir}/*.npz")

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    # the test dataset is built first, matching the original side-effect order
    if test_dir is not None:
        test_datalist = glob.glob(f"{test_dir}/*.npz")
        print(f"test data is {len(test_datalist)}")
        test_ds = MedicalDataset(test_datalist, test=True)
    else:
        test_ds = None

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    return [train_ds, val_ds, test_ds]
211
+
212
def get_train_val_test_loader_from_split_json(data_dir, split_json_file):
    """Build (train, val, test) datasets from a JSON split file.

    The JSON must contain "train", "validation" and "test" lists of file
    names; each name is joined onto ``data_dir``.

    Returns:
        ``[train_ds, val_ds, test_ds]`` as MedicalDataset instances.
    """
    # json is imported at module level; the redundant local import and the
    # json.loads(f.read()) round-trip were removed.
    with open(split_json_file, "r") as f:
        datalist = json.load(f)

    train_datalist = [os.path.join(data_dir, p) for p in datalist["train"]]
    val_datalist = [os.path.join(data_dir, p) for p in datalist["validation"]]
    test_datalist = [os.path.join(data_dir, p) for p in datalist["test"]]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    return [train_ds, val_ds, test_ds]
241
+
242
+
243
def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
    """Randomly split the *.npz cases in ``data_dir`` into train/val/test.

    The list is shuffled with the given ``seed`` and cut by the three
    rates. Note: the test split is taken from the END of the shuffled
    list, so if the rates sum to more than 1 the splits can overlap.

    Returns:
        ``[train_ds, val_ds, test_ds]`` as MedicalDataset instances.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    train_number = int(len(all_paths) * train_rate)
    val_number = int(len(all_paths) * val_rate)
    test_number = int(len(all_paths) * test_rate)
    random.seed(seed)
    random.shuffle(all_paths)

    train_datalist = all_paths[:train_number]
    val_datalist = all_paths[train_number: train_number + val_number]
    # BUG FIX: all_paths[-0:] is the WHOLE list, so a test_rate of 0 used
    # to put every case in the test set. Guard against test_number == 0.
    test_datalist = all_paths[len(all_paths) - test_number:] if test_number > 0 else []

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    loader = [train_ds, val_ds, test_ds]

    return loader
271
+
272
def get_train_loader_from_train(data_dir):
    """Dataset over every *.npz case found directly under ``data_dir``."""
    return MedicalDataset(glob.glob(f"{data_dir}/*.npz"))
281
+
282
def get_test_loader_from_test(data_dir):
    """Dataset over every *.npz case found directly under ``data_dir``.

    NOTE(review): unlike the other test-set constructors this does NOT pass
    ``test=True``, so segmentations ARE loaded — confirm this is intended.
    """
    return MedicalDataset(glob.glob(f"{data_dir}/*.npz"))
288
+
289
def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
    """Train on all cases pooled from several directories.

    ``data_dir`` is an iterable of directories; validation is fold
    ``fold``'s val split (a subset of the training cases, deliberately).

    Returns ``[train_ds, val_ds, test_ds]``; ``test_ds`` is None unless
    ``test_dir`` is given.
    """
    all_paths = []
    for directory in data_dir:
        all_paths.extend(glob.glob(f"{directory}/*.npz"))

    split = get_kfold_data(all_paths, 5)[fold]

    train_datalist = all_paths  # every pooled case is used for training
    val_datalist = split["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
source_code/SegMamba/light_training/dataloading/dataset_sdm_edge.py ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright 2020 - 2022 MONAI Consortium
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+ from sklearn.model_selection import KFold ## K折交叉验证
13
+ import pickle
14
+ import os
15
+ import json
16
+ import math
17
+ import numpy as np
18
+ import torch
19
+ from monai import transforms
20
+ import SimpleITK as sitk
21
+ from tqdm import tqdm
22
+ from torch.utils.data import Dataset
23
+ import glob
24
+ from light_training.dataloading.utils import unpack_dataset
25
+ import random
26
+ import torch
27
+ import numpy as np
28
+ from scipy.ndimage import distance_transform_edt as distance
29
+ from skimage import segmentation as skimage_seg
30
+ from skimage.morphology import dilation, disk
31
+ import scipy.ndimage as ndimage
32
+
33
def get_edge_points(img):
    """Return the boundary points of a binary segmentation mask.

    The edge is the set of foreground points removed by one binary erosion
    with a connectivity-1 structuring element (2-D or 3-D to match the input).
    """
    rank = 2 if len(img.shape) == 2 else 3
    structure = ndimage.generate_binary_structure(rank, 1)
    eroded = ndimage.binary_erosion(img, structure)
    return np.asarray(img, np.uint8) - np.asarray(eroded, np.uint8)
45
+
46
def edge_3d(image_3d):
    """Compute per-(batch, channel) boundary maps for a stacked binary volume.

    Applies ``get_edge_points`` independently to every ``[i, j]`` sub-array
    and returns the results in an array shaped like the input.
    """
    edges = np.zeros_like(image_3d)
    for b in range(image_3d.shape[0]):
        for c in range(image_3d.shape[1]):
            edges[b, c] = get_edge_points(image_3d[b, c])
    return edges
55
+
56
def compute_sdf(img_gt, out_shape):
    """Compute the normalized signed distance map (SDM) of a binary mask.

    input: segmentation, shape = (batch_size, c, x, y, z)
    output: the Signed Distance Map (SDM), normalized to [-1, 1]:
        sdf(x) = 0          x on the segmentation boundary
        sdf(x) < 0          x inside the segmentation
        sdf(x) > 0          x outside the segmentation
    Channels whose mask is empty are left as all zeros.
    """
    img_gt = img_gt.astype(np.uint8)
    normalized_sdf = np.zeros(out_shape)

    for b in range(out_shape[0]):  # batch size
        for c in range(out_shape[1]):  # channels
            posmask = img_gt[b, c].astype(np.bool_)
            if posmask.any():
                negmask = ~posmask
                posdis = distance(posmask)
                negdis = distance(negmask)
                boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
                # Min-max normalize each side to [0, 1] before combining.
                # NOTE(review): if posmask covers the whole sub-volume, negdis is
                # constant and this divides by zero — presumably upstream data
                # never produces a full-volume mask; confirm if that changes.
                sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) \
                    - (posdis - np.min(posdis)) / (np.max(posdis) - np.min(posdis))
                sdf[boundary == 1] = 0
                normalized_sdf[b][c] = sdf
                # Fix: the original compared floats with exact equality
                # (`np.min(sdf) == -1.0`) and used `print(...)` as the assert
                # message (which evaluates to None); use a tolerance and a
                # proper message instead.
                assert np.isclose(np.min(sdf), -1.0), (
                    f"sdf min {np.min(sdf)} != -1 "
                    f"(posdis {np.min(posdis)}..{np.max(posdis)}, negdis {np.min(negdis)}..{np.max(negdis)})")
                assert np.isclose(np.max(sdf), 1.0), (
                    f"sdf max {np.max(sdf)} != 1 "
                    f"(posdis {np.min(posdis)}..{np.max(posdis)}, negdis {np.min(negdis)}..{np.max(negdis)})")

    return normalized_sdf
86
+
87
def convert_labels(labels):
    """Convert BraTS-style integer labels into three overlapping binary channels.

    Channels (in order): TC = labels {1, 3}; WT = labels {1, 2, 3}; ET = label 3.
    Returns a float tensor of shape (1, 3, *labels.shape).
    """
    labels = labels[None, None]
    tc = (labels == 1) | (labels == 3)
    wt = tc | (labels == 2)
    et = labels == 3
    return torch.cat([tc, wt, et], dim=1).float()
93
+
94
class MedicalDataset(Dataset):
    """Dataset over preprocessed ``.npz`` cases with precomputed SDM targets.

    Each case consists of ``<case>.npz`` (packed arrays), ``<case>.pkl``
    (properties, incl. the case name) and — after unpacking — memory-mappable
    ``<case>.npy`` / ``<case>_seg.npy`` files.  For labeled cases the
    segmentation is concatenated with a precomputed signed-distance map
    loaded from ``sdm_dir``.
    """

    def __init__(self, datalist, test=False,
                 sdm_dir="./data/fullres/train_sdm/") -> None:
        """
        Args:
            datalist: paths to the ``.npz`` case files.
            test: if True, segmentations are not loaded.
            sdm_dir: folder holding ``<name>_seg_sdm.npy`` files.  Previously a
                hard-coded path inside ``__getitem__``; the old value is kept
                as the default for backward compatibility.
        """
        super().__init__()

        self.datalist = datalist
        self.test = test
        self.sdm_dir = sdm_dir

        # Cache the (small) per-case property pickles up front.
        self.data_cached = []
        for p in tqdm(self.datalist, total=len(self.datalist)):
            self.data_cached.append(self.load_pkl(p))

        # Unpack every .npz into .npy once so __getitem__ can memory-map.
        print(f"unpacking data ....")
        folders = []
        for p in self.datalist:
            f = os.path.dirname(p)
            if f not in folders:
                folders.append(f)
        for f in folders:
            unpack_dataset(f,
                           unpack_segmentation=True,
                           overwrite_existing=False,
                           num_processes=8)

        print(f"data length is {len(self.datalist)}")

    def load_pkl(self, data_path):
        """Load the ``.pkl`` properties file that sits next to ``data_path``.

        Fix: the original had a stray ``pass`` before the body and left the
        file handle open; a context manager closes it deterministically.
        """
        properties_path = f"{data_path[:-4]}.pkl"
        with open(properties_path, "rb") as df:
            return pickle.load(df)

    def read_data(self, data_path):
        """Memory-map the image and, unless ``self.test``, the segmentation."""
        image_path = data_path.replace(".npz", ".npy")
        seg_path = data_path.replace(".npz", "_seg.npy")
        image_data = np.load(image_path, "r")

        seg_data = None
        if not self.test:
            seg_data = np.load(seg_path, "r")
        return image_data, seg_data

    def __getitem__(self, i):
        image, seg = self.read_data(self.datalist[i])

        properties = self.data_cached[i]
        case_name = properties["name"]

        if seg is not None:
            # Stack the precomputed SDM below the segmentation channels.
            sdm = np.load(os.path.join(self.sdm_dir, f"{case_name}_seg_sdm.npy"), "r")
            sdm = sdm[0]
            seg = np.concatenate([seg, sdm], axis=0)

        if seg is None:
            return {
                "data": image,
                "properties": properties
            }
        else:
            return {
                "data": image,
                "seg": seg,
                "properties": properties
            }

    def __len__(self):
        return len(self.datalist)
188
+
189
def get_kfold_data(data_paths, n_splits, shuffle=False):
    """Partition ``data_paths`` into ``n_splits`` cross-validation folds.

    Returns a list of ``{"train_data": [...], "val_data": [...]}`` dicts,
    one per fold, in the deterministic order produced by ``KFold``.
    """
    indices = np.arange(len(data_paths))
    splitter = KFold(n_splits=n_splits, shuffle=shuffle)
    folds = []
    for train_idx, val_idx in splitter.split(indices):
        folds.append({
            "train_data": [data_paths[i] for i in train_idx],
            "val_data": [data_paths[j] for j in val_idx],
        })
    return folds
203
+
204
def get_kfold_loader(data_dir, fold=0, test_dir=None):
    """Return ``[train_ds, val_ds, test_ds]`` for one 5-fold CV split.

    ``test_ds`` is ``None`` unless ``test_dir`` is supplied, in which case its
    cases are loaded without segmentations.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")
    split = get_kfold_data(all_paths, 5)[fold]

    train_datalist = split["train_data"]
    val_datalist = split["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
227
+
228
def get_all_training_loader(data_dir, fold=0, test_dir=None):
    """Train on *all* labeled cases; ``fold`` only selects the validation list.

    Note the validation cases are therefore a subset of the training cases.
    Returns ``[train_ds, val_ds, test_ds]`` (``test_ds`` may be ``None``).
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")
    val_datalist = get_kfold_data(all_paths, 5)[fold]["val_data"]
    train_datalist = all_paths

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
252
+
253
def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
    """Build datasets from three pre-split directories.

    ``test_dir`` is optional; its cases are loaded without segmentations.
    Returns ``[train_ds, val_ds, test_ds]``.
    """
    train_datalist = glob.glob(f"{train_dir}/*.npz")
    val_datalist = glob.glob(f"{val_dir}/*.npz")

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    if test_dir is None:
        test_ds = None
    else:
        test_datalist = glob.glob(f"{test_dir}/*.npz")
        print(f"test data is {len(test_datalist)}")
        test_ds = MedicalDataset(test_datalist, test=True)

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    return [train_ds, val_ds, test_ds]
273
+
274
def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
    """Randomly split the labeled cases of ``data_dir`` into train/val/test sets.

    Args:
        data_dir: folder containing the ``.npz`` cases.
        train_rate / val_rate / test_rate: split fractions; they should sum to
            1, since the test set is taken from the *end* of the shuffled list.
        seed: RNG seed.  Fix: the shuffle was previously unseeded, so the split
            differed between runs; seeding makes it reproducible and consistent
            with the sibling implementation in ``dataloading_global/dataset.py``.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    train_number = int(len(all_paths) * train_rate)
    val_number = int(len(all_paths) * val_rate)
    test_number = int(len(all_paths) * test_rate)

    random.seed(seed)
    random.shuffle(all_paths)

    train_datalist = all_paths[:train_number]
    val_datalist = all_paths[train_number: train_number + val_number]
    test_datalist = all_paths[-test_number:]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    loader = [train_ds, val_ds, test_ds]

    return loader
301
+
302
def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
    """Train on every case from several folders; ``fold`` picks the val list.

    ``data_dir`` is an iterable of directories.  All of their ``.npz`` cases
    form the training set; the fold's validation cases overlap with it.
    Returns ``[train_ds, val_ds, test_ds]`` (``test_ds`` may be ``None``).
    """
    all_paths = [case for folder in data_dir for case in glob.glob(f"{folder}/*.npz")]

    val_datalist = get_kfold_data(all_paths, 5)[fold]["val_data"]
    train_datalist = all_paths

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
source_code/SegMamba/light_training/dataloading/get_train_val_test_datalist.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import glob
3
+ import random
4
+ import json
5
+
6
def get_train_val_test_list_from_fulldata(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42,
                                          output_path="./data_split.json"):
    """Split the ``.npz`` cases of ``data_dir`` into train/val/test filename
    lists and persist them as JSON.

    Fixes over the original:
      * filenames are extracted with ``os.path.basename`` instead of
        ``p.split("/")[-1]``, which broke on Windows path separators;
      * the output location is a parameter (old hard-coded path is the default).

    The test list is taken from the end of the shuffled list, so the rates
    should sum to 1 for the three sets to stay disjoint.
    """
    import os  # local import keeps this change self-contained

    all_paths = glob.glob(f"{data_dir}/*.npz")

    # Keep only the filenames so the split file is relocatable.
    all_paths = [os.path.basename(p) for p in all_paths]

    train_number = int(len(all_paths) * train_rate)
    val_number = int(len(all_paths) * val_rate)
    test_number = int(len(all_paths) * test_rate)

    random.seed(seed)
    random.shuffle(all_paths)

    train_datalist = all_paths[:train_number]
    val_datalist = all_paths[train_number: train_number + val_number]
    test_datalist = all_paths[-test_number:]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    datalist = {
        "train": train_datalist,
        "validation": val_datalist,
        "test": test_datalist,
    }

    with open(output_path, "w") as f:
        json.dump(datalist, f)
source_code/SegMamba/light_training/dataloading/utils.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import os
3
+ from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles
4
+ import multiprocessing
5
+
6
+ def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None:
7
+ # try:
8
+ a = np.load(npz_file) # inexpensive, no compression is done here. This just reads metadata
9
+ if overwrite_existing or not isfile(npz_file[:-3] + "npy"):
10
+ np.save(npz_file[:-3] + "npy", a['data'])
11
+
12
+ if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")):
13
+ np.save(npz_file[:-4] + "_seg.npy", a['seg'])
14
+
15
def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False,
                   num_processes: int = 8):
    """
    all npz files in this folder belong to the dataset, unpack them all

    A spawn-context pool runs ``_convert_to_npy`` over every ``.npz`` file.
    """
    # List the .npz files with the stdlib instead of batchgenerators.subfiles
    # (same result: joined paths, regular files only, sorted).
    npz_files = sorted(
        os.path.join(folder, name)
        for name in os.listdir(folder)
        if name.endswith(".npz") and os.path.isfile(os.path.join(folder, name))
    )
    with multiprocessing.get_context("spawn").Pool(num_processes) as p:
        p.starmap(_convert_to_npy, zip(npz_files,
                                       [unpack_segmentation] * len(npz_files),
                                       [overwrite_existing] * len(npz_files)))
source_code/SegMamba/light_training/dataloading_global/__init__.py ADDED
File without changes
source_code/SegMamba/light_training/dataloading_global/dataset.py ADDED
@@ -0,0 +1,329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright 2020 - 2022 MONAI Consortium
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+ from sklearn.model_selection import KFold ## K折交叉验证
13
+ import pickle
14
+ import os
15
+ import json
16
+ import math
17
+ import numpy as np
18
+ import torch
19
+ from monai import transforms
20
+ import SimpleITK as sitk
21
+ from tqdm import tqdm
22
+ from torch.utils.data import Dataset
23
+ import glob
24
+ from light_training.dataloading_global.utils import unpack_dataset
25
+ import random
26
+
27
class MedicalDataset(Dataset):
    """Dataset over cases that carry both a local crop and a global view.

    Each case unpacks to ``<case>.npy`` / ``<case>_global.npy`` and (for
    labeled data) ``<case>_seg.npy`` / ``<case>_global_seg.npy``; per-case
    properties come from ``<case>.pkl``.
    """

    def __init__(self, datalist, test=False) -> None:
        super().__init__()

        self.datalist = datalist
        self.test = test

        # Cache the (small) per-case property pickles up front.
        self.data_cached = []
        for p in tqdm(self.datalist, total=len(self.datalist)):
            self.data_cached.append(self.load_pkl(p))

        # Unpack every .npz into memory-mappable .npy files once.
        print(f"unpacking data ....")
        folder = []
        for p in self.datalist:
            f = os.path.dirname(p)
            if f not in folder:
                folder.append(f)
        for f in folder:
            unpack_dataset(f,
                           unpack_segmentation=True,
                           overwrite_existing=False,
                           num_processes=8)

        print(f"data length is {len(self.datalist)}")

    def load_pkl(self, data_path):
        """Load the ``.pkl`` properties file that sits next to ``data_path``.

        Fix: the original had a stray ``pass`` before the body and leaked the
        file handle; a context manager closes it deterministically.
        """
        properties_path = f"{data_path[:-4]}.pkl"
        with open(properties_path, "rb") as df:
            return pickle.load(df)

    def post(self, batch_data):
        """Hook for subclass post-processing; identity by default."""
        return batch_data

    def read_data(self, data_path):
        """Memory-map the local/global image pair and, unless ``self.test``,
        the matching segmentations.

        Returns ``(image, image_global, seg, seg_global)``; the last two are
        ``None`` for test-mode datasets.
        """
        image_path = data_path.replace(".npz", ".npy")
        seg_path = data_path.replace(".npz", "_seg.npy")
        image_global_path = data_path.replace(".npz", "_global.npy")
        seg_global_path = data_path.replace(".npz", "_global_seg.npy")

        image_data = np.load(image_path, "r+")
        image_data_global = np.load(image_global_path, "r+")

        seg_data = None
        # Fix: seg_global_data was only assigned inside the `if not self.test`
        # branch, so test-mode datasets crashed with NameError at the return.
        seg_global_data = None
        if not self.test:
            seg_data = np.load(seg_path, "r+")
            seg_global_data = np.load(seg_global_path, "r+")

        return image_data, image_data_global, seg_data, seg_global_data

    def __getitem__(self, i):
        image, image_data_global, seg, seg_global = self.read_data(self.datalist[i])

        properties = self.data_cached[i]

        if seg is None:
            return {
                "data": image,
                "data_global": image_data_global,
                "properties": properties
            }
        else:
            return {
                "data": image,
                "data_global": image_data_global,
                "seg": seg,
                "seg_global": seg_global,
                "properties": properties
            }

    def __len__(self):
        return len(self.datalist)
110
+
111
def get_train_test_loader_from_test_list(data_dir, test_list):
    """Split the cases in ``data_dir`` into train/test by an explicit name list.

    ``test_list`` holds case names (optionally with a ``.nii.gz`` suffix); a
    case whose basename-before-the-first-dot matches an entry goes to the test
    set, everything else to training.  Returns ``[train_ds, test_ds]``.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    test_names = [t.replace(".nii.gz", "") for t in test_list]

    test_datalist = []
    train_datalist = []
    for p in all_paths:
        # Fix: use os.path.basename instead of p.split("/")[-1], which broke
        # on Windows path separators.  The first-dot truncation is kept.
        stem = os.path.basename(p).split(".")[0]
        if stem in test_names:
            test_datalist.append(p)
        else:
            train_datalist.append(p)

    print(f"training data is {len(train_datalist)}")
    print(f"test data is {len(test_datalist)}", test_datalist)

    train_ds = MedicalDataset(train_datalist)
    test_ds = MedicalDataset(test_datalist)

    return [train_ds, test_ds]
138
+
139
def get_kfold_data(data_paths, n_splits, shuffle=False):
    """Partition ``data_paths`` into ``n_splits`` cross-validation folds.

    Returns one ``{"train_data": [...], "val_data": [...]}`` dict per fold,
    in the deterministic order produced by ``KFold``.
    """
    indices = np.arange(len(data_paths))
    splitter = KFold(n_splits=n_splits, shuffle=shuffle)
    folds = []
    for train_idx, val_idx in splitter.split(indices):
        folds.append({
            "train_data": [data_paths[i] for i in train_idx],
            "val_data": [data_paths[j] for j in val_idx],
        })
    return folds
153
+
154
def get_kfold_loader(data_dir, fold=0, test_dir=None):
    """Return ``[train_ds, val_ds, test_ds]`` for one 5-fold CV split.

    ``test_ds`` is ``None`` unless ``test_dir`` is supplied, in which case its
    cases are loaded without segmentations.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")
    split = get_kfold_data(all_paths, 5)[fold]

    train_datalist = split["train_data"]
    val_datalist = split["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
177
+
178
def get_all_training_loader(data_dir, fold=0, test_dir=None):
    """Train on *all* labeled cases; ``fold`` only selects the validation list.

    Note the validation cases are therefore a subset of the training cases.
    Returns ``[train_ds, val_ds, test_ds]`` (``test_ds`` may be ``None``).
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")
    val_datalist = get_kfold_data(all_paths, 5)[fold]["val_data"]
    train_datalist = all_paths

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
202
+
203
def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
    """Build datasets from three pre-split directories.

    ``test_dir`` is optional; its cases are loaded without segmentations.
    Returns ``[train_ds, val_ds, test_ds]``.
    """
    train_datalist = glob.glob(f"{train_dir}/*.npz")
    val_datalist = glob.glob(f"{val_dir}/*.npz")

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    if test_dir is None:
        test_ds = None
    else:
        test_datalist = glob.glob(f"{test_dir}/*.npz")
        print(f"test data is {len(test_datalist)}")
        test_ds = MedicalDataset(test_datalist, test=True)

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    return [train_ds, val_ds, test_ds]
223
+
224
def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
    """Seeded random train/val/test split over the cases in ``data_dir``.

    The test set is taken from the *end* of the shuffled list, so the rates
    should sum to 1 for the three sets to stay disjoint.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    n_total = len(all_paths)
    n_train = int(n_total * train_rate)
    n_val = int(n_total * val_rate)
    n_test = int(n_total * test_rate)

    random.seed(seed)
    random.shuffle(all_paths)

    train_datalist = all_paths[:n_train]
    val_datalist = all_paths[n_train: n_train + n_val]
    test_datalist = all_paths[-n_test:]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    return [train_ds, val_ds, test_ds]
251
+
252
def get_train_val_test_loader_from_split_json(data_dir, split_json_file):
    """Build train/val/test datasets from a previously saved split JSON.

    The JSON holds bare case filenames under "train"/"validation"/"test";
    each name is resolved against ``data_dir`` before loading.
    """
    import json

    with open(split_json_file, "r") as f:
        datalist = json.loads(f.read())

    train_datalist = [os.path.join(data_dir, name) for name in datalist["train"]]
    val_datalist = [os.path.join(data_dir, name) for name in datalist["validation"]]
    test_datalist = [os.path.join(data_dir, name) for name in datalist["test"]]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    return [train_ds, val_ds, test_ds]
282
+
283
def get_train_loader_from_train(data_dir):
    """Build a training dataset from every labeled ``.npz`` case in ``data_dir``.

    No validation split is carved out; all cases are used for training.
    """
    case_paths = glob.glob(f"{data_dir}/*.npz")
    return MedicalDataset(case_paths)
292
+
293
def get_test_loader_from_test(data_dir):
    """Build a dataset over every ``.npz`` case in ``data_dir``.

    Note ``test=True`` is NOT passed, so segmentations are still loaded.
    """
    case_paths = glob.glob(f"{data_dir}/*.npz")
    return MedicalDataset(case_paths)
299
+
300
def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
    """Train on every case from several folders; ``fold`` picks the val list.

    ``data_dir`` is an iterable of directories.  All of their ``.npz`` cases
    form the training set; the fold's validation cases overlap with it.
    Returns ``[train_ds, val_ds, test_ds]`` (``test_ds`` may be ``None``).
    """
    all_paths = [case for folder in data_dir for case in glob.glob(f"{folder}/*.npz")]

    val_datalist = get_kfold_data(all_paths, 5)[fold]["val_data"]
    train_datalist = all_paths

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
source_code/SegMamba/light_training/dataloading_global/utils.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import os
3
+ from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles
4
+ import multiprocessing
5
+
6
+ def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None:
7
+ # try:
8
+ a = np.load(npz_file) # inexpensive, no compression is done here. This just reads metadata
9
+ if overwrite_existing or not isfile(npz_file[:-3] + "npy"):
10
+ np.save(npz_file[:-3] + "npy", a['data'])
11
+ np.save(npz_file[:-4] + "_global.npy", a['data_global'])
12
+ np.save(npz_file[:-4] + "_global_seg.npy", a['seg_global'])
13
+
14
+ if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")):
15
+ np.save(npz_file[:-4] + "_seg.npy", a['seg'])
16
+
17
def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False,
                   num_processes: int = 8):
    """
    all npz files in this folder belong to the dataset, unpack them all

    A spawn-context pool runs ``_convert_to_npy`` over every ``.npz`` file.
    """
    # List the .npz files with the stdlib instead of batchgenerators.subfiles
    # (same result: joined paths, regular files only, sorted).
    npz_files = sorted(
        os.path.join(folder, name)
        for name in os.listdir(folder)
        if name.endswith(".npz") and os.path.isfile(os.path.join(folder, name))
    )
    with multiprocessing.get_context("spawn").Pool(num_processes) as p:
        p.starmap(_convert_to_npy, zip(npz_files,
                                       [unpack_segmentation] * len(npz_files),
                                       [overwrite_existing] * len(npz_files)))
source_code/SegMamba/light_training/evaluation/metric.py ADDED
@@ -0,0 +1,406 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import numpy as np
16
+ from medpy import metric
17
+
18
+
19
def assert_shape(test, reference):
    """Fail with a descriptive message unless both arrays share one shape."""
    assert test.shape == reference.shape, \
        "Shape mismatch: {} and {}".format(test.shape, reference.shape)
23
+
24
+
25
class ConfusionMatrix:
    """Lazily-computed binary confusion matrix over two label arrays.

    ``test`` and ``reference`` are compared as foreground (!= 0) versus
    background (== 0); counts, size and emptiness flags are cached and only
    computed when first requested.
    """

    def __init__(self, test=None, reference=None):
        self.tp = None
        self.fp = None
        self.tn = None
        self.fn = None
        self.size = None
        self.reference_empty = None
        self.reference_full = None
        self.test_empty = None
        self.test_full = None
        self.set_reference(reference)
        self.set_test(test)

    def set_test(self, test):
        """Attach a new prediction array and invalidate cached statistics."""
        self.test = test
        self.reset()

    def set_reference(self, reference):
        """Attach a new reference array and invalidate cached statistics."""
        self.reference = reference
        self.reset()

    def reset(self):
        """Drop every cached statistic; they are recomputed on next access."""
        self.tp = None
        self.fp = None
        self.tn = None
        self.fn = None
        self.size = None
        self.test_empty = None
        self.test_full = None
        self.reference_empty = None
        self.reference_full = None

    def compute(self):
        """Populate the counts, size and emptiness flags from the two arrays."""
        if self.test is None or self.reference is None:
            raise ValueError("'test' and 'reference' must both be set to compute confusion matrix.")

        assert_shape(self.test, self.reference)

        pred_fg = self.test != 0
        ref_fg = self.reference != 0
        self.tp = int((pred_fg * ref_fg).sum())
        self.fp = int((pred_fg * (~ref_fg)).sum())
        self.tn = int(((~pred_fg) * (~ref_fg)).sum())
        self.fn = int(((~pred_fg) * ref_fg).sum())
        self.size = int(np.prod(self.reference.shape, dtype=np.int64))
        self.test_empty = not np.any(self.test)
        self.test_full = np.all(self.test)
        self.reference_empty = not np.any(self.reference)
        self.reference_full = np.all(self.reference)

    def get_matrix(self):
        """Return (tp, fp, tn, fn), computing them if any is missing."""
        if any(entry is None for entry in (self.tp, self.fp, self.tn, self.fn)):
            self.compute()
        return self.tp, self.fp, self.tn, self.fn

    def get_size(self):
        """Return the total element count of the reference array."""
        if self.size is None:
            self.compute()
        return self.size

    def get_existence(self):
        """Return (test_empty, test_full, reference_empty, reference_full)."""
        if any(flag is None for flag in
               (self.test_empty, self.test_full, self.reference_empty, self.reference_full)):
            self.compute()
        return self.test_empty, self.test_full, self.reference_empty, self.reference_full
103
+
104
+
105
def dice(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """2TP / (2TP + FP + FN)"""
    if confusion_matrix is None:
        confusion_matrix = ConfusionMatrix(test, reference)

    tp, fp, _, fn = confusion_matrix.get_matrix()
    test_empty, _, reference_empty, _ = confusion_matrix.get_existence()

    # Both masks empty: the score is undefined.
    if test_empty and reference_empty:
        return float("NaN") if nan_for_nonexisting else 0.

    return float(2. * tp / (2 * tp + fp + fn))
121
+
122
+
123
def jaccard(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """TP / (TP + FP + FN)"""
    if confusion_matrix is None:
        confusion_matrix = ConfusionMatrix(test, reference)

    tp, fp, _, fn = confusion_matrix.get_matrix()
    test_empty, _, reference_empty, _ = confusion_matrix.get_existence()

    # Both masks empty: the score is undefined.
    if test_empty and reference_empty:
        return float("NaN") if nan_for_nonexisting else 0.

    return float(tp / (tp + fp + fn))
139
+
140
+
141
def precision(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """TP / (TP + FP)"""
    if confusion_matrix is None:
        confusion_matrix = ConfusionMatrix(test, reference)

    tp, fp, _, _ = confusion_matrix.get_matrix()
    test_empty, _, _, _ = confusion_matrix.get_existence()

    # No predicted foreground: the score is undefined.
    if test_empty:
        return float("NaN") if nan_for_nonexisting else 0.

    return float(tp / (tp + fp))
157
+
158
+
159
+ def sensitivity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
160
+ """TP / (TP + FN)"""
161
+
162
+ if confusion_matrix is None:
163
+ confusion_matrix = ConfusionMatrix(test, reference)
164
+
165
+ tp, fp, tn, fn = confusion_matrix.get_matrix()
166
+ test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
167
+
168
+ if reference_empty:
169
+ if nan_for_nonexisting:
170
+ return float("NaN")
171
+ else:
172
+ return 0.
173
+
174
+ return float(tp / (tp + fn))
175
+
176
+
177
+ def recall(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
178
+ """TP / (TP + FN)"""
179
+
180
+ return sensitivity(test, reference, confusion_matrix, nan_for_nonexisting, **kwargs)
181
+
182
+
183
+ def specificity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
184
+ """TN / (TN + FP)"""
185
+
186
+ if confusion_matrix is None:
187
+ confusion_matrix = ConfusionMatrix(test, reference)
188
+
189
+ tp, fp, tn, fn = confusion_matrix.get_matrix()
190
+ test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
191
+
192
+ if reference_full:
193
+ if nan_for_nonexisting:
194
+ return float("NaN")
195
+ else:
196
+ return 0.
197
+
198
+ return float(tn / (tn + fp))
199
+
200
+
201
+ def accuracy(test=None, reference=None, confusion_matrix=None, **kwargs):
202
+ """(TP + TN) / (TP + FP + FN + TN)"""
203
+
204
+ if confusion_matrix is None:
205
+ confusion_matrix = ConfusionMatrix(test, reference)
206
+
207
+ tp, fp, tn, fn = confusion_matrix.get_matrix()
208
+
209
+ return float((tp + tn) / (tp + fp + tn + fn))
210
+
211
+
212
+ def fscore(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, beta=1., **kwargs):
213
+ """(1 + b^2) * TP / ((1 + b^2) * TP + b^2 * FN + FP)"""
214
+
215
+ precision_ = precision(test, reference, confusion_matrix, nan_for_nonexisting)
216
+ recall_ = recall(test, reference, confusion_matrix, nan_for_nonexisting)
217
+
218
+ return (1 + beta*beta) * precision_ * recall_ /\
219
+ ((beta*beta * precision_) + recall_)
220
+
221
+
222
+ def false_positive_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
223
+ """FP / (FP + TN)"""
224
+
225
+ return 1 - specificity(test, reference, confusion_matrix, nan_for_nonexisting)
226
+
227
+
228
+ def false_omission_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
229
+ """FN / (TN + FN)"""
230
+
231
+ if confusion_matrix is None:
232
+ confusion_matrix = ConfusionMatrix(test, reference)
233
+
234
+ tp, fp, tn, fn = confusion_matrix.get_matrix()
235
+ test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
236
+
237
+ if test_full:
238
+ if nan_for_nonexisting:
239
+ return float("NaN")
240
+ else:
241
+ return 0.
242
+
243
+ return float(fn / (fn + tn))
244
+
245
+
246
+ def false_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
247
+ """FN / (TP + FN)"""
248
+
249
+ return 1 - sensitivity(test, reference, confusion_matrix, nan_for_nonexisting)
250
+
251
+
252
+ def true_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
253
+ """TN / (TN + FP)"""
254
+
255
+ return specificity(test, reference, confusion_matrix, nan_for_nonexisting)
256
+
257
+
258
+ def false_discovery_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
259
+ """FP / (TP + FP)"""
260
+
261
+ return 1 - precision(test, reference, confusion_matrix, nan_for_nonexisting)
262
+
263
+
264
+ def negative_predictive_value(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
265
+ """TN / (TN + FN)"""
266
+
267
+ return 1 - false_omission_rate(test, reference, confusion_matrix, nan_for_nonexisting)
268
+
269
+
270
+ def total_positives_test(test=None, reference=None, confusion_matrix=None, **kwargs):
271
+ """TP + FP"""
272
+
273
+ if confusion_matrix is None:
274
+ confusion_matrix = ConfusionMatrix(test, reference)
275
+
276
+ tp, fp, tn, fn = confusion_matrix.get_matrix()
277
+
278
+ return tp + fp
279
+
280
+
281
+ def total_negatives_test(test=None, reference=None, confusion_matrix=None, **kwargs):
282
+ """TN + FN"""
283
+
284
+ if confusion_matrix is None:
285
+ confusion_matrix = ConfusionMatrix(test, reference)
286
+
287
+ tp, fp, tn, fn = confusion_matrix.get_matrix()
288
+
289
+ return tn + fn
290
+
291
+
292
+ def total_positives_reference(test=None, reference=None, confusion_matrix=None, **kwargs):
293
+ """TP + FN"""
294
+
295
+ if confusion_matrix is None:
296
+ confusion_matrix = ConfusionMatrix(test, reference)
297
+
298
+ tp, fp, tn, fn = confusion_matrix.get_matrix()
299
+
300
+ return tp + fn
301
+
302
+
303
+ def total_negatives_reference(test=None, reference=None, confusion_matrix=None, **kwargs):
304
+ """TN + FP"""
305
+
306
+ if confusion_matrix is None:
307
+ confusion_matrix = ConfusionMatrix(test, reference)
308
+
309
+ tp, fp, tn, fn = confusion_matrix.get_matrix()
310
+
311
+ return tn + fp
312
+
313
+
314
+ def hausdorff_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
315
+
316
+ if confusion_matrix is None:
317
+ confusion_matrix = ConfusionMatrix(test, reference)
318
+
319
+ test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
320
+
321
+ if test_empty or test_full or reference_empty or reference_full:
322
+ if nan_for_nonexisting:
323
+ return float("NaN")
324
+ else:
325
+ return 0
326
+
327
+ test, reference = confusion_matrix.test, confusion_matrix.reference
328
+
329
+ return metric.hd(test, reference, voxel_spacing, connectivity)
330
+
331
+
332
+ def hausdorff_distance_95(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
333
+
334
+ if confusion_matrix is None:
335
+ confusion_matrix = ConfusionMatrix(test, reference)
336
+
337
+ test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
338
+
339
+ if test_empty or test_full or reference_empty or reference_full:
340
+ if nan_for_nonexisting:
341
+ return float("NaN")
342
+ else:
343
+ return 0
344
+
345
+ test, reference = confusion_matrix.test, confusion_matrix.reference
346
+
347
+ return metric.hd95(test, reference, voxel_spacing, connectivity)
348
+
349
+
350
+ def avg_surface_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
351
+
352
+ if confusion_matrix is None:
353
+ confusion_matrix = ConfusionMatrix(test, reference)
354
+
355
+ test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
356
+
357
+ if test_empty or test_full or reference_empty or reference_full:
358
+ if nan_for_nonexisting:
359
+ return float("NaN")
360
+ else:
361
+ return 0
362
+
363
+ test, reference = confusion_matrix.test, confusion_matrix.reference
364
+
365
+ return metric.asd(test, reference, voxel_spacing, connectivity)
366
+
367
+
368
+ def avg_surface_distance_symmetric(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
369
+
370
+ if confusion_matrix is None:
371
+ confusion_matrix = ConfusionMatrix(test, reference)
372
+
373
+ test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
374
+
375
+ if test_empty or test_full or reference_empty or reference_full:
376
+ if nan_for_nonexisting:
377
+ return float("NaN")
378
+ else:
379
+ return 0
380
+
381
+ test, reference = confusion_matrix.test, confusion_matrix.reference
382
+
383
+ return metric.assd(test, reference, voxel_spacing, connectivity)
384
+
385
+
386
+ ALL_METRICS = {
387
+ "False Positive Rate": false_positive_rate,
388
+ "Dice": dice,
389
+ "Jaccard": jaccard,
390
+ "Hausdorff Distance": hausdorff_distance,
391
+ "Hausdorff Distance 95": hausdorff_distance_95,
392
+ "Precision": precision,
393
+ "Recall": recall,
394
+ "Avg. Symmetric Surface Distance": avg_surface_distance_symmetric,
395
+ "Avg. Surface Distance": avg_surface_distance,
396
+ "Accuracy": accuracy,
397
+ "False Omission Rate": false_omission_rate,
398
+ "Negative Predictive Value": negative_predictive_value,
399
+ "False Negative Rate": false_negative_rate,
400
+ "True Negative Rate": true_negative_rate,
401
+ "False Discovery Rate": false_discovery_rate,
402
+ "Total Positives Test": total_positives_test,
403
+ "Total Negatives Test": total_negatives_test,
404
+ "Total Positives Reference": total_positives_reference,
405
+ "total Negatives Reference": total_negatives_reference
406
+ }
source_code/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+
4
import os

# data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"
data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-ValidationData/"

# Strip the long "BraTS-GLI-xxxxx-xxx-" prefix from every modality file so
# each case directory ends up with plain names such as "t1c.nii.gz".
all_cases = os.listdir(data_dir)

for case_name in all_cases:
    case_dir = os.path.join(data_dir, case_name)

    # Skip stray regular files (e.g. .DS_Store); os.listdir() on a file
    # would raise NotADirectoryError and abort the whole run.
    if not os.path.isdir(case_dir):
        continue

    for data_name in os.listdir(case_dir):

        # Files without a dash were already renamed on a previous run.
        if "-" not in data_name:
            continue
        new_name = data_name.split("-")[-1]

        new_path = os.path.join(case_dir, new_name)

        old_path = os.path.join(case_dir, data_name)

        os.rename(old_path, new_path)

        print(f"{new_path} 命名成功")
+
source_code/SegMamba/light_training/examples/2_preprocessing_AIIB23.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from light_training.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor
3
+ import numpy as np
4
+ import pickle
5
+ import json
6
+
7
+
8
def process_train():
    """Preprocess the AIIB23 training split to full resolution."""
    # fullres spacing is [0.5 0.70410156 0.70410156]
    # median_shape is [602.5 516.5 516.5]
    pp = DefaultPreprocessor(base_dir="./data/raw_data/AIIB23_Train_T1",
                             image_dir="img",
                             label_dir="gt",
                             )

    # Load the intensity statistics produced by the planning step.
    with open("./data_analysis_result.txt", "r") as f:
        content = f.read().strip("\n")
    print(content)
    # NOTE(review): eval() executes arbitrary code from this file; safe only
    # because the file is generated locally by run_plan().
    stats = eval(content)
    fg_props = stats["intensity_statistics_per_channel"]

    pp.run(output_spacing=[0.5, 0.70410156, 0.70410156],
           output_dir="./data/fullres/train/",
           all_labels=[1, ],
           num_processes=16,
           foreground_intensity_properties_per_channel=fg_props)
33
+
34
def process_val():
    """Preprocess the unlabeled AIIB23 validation split."""
    # fullres spacing is [0.5 0.70410156 0.70410156]
    # median_shape is [602.5 516.5 516.5]
    pp = DefaultPreprocessor(base_dir="./data/raw_data/Val",
                             image_dir="img",
                             label_dir=None,
                             )

    # Load the intensity statistics produced by the planning step.
    with open("./data_analysis_result.txt", "r") as f:
        content = f.read().strip("\n")
    print(content)
    # NOTE(review): eval() executes arbitrary code from this file; safe only
    # because the file is generated locally by run_plan().
    stats = eval(content)
    fg_props = stats["intensity_statistics_per_channel"]

    pp.run(output_spacing=[0.5, 0.70410156, 0.70410156],
           output_dir="./data/fullres/val_test/",
           all_labels=[1, ],
           foreground_intensity_properties_per_channel=fg_props,
           num_processes=16)
58
+
59
def process_val_semi():
    """Preprocess the semi-supervised validation split (with pseudo labels)."""
    # fullres spacing is [0.5 0.70410156 0.70410156]
    # median_shape is [602.5 516.5 516.5]
    pp = DefaultPreprocessor(base_dir="./data/raw_data/Val_semi_postprocess",
                             image_dir="img",
                             label_dir="gt",
                             )

    # Load the intensity statistics produced by the planning step.
    with open("./data_analysis_result.txt", "r") as f:
        content = f.read().strip("\n")
    print(content)
    # NOTE(review): eval() executes arbitrary code from this file; safe only
    # because the file is generated locally by run_plan().
    stats = eval(content)
    fg_props = stats["intensity_statistics_per_channel"]

    pp.run(output_spacing=[0.5, 0.70410156, 0.70410156],
           output_dir="./data/fullres/val_semi_postprocess/",
           all_labels=[1, ],
           foreground_intensity_properties_per_channel=fg_props)
82
+
83
+
84
def plan():
    """Run the dataset analysis (spacing/intensity statistics) for AIIB23."""
    pp = DefaultPreprocessor(base_dir="./data/raw_data/AIIB23_Train_T1",
                             image_dir="img",
                             label_dir="gt",
                             )
    pp.run_plan()
95
+
96
if __name__ == "__main__":

    # Step 1 (run once): analyze the raw data to produce
    # data_analysis_result.txt, then preprocess the training split.
    # plan()

    process_train()
    # import time
    # s = time.time()
    # process_val()
    # e = time.time()

    # print(f"preprocessing time is {e - s}")

    # process_val_semi()


    #
    # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir)

    # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz")

    # image = data["data"]
    # label = data["seg"]
    # print(image.shape)
    # print(label.shape)

    # import matplotlib.pyplot as plt

    # for i in range(20):
    #     plt.imshow(image[0, i], cmap="gray")
    #     plt.show()

    # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb")

    # info = pickle.load(df)
    # print(info)
source_code/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor
3
+ import numpy as np
4
+ import pickle
5
+ import json
6
+
7
# BraTS2023 modality files expected inside each case directory; the order
# here fixes the channel order of the preprocessed arrays.
data_filename = ["t2w.nii.gz",
                 "t2f.nii.gz",
                 "t1n.nii.gz",
                 "t1c.nii.gz"]
# Ground-truth segmentation file name within each case directory.
seg_filename = "seg.nii.gz"
12
+
13
def process_train():
    """Resample the BraTS2023 training cases to 1 mm isotropic spacing."""
    pp = MultiModalityPreprocessor(base_dir="./data/raw_data/BraTS2023/",
                                   image_dir="ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData",
                                   data_filenames=data_filename,
                                   seg_filename=seg_filename
                                   )

    pp.run(output_spacing=[1.0, 1.0, 1.0],
           output_dir="./data/fullres/train/",
           all_labels=[1, 2, 3],
           )
31
+
32
def process_val():
    """Resample the BraTS2023 validation cases (no ground truth available)."""
    # An empty seg_filename presumably disables label loading -- confirm
    # against MultiModalityPreprocessor.
    pp = MultiModalityPreprocessor(base_dir="./data/raw_data/BraTS2023/",
                                   image_dir="ASNR-MICCAI-BraTS2023-GLI-Challenge-ValidationData",
                                   data_filenames=data_filename,
                                   seg_filename=""
                                   )

    pp.run(output_spacing=[1.0, 1.0, 1.0],
           output_dir="./data/fullres/val/",
           all_labels=[1, 2, 3],
           )
48
+
49
def process_test():
    """Preprocess the WORD test split.

    NOTE(review): this helper was copied from the WORD pipeline and targets
    the WORD dataset, not BraTS2023; it is not called from __main__.  The
    original body referenced DefaultPreprocessor without importing it, which
    raised NameError when the function was actually invoked -- fixed with a
    local import below.
    """
    # fullres spacing is [0.5 0.70410156 0.70410156]
    # median_shape is [602.5 516.5 516.5]
    from light_training.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor

    base_dir = "/home/xingzhaohu/sharefs/datasets/WORD-V0.1.0/"
    image_dir = "imagesTs"
    label_dir = "labelsTs"
    preprocessor = DefaultPreprocessor(base_dir=base_dir,
                                       image_dir=image_dir,
                                       label_dir=label_dir,
                                       )

    out_spacing = [3.0, 0.9765625, 0.9765625]

    output_dir = "./data/fullres/test/"
    with open("./data_analysis_result.txt", "r") as f:
        content = f.read().strip("\n")
    print(content)
    content = json.loads(content)
    foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]

    preprocessor.run(output_spacing=out_spacing,
                     output_dir=output_dir,
                     all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
                     foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)
73
+
74
+
75
def plan():
    """Run the dataset analysis step for the BraTS2023 training split."""
    pp = MultiModalityPreprocessor(base_dir="./data/raw_data/BraTS2023/",
                                   image_dir="ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData",
                                   data_filenames=data_filename,
                                   seg_filename=seg_filename
                                   )
    pp.run_plan()
85
+
86
+
87
if __name__ == "__main__":
    # Run plan() once first to generate the dataset statistics, then
    # preprocess the desired split.
    # plan()

    process_train()
    # process_val()
    # process_test()
94
+