diff --git a/.gitattributes b/.gitattributes
index f8705120b387f5c53396dbbf2ea8caac377cf879..d53e9fb3dd6205a30f9ab21d91007eb86aa44864 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -74,3 +74,20 @@ SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective
SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o filter=lfs diff=lfs merge=lfs -text
SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o filter=lfs diff=lfs merge=lfs -text
SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o filter=lfs diff=lfs merge=lfs -text
+PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o filter=lfs diff=lfs merge=lfs -text
diff --git a/PRISM/LICENSE b/PRISM/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/PRISM/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/PRISM/README.md b/PRISM/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d81b164e2982a16c9d3836f5da1317f106c8eec7
--- /dev/null
+++ b/PRISM/README.md
@@ -0,0 +1,147 @@
+# PRISM
+
+[PRISM](https://arxiv.org/abs/2404.15028): A **P**romptable and **R**obust **I**nteractive **S**egmentation **M**odel with Visual Prompts
+
+Placenta application:
+
+[PRISM Lite](https://arxiv.org/abs/2408.05372): A lightweight model for interactive 3D placenta segmentation in ultrasound
+
+Interactive Segmentation Model for Placenta Segmentation from 3D Ultrasound Images ([arXiv version](https://arxiv.org/abs/2407.08020))
+
+
+## News
+[07/07/24] Check out the decent performance/version of [PRISM on placenta segmentation in ultrasound images](https://github.com/MedICL-VU/PRISM-placenta).
+
+[05/13/24] Our work is early accepted by MICCAI 2024.
+
+[03/07/24] The [pretrained PRISM](https://drive.google.com/drive/u/1/folders/1B6Df44Gd9PEBGPkE1FwC8Ds4jefCekUB) models and [preprocessed datasets](https://drive.google.com/drive/folders/13uGNb2WQhSQcBQIUhnvYJere1LBYGDsW?usp=sharing) are uploaded.
+
+## TODO
+
+
+demo (gradio)
+
+
+
+## Introduction of PRISM
+
+
+PRISM is a robust model/method for interactive segmentation in medical imaging. We strive for human-level performance, as a human-in-loop interactive segmentation model with prompts should gradually refine its outcomes until they closely match inter-rater variability.
+
+
+
+## PRISM tumor segmentation examples
+Briefly, PRISM produces tumor segmentation with mean Dice values of **93.79 (colon), 94.48 (pancreas), 94.18 (liver), and 96.58 (kidney)**.
+
+ | | |
+:-------------------------:|:-------------------------:
+Iterative correction for colon tumor | 
+Iterative correction for multiple tumors | 
+Qualitative results with compared methods | 
+
+The quantitative results can be viewed in our [paper](https://arxiv.org/abs/2404.15028).
+
+## Datasets
+The anatomical differences among individuals and ambiguous boundaries are present in the datasets.
+
+- Our preprocessed
+
+ We used four public [datasets](https://drive.google.com/drive/folders/13uGNb2WQhSQcBQIUhnvYJere1LBYGDsW?usp=sharing) for 3D tumor segmentation in [colon](https://drive.google.com/drive/u/1/folders/1bt17794HCZfmJ2MLh5w0Y_IAJyUj6ti2), [pancreas](https://drive.google.com/drive/u/1/folders/1NncGDG5Cu795WJTmBse-Lm0GrJmtvTdc), [liver](https://drive.google.com/drive/u/1/folders/1vDM2VkNAT5dvFX5XTRhPe6b7zwYWqU_U) and [kidney](https://drive.google.com/drive/u/1/folders/12UDho-JEZHfK1c1laD5dBFNxvJumcoDF).
+
+- Original
+
+ Here are the links for the datasets: [MSD-colon](http://medicaldecathlon.com/), [MSD-pancreas](http://medicaldecathlon.com/), [LiTS2017](https://competitions.codalab.org/competitions/17094) and [KiTS2021](https://kits-challenge.org/kits21/).
+
+
+
+
+
+## Models
+| colon | pancreas | liver | kidney |
+|------------------------------|------------------------------|------------------------------|------------------------------|
+| [Download](https://drive.google.com/drive/u/1/folders/1nPUC0cCsyA_w-tKkhL_Bw7lesBorGzCl) |[Download](https://drive.google.com/drive/u/1/folders/1JPiF7wtSnbFdl0ZLmFQt1b4H-XH4FDrM)| [Download](https://drive.google.com/drive/u/1/folders/1JAFOca1FxWebzZjRa1lKo1OAv0HXqeh6) |[Download](https://drive.google.com/drive/u/1/folders/1sN0HQLM-LfWB5Kp119YwMsZIfv3VJj7S)|
+
+
+## Get Started
+
+**Installation**
+```
+conda create -n prism python=3.9
+conda activate prism
+sudo apt-get install git  # or use your system's package manager
+pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113 # install pytorch
+pip install git+https://github.com/facebookresearch/segment-anything.git # install segment anything packages
+pip install git+https://github.com/deepmind/surface-distance.git # for normalized surface dice (NSD) evaluation
+pip install -r requirements.txt
+```
+
+
+**Train**
+
+```
+python train.py --data colon --data_dir your_data_directory --save_name your_save_name --multiple_outputs --dynamic --use_box --refine
+```
+
+add "--use_scribble" and "--efficient_scribble" if you want to train with scribbles.
+
+**Train (Distributed Data Parallel)**
+
+The only difference between this and the above (train) command is the addition of "--ddp".
+```
+python train.py --data colon --data_dir your_data_directory --save_name your_save_name --multiple_outputs --dynamic --use_box --refine --ddp
+```
+
+
+
+**Test**
+
+Put the downloaded pretrained model under the implementation directory.
+```
+python test.py --data colon --data_dir your_data_directory --split test --checkpoint best --save_name prism_pretrain --num_clicks 1 --iter_nums 11 --multiple_outputs --use_box --use_scribble --efficient_scribble --refine --refine_test
+```
+
+
+
+
+**FAQ**
+
+If you get the error "AttributeError: module 'cv2' has no attribute 'ximgproc'", please check [this](https://stackoverflow.com/questions/57427233/module-cv2-cv2-has-no-attribute-ximgproc) out.
+
+DDP mode may yield a lower Dice score; training for more epochs may resolve this.
+
+On my end, combining trainer and trainer_basic speeds up training.
+
+Training the model without the refine module (as reported in the paper) yields better accuracy than training with the refine module but not using it at test time.
+
+
+## License
+
+The model is licensed under the [Apache 2.0 license](LICENSE)
+
+
+## Acknowledgements
+Thanks for the code from: [SAM](https://github.com/facebookresearch/segment-anything), [SAM-Med3D](https://github.com/uni-medical/SAM-Med3D), [ProMISe](https://github.com/MedICL-VU/ProMISe), [ScribblePrompt](https://github.com/halleewong/ScribblePrompt), [nnU-Net](https://github.com/MIC-DKFZ/nnUNet)
+
+If you find this repository useful, please consider citing:
+```
+@inproceedings{li2024prism,
+ title={Prism: A promptable and robust interactive segmentation model with visual prompts},
+ author={Li, Hao and Liu, Han and Hu, Dewei and Wang, Jiacheng and Oguz, Ipek},
+ booktitle={International Conference on Medical Image Computing and Computer-Assisted Intervention},
+ pages={389--399},
+ year={2024},
+ organization={Springer}
+}
+```
+```
+@inproceedings{li2024interactive,
+ title={Interactive Segmentation Model for Placenta Segmentation from 3D Ultrasound Images},
+ author={Li, Hao and Oguz, Baris and Arenas, Gabriel and Yao, Xing and Wang, Jiacheng and Pouch, Alison and Byram, Brett and Schwartz, Nadav and Oguz, Ipek},
+ booktitle={International Workshop on Advances in Simplifying Medical Ultrasound},
+ pages={132--142},
+ year={2024},
+ organization={Springer}
+}
+```
+Please send an email to hao.li.1@vanderbilt.edu with any questions — we are always happy to help! :)
+
diff --git a/PRISM/SegMamba/.DS_Store b/PRISM/SegMamba/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..012e2d62b0baf86b81f29caf4c5324e021192325
Binary files /dev/null and b/PRISM/SegMamba/.DS_Store differ
diff --git a/PRISM/SegMamba/.gitignore b/PRISM/SegMamba/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..68bc17f9ff2104a9d7b6777058bb4c343ca72609
--- /dev/null
+++ b/PRISM/SegMamba/.gitignore
@@ -0,0 +1,160 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
diff --git a/PRISM/SegMamba/0_inference.py b/PRISM/SegMamba/0_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7f2b4530214000a644360a860527648dce98546
--- /dev/null
+++ b/PRISM/SegMamba/0_inference.py
@@ -0,0 +1,20 @@
+
+
+import torch
+from model_segmamba.segmamba import SegMamba
+
+t1 = torch.rand(1, 4, 128, 128, 128).cuda()
+
+
+model = SegMamba(in_chans=4,
+ out_chans=4,
+ depths=[2,2,2,2],
+ feat_size=[48, 96, 192, 384]).cuda()
+
+out = model(t1)
+
+print(out.shape)
+
+
+
+
diff --git a/PRISM/SegMamba/1_rename_mri_data.py b/PRISM/SegMamba/1_rename_mri_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..50c3f409e8a0de972e0a9e9c40f85ffbe4fe9978
--- /dev/null
+++ b/PRISM/SegMamba/1_rename_mri_data.py
@@ -0,0 +1,26 @@
+
+
+
+import os
+
+data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"
+
+all_cases = os.listdir(data_dir)
+
+for case_name in all_cases:
+ case_dir = os.path.join(data_dir, case_name)
+
+ for data_name in os.listdir(case_dir):
+
+ if "-" not in data_name:
+ continue
+ new_name = data_name.split("-")[-1]
+
+ new_path = os.path.join(case_dir, new_name)
+
+ old_path = os.path.join(case_dir, data_name)
+
+ os.rename(old_path, new_path)
+
+    print(f"{new_path} renamed successfully")
+
diff --git a/PRISM/SegMamba/2_preprocessing_mri.py b/PRISM/SegMamba/2_preprocessing_mri.py
new file mode 100644
index 0000000000000000000000000000000000000000..7765a91d50fab108c93335e5e333a1994f1b4d8c
--- /dev/null
+++ b/PRISM/SegMamba/2_preprocessing_mri.py
@@ -0,0 +1,45 @@
+
+from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor
+import numpy as np
+import pickle
+import json
+
+data_filename = ["t2w.nii.gz",
+ "t2f.nii.gz",
+ "t1n.nii.gz",
+ "t1c.nii.gz"]
+seg_filename = "seg.nii.gz"
+
+base_dir = "./data/raw_data/BraTS2023/"
+image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData"
+
+def process_train():
+ preprocessor = MultiModalityPreprocessor(base_dir=base_dir,
+ image_dir=image_dir,
+ data_filenames=data_filename,
+ seg_filename=seg_filename
+ )
+
+ out_spacing = [1.0, 1.0, 1.0]
+ output_dir = "./data/fullres/train/"
+
+ preprocessor.run(output_spacing=out_spacing,
+ output_dir=output_dir,
+ all_labels=[1, 2, 3],
+ )
+
+def plan():
+ preprocessor = MultiModalityPreprocessor(base_dir=base_dir,
+ image_dir=image_dir,
+ data_filenames=data_filename,
+ seg_filename=seg_filename
+ )
+
+ preprocessor.run_plan()
+
+
+if __name__ == "__main__":
+
+ plan()
+ process_train()
+
diff --git a/PRISM/SegMamba/3_train.py b/PRISM/SegMamba/3_train.py
new file mode 100644
index 0000000000000000000000000000000000000000..60533ed9809a9253b019d7296b708c8aec3d13e6
--- /dev/null
+++ b/PRISM/SegMamba/3_train.py
@@ -0,0 +1,168 @@
+import numpy as np
+from light_training.dataloading.dataset import get_train_val_test_loader_from_train
+import torch
+import torch.nn as nn
+from monai.inferers import SlidingWindowInferer
+from light_training.evaluation.metric import dice
+from light_training.trainer import Trainer
+from monai.utils import set_determinism
+from light_training.utils.files_helper import save_new_model_and_delete_last
+from monai.losses.dice import DiceLoss
+set_determinism(123)
+import os
+
+data_dir = "./data/fullres/train"
+logdir = f"./logs/segmamba"
+
+model_save_path = os.path.join(logdir, "model")
+# augmentation = "nomirror"
+augmentation = True
+
+env = "pytorch"
+max_epoch = 1000
+batch_size = 2
+val_every = 2
+num_gpus = 1
+device = "cuda:0"
+roi_size = [128, 128, 128]
+
+def func(m, epochs):
+ return np.exp(-10*(1- m / epochs)**2)
+
+class BraTSTrainer(Trainer):
+ def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
+ super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
+ self.window_infer = SlidingWindowInferer(roi_size=roi_size,
+ sw_batch_size=1,
+ overlap=0.5)
+ self.augmentation = augmentation
+ from model_segmamba.segmamba import SegMamba
+
+ self.model = SegMamba(in_chans=4,
+ out_chans=4,
+ depths=[2,2,2,2],
+ feat_size=[48, 96, 192, 384])
+
+ self.patch_size = roi_size
+ self.best_mean_dice = 0.0
+ self.ce = nn.CrossEntropyLoss()
+ self.mse = nn.MSELoss()
+ self.train_process = 18
+ self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5,
+ momentum=0.99, nesterov=True)
+
+ self.scheduler_type = "poly"
+ self.cross = nn.CrossEntropyLoss()
+
+ def training_step(self, batch):
+ image, label = self.get_input(batch)
+
+ pred = self.model(image)
+
+ loss = self.cross(pred, label)
+
+ self.log("training_loss", loss, step=self.global_step)
+
+ return loss
+
+ def convert_labels(self, labels):
+ ## TC, WT and ET
+ result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]
+
+ return torch.cat(result, dim=1).float()
+
+
+ def get_input(self, batch):
+ image = batch["data"]
+ label = batch["seg"]
+
+ label = label[:, 0].long()
+ return image, label
+
+ def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]):
+ if pred.sum() > 0 and gt.sum() > 0:
+ d = dice(pred, gt)
+ return np.array([d, 50])
+
+ elif gt.sum() == 0 and pred.sum() == 0:
+ return np.array([1.0, 50])
+
+ else:
+ return np.array([0.0, 50])
+
+ def validation_step(self, batch):
+ image, label = self.get_input(batch)
+
+ output = self.model(image)
+
+ output = output.argmax(dim=1)
+
+ output = output[:, None]
+ output = self.convert_labels(output)
+
+ label = label[:, None]
+ label = self.convert_labels(label)
+
+ output = output.cpu().numpy()
+ target = label.cpu().numpy()
+
+ dices = []
+
+ c = 3
+ for i in range(0, c):
+ pred_c = output[:, i]
+ target_c = target[:, i]
+
+ cal_dice, _ = self.cal_metric(target_c, pred_c)
+ dices.append(cal_dice)
+
+ return dices
+
+ def validation_end(self, val_outputs):
+ dices = val_outputs
+
+ tc, wt, et = dices[0].mean(), dices[1].mean(), dices[2].mean()
+
+ print(f"dices is {tc, wt, et}")
+
+ mean_dice = (tc + wt + et) / 3
+
+ self.log("tc", tc, step=self.epoch)
+ self.log("wt", wt, step=self.epoch)
+ self.log("et", et, step=self.epoch)
+
+ self.log("mean_dice", mean_dice, step=self.epoch)
+
+ if mean_dice > self.best_mean_dice:
+ self.best_mean_dice = mean_dice
+ save_new_model_and_delete_last(self.model,
+ os.path.join(model_save_path,
+ f"best_model_{mean_dice:.4f}.pt"),
+ delete_symbol="best_model")
+
+ save_new_model_and_delete_last(self.model,
+ os.path.join(model_save_path,
+ f"final_model_{mean_dice:.4f}.pt"),
+ delete_symbol="final_model")
+
+
+ if (self.epoch + 1) % 100 == 0:
+ torch.save(self.model.state_dict(), os.path.join(model_save_path, f"tmp_model_ep{self.epoch}_{mean_dice:.4f}.pt"))
+
+ print(f"mean_dice is {mean_dice}")
+
+if __name__ == "__main__":
+
+ trainer = BraTSTrainer(env_type=env,
+ max_epochs=max_epoch,
+ batch_size=batch_size,
+ device=device,
+ logdir=logdir,
+ val_every=val_every,
+ num_gpus=num_gpus,
+ master_port=17759,
+ training_script=__file__)
+
+ train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir)
+
+ trainer.train(train_dataset=train_ds, val_dataset=val_ds)
diff --git a/PRISM/SegMamba/4_predict.py b/PRISM/SegMamba/4_predict.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2b5d48642b1ae5c5c89170bb05115a19dc124af
--- /dev/null
+++ b/PRISM/SegMamba/4_predict.py
@@ -0,0 +1,139 @@
+import numpy as np
+from light_training.dataloading.dataset import get_train_val_test_loader_from_train
+import torch
+import torch.nn as nn
+from monai.inferers import SlidingWindowInferer
+from light_training.evaluation.metric import dice
+from light_training.trainer import Trainer
+from monai.utils import set_determinism
+from light_training.evaluation.metric import dice
+set_determinism(123)
+import os
+from light_training.prediction import Predictor
+
+data_dir = "./data/fullres/train"
+env = "pytorch"
+max_epoch = 1000
+batch_size = 2
+val_every = 2
+num_gpus = 1
+device = "cuda:0"
+patch_size = [128, 128, 128]
+
+class BraTSTrainer(Trainer):
+ def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
+ super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
+
+ self.patch_size = patch_size
+ self.augmentation = False
+
+ def convert_labels(self, labels):
+ ## TC, WT and ET
+ result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]
+
+ return torch.cat(result, dim=1).float()
+
+ def get_input(self, batch):
+ image = batch["data"]
+ label = batch["seg"]
+ properties = batch["properties"]
+ label = self.convert_labels(label)
+
+ return image, label, properties
+
+ def define_model_segmamba(self):
+ from model_segmamba.segmamba import SegMamba
+ model = SegMamba(in_chans=4,
+ out_chans=4,
+ depths=[2,2,2,2],
+ feat_size=[48, 96, 192, 384])
+
+ model_path = "/home/xingzhaohu/dev/jiuding_code/brats23/logs/segmamba/model/final_model_0.9038.pt"
+ new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
+ model.load_state_dict(new_sd)
+ model.eval()
+ window_infer = SlidingWindowInferer(roi_size=patch_size,
+ sw_batch_size=2,
+ overlap=0.5,
+ progress=True,
+ mode="gaussian")
+
+ predictor = Predictor(window_infer=window_infer,
+ mirror_axes=[0,1,2])
+
+ save_path = "./prediction_results/segmamba"
+ os.makedirs(save_path, exist_ok=True)
+
+ return model, predictor, save_path
+
+ def validation_step(self, batch):
+ image, label, properties = self.get_input(batch)
+ ddim = False
+
+ model, predictor, save_path = self.define_model_segmamba()
+
+ model_output = predictor.maybe_mirror_and_predict(image, model, device=device)
+
+ model_output = predictor.predict_raw_probability(model_output,
+ properties=properties)
+
+
+ model_output = model_output.argmax(dim=0)[None]
+ model_output = self.convert_labels_dim0(model_output)
+
+ label = label[0]
+ c = 3
+ dices = []
+ for i in range(0, c):
+ output_i = model_output[i].cpu().numpy()
+ label_i = label[i].cpu().numpy()
+ d = dice(output_i, label_i)
+ dices.append(d)
+
+ print(dices)
+
+ model_output = predictor.predict_noncrop_probability(model_output, properties)
+ predictor.save_to_nii(model_output,
+ raw_spacing=[1,1,1],
+ case_name = properties['name'][0],
+ save_dir=save_path)
+
+ return 0
+
+ def convert_labels_dim0(self, labels):
+ ## TC, WT and ET
+ result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]
+
+ return torch.cat(result, dim=0).float()
+
+
+ def filte_state_dict(self, sd):
+ if "module" in sd :
+ sd = sd["module"]
+ new_sd = {}
+ for k, v in sd.items():
+ k = str(k)
+ new_k = k[7:] if k.startswith("module") else k
+ new_sd[new_k] = v
+ del sd
+ return new_sd
+
+if __name__ == "__main__":
+
+ trainer = BraTSTrainer(env_type=env,
+ max_epochs=max_epoch,
+ batch_size=batch_size,
+ device=device,
+ logdir="",
+ val_every=val_every,
+ num_gpus=num_gpus,
+ master_port=17751,
+ training_script=__file__)
+
+ train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir)
+
+ trainer.validation_single_gpu(test_ds)
+
+ # print(f"result is {v_mean}")
+
+
diff --git a/PRISM/SegMamba/5_compute_metrics.py b/PRISM/SegMamba/5_compute_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..947252eaa9d5b00483dcde2600d4f5871eccd8a6
--- /dev/null
+++ b/PRISM/SegMamba/5_compute_metrics.py
@@ -0,0 +1,84 @@
+from light_training.dataloading.dataset import get_train_val_test_loader_from_train
+from monai.utils import set_determinism
+import torch
+import os
+import numpy as np
+import SimpleITK as sitk
+from medpy import metric
+import argparse
+from tqdm import tqdm
+
+import numpy as np
+
+set_determinism(123)
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("--pred_name", required=True, type=str)
+
+results_root = "prediction_results"
+args = parser.parse_args()
+
+pred_name = args.pred_name
+
+def cal_metric(gt, pred, voxel_spacing):
+ if pred.sum() > 0 and gt.sum() > 0:
+ dice = metric.binary.dc(pred, gt)
+ hd95 = metric.binary.hd95(pred, gt, voxelspacing=voxel_spacing)
+ return np.array([dice, hd95])
+ else:
+ return np.array([0.0, 50])
+
+def each_cases_metric(gt, pred, voxel_spacing):
+ classes_num = 3
+ class_wise_metric = np.zeros((classes_num, 2))
+ for cls in range(0, classes_num):
+ class_wise_metric[cls, ...] = cal_metric(pred[cls], gt[cls], voxel_spacing)
+ print(class_wise_metric)
+ return class_wise_metric
+
+def convert_labels(labels):
+ ## TC, WT and ET
+ labels = labels.unsqueeze(dim=0)
+
+ result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]
+
+ return torch.cat(result, dim=0).float()
+
+
+if __name__ == "__main__":
+ data_dir = "./data/fullres/train"
+ raw_data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"
+ train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir)
+ print(len(test_ds))
+ all_results = np.zeros((250,3,2))
+
+ ind = 0
+ for batch in tqdm(test_ds, total=len(test_ds)):
+ properties = batch["properties"]
+ case_name = properties["name"]
+ gt_itk = os.path.join(raw_data_dir, case_name, f"seg.nii.gz")
+ voxel_spacing = [1, 1, 1]
+ gt_itk = sitk.ReadImage(gt_itk)
+ gt_array = sitk.GetArrayFromImage(gt_itk).astype(np.int32)
+ gt_array = torch.from_numpy(gt_array)
+ gt_array = convert_labels(gt_array).numpy()
+ pred_itk = sitk.ReadImage(f"./{results_root}/{pred_name}/{case_name}.nii.gz")
+ pred_array = sitk.GetArrayFromImage(pred_itk)
+
+ m = each_cases_metric(gt_array, pred_array, voxel_spacing)
+
+ all_results[ind, ...] = m
+
+ ind += 1
+
+ os.makedirs(f"./{results_root}/result_metrics/", exist_ok=True)
+ np.save(f"./{results_root}/result_metrics/{pred_name}.npy", all_results)
+
+ result = np.load(f"./{results_root}/result_metrics/{pred_name}.npy")
+ print(result.shape)
+ print(result.mean(axis=0))
+ print(result.std(axis=0))
+
+
+
diff --git a/PRISM/SegMamba/README.md b/PRISM/SegMamba/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7d6f555a49e21b9d8ab942bb58545787229d64aa
--- /dev/null
+++ b/PRISM/SegMamba/README.md
@@ -0,0 +1,132 @@
+# SegMamba
+
+**Recent news: If you are interested in research on vision-language models, please refer to our latest work: https://github.com/MrGiovanni/RadGPT (ICCV 2025)**
+
+**Now we have open-sourced the pre-processing, training, inference, and metrics computation codes.**
+
+SegMamba: Long-range Sequential Modeling Mamba For 3D Medical Image Segmentation
+
+[https://arxiv.org/abs/2401.13560](https://arxiv.org/abs/2401.13560)
+
+
+
+
+
+Our advantage in speed and memory.
+
+
+## Contact
+If you have any questions about our project, please feel free to contact us by email at zxing565@connect.hkust-gz.edu.cn or via WeChat at 18340097191. Furthermore, the data underlying this article will be shared on reasonable request to gaof57@mail.sysu.edu.cn.
+
+## Environment install
+Clone this repository and navigate to the root directory of the project.
+
+```bash
+git clone https://github.com/ge-xing/SegMamba.git
+
+cd SegMamba
+```
+### Install causal-conv1d
+
+```bash
+cd causal-conv1d
+
+python setup.py install
+```
+
+### Install mamba
+
+```bash
+cd mamba
+
+python setup.py install
+```
+
+### Install monai
+
+```bash
+pip install monai
+```
+
+## Simple test
+
+```bash
+python 0_inference.py
+```
+
+## Preprocessing, training, testing, inference, and metrics computation
+
+### Data downloading
+
+Data is from [https://arxiv.org/abs/2305.17033](https://arxiv.org/abs/2305.17033)
+
+Download from Baidu Disk (extraction code: ty22): [https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22](https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22)
+
+Download from OneDrive [https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B](https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B)
+
+### Preprocessing
+In my setting, the data directory of BraTS2023 is : "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"
+
+First, we need to run the rename process.
+
+```bash
+python 1_rename_mri_data.py
+```
+
+Then, we need to run the pre-processing code to perform the resampling, normalization, and cropping steps.
+
+```bash
+python 2_preprocessing_mri.py
+```
+
+After pre-processing, the data structure will be in this format:
+
+
+### Training
+
+When the pre-processing process is done, we can train our model.
+
+We mainly use the pre-processed data from the last step: **data_dir = "./data/fullres/train"**
+
+
+```bash
+python 3_train.py
+```
+
+The training logs and checkpoints are saved in:
+**logdir = f"./logs/segmamba"**
+
+
+
+
+### Inference
+
+When we have trained our models, we can run inference on all the data in the testing set.
+
+```bash
+python 4_predict.py
+```
+
+When this process is done, the predicted cases will be saved in this path:
+**save_path = "./prediction_results/segmamba"**
+
+### Metrics computation
+We can obtain the Dice score and HD95 on each segmentation target (WT, TC, ET for BraTS2023 dataset) using this code:
+
+```bash
+python 5_compute_metrics.py --pred_name="segmamba"
+```
+
+
+
+## Acknowledgement
+Many thanks for these repos for their great contribution!
+
+[https://github.com/MIC-DKFZ/nnUNet](https://github.com/MIC-DKFZ/nnUNet)
+
+[https://github.com/Project-MONAI/MONAI](https://github.com/Project-MONAI/MONAI)
+
+[https://github.com/hustvl/Vim](https://github.com/hustvl/Vim)
+
+[https://github.com/bowang-lab/U-Mamba](https://github.com/bowang-lab/U-Mamba)
+
diff --git a/PRISM/SegMamba/causal-conv1d/.DS_Store b/PRISM/SegMamba/causal-conv1d/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..c6665734a44cf14bd7d2d71492646e87eae6cfa1
Binary files /dev/null and b/PRISM/SegMamba/causal-conv1d/.DS_Store differ
diff --git a/PRISM/SegMamba/causal-conv1d/AUTHORS b/PRISM/SegMamba/causal-conv1d/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..88193855314bb723ced1860384e417954f559700
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/AUTHORS
@@ -0,0 +1 @@
+Tri Dao, tri@tridao.me
diff --git a/PRISM/SegMamba/causal-conv1d/LICENSE b/PRISM/SegMamba/causal-conv1d/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..5860e4b33f3d9d85fc636137c559331d51783a5b
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/PRISM/SegMamba/causal-conv1d/README.md b/PRISM/SegMamba/causal-conv1d/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4e905425a650d77c5c4854e4c4a261778c4d2690
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/README.md
@@ -0,0 +1 @@
+# Causal depthwise conv1d in CUDA with a PyTorch interface
diff --git a/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py b/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc4d610a1e557cabd723fb6e33438f03c5c4bf66
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py
@@ -0,0 +1,3 @@
+__version__ = "1.0.0"
+
+from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update
diff --git a/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py b/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..f66143c39e767572ca12112811a384239b8beb63
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2023, Tri Dao.
+
+import torch
+import torch.nn.functional as F
+
+
+import causal_conv1d_cuda
+
+
+class CausalConv1dFn(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x, weight, bias=None, activation=None):
+ if activation not in [None, "silu", "swish"]:
+ raise NotImplementedError("activation must be None, silu, or swish")
+ if x.stride(2) != 1 and x.stride(1) != 1:
+ x = x.contiguous()
+ bias = bias.contiguous() if bias is not None else None
+ ctx.save_for_backward(x, weight, bias)
+ ctx.activation = activation in ["silu", "swish"]
+ out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation)
+ return out
+
+ @staticmethod
+ def backward(ctx, dout):
+ x, weight, bias = ctx.saved_tensors
+ if dout.stride(2) != 1 and dout.stride(1) != 1:
+ dout = dout.contiguous()
+ # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
+ # backward of conv1d with the backward of chunk).
+ # Here we just pass in None and dx will be allocated in the C++ code.
+ dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd(
+ x, weight, bias, dout, None, ctx.activation
+ )
+ return dx, dweight, dbias if bias is not None else None, None
+
+
+def causal_conv1d_fn(x, weight, bias=None, activation=None):
+ """
+ x: (batch, dim, seqlen)
+ weight: (dim, width)
+ bias: (dim,)
+ activation: either None or "silu" or "swish"
+
+ out: (batch, dim, seqlen)
+ """
+ return CausalConv1dFn.apply(x, weight, bias, activation)
+
+
+def causal_conv1d_ref(x, weight, bias=None, activation=None):
+ """
+ x: (batch, dim, seqlen)
+ weight: (dim, width)
+ bias: (dim,)
+
+ out: (batch, dim, seqlen)
+ """
+ if activation not in [None, "silu", "swish"]:
+ raise NotImplementedError("activation must be None, silu, or swish")
+ dtype_in = x.dtype
+ x = x.to(weight.dtype)
+ seqlen = x.shape[-1]
+ dim, width = weight.shape
+ out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim)
+ out = out[..., :seqlen]
+ return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
+
+
+def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None):
+ """
+ x: (batch, dim)
+ conv_state: (batch, dim, width)
+ weight: (dim, width)
+ bias: (dim,)
+
+ out: (batch, dim)
+ """
+ if activation not in [None, "silu", "swish"]:
+ raise NotImplementedError("activation must be None, silu, or swish")
+ activation = activation in ["silu", "swish"]
+ return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation)
+
+
+def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None):
+ """
+ x: (batch, dim)
+ conv_state: (batch, dim, width)
+ weight: (dim, width)
+ bias: (dim,)
+
+ out: (batch, dim)
+ """
+ if activation not in [None, "silu", "swish"]:
+ raise NotImplementedError("activation must be None, silu, or swish")
+ dtype_in = x.dtype
+ batch, dim = x.shape
+ width = weight.shape[1]
+ assert conv_state.shape == (batch, dim, width)
+ assert weight.shape == (dim, width)
+ conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W)
+ conv_state[:, :, -1] = x
+ out = torch.sum(conv_state * weight, dim=-1) # (B D)
+ if bias is not None:
+ out += bias
+ return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
diff --git a/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so b/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..6eedd45e2c4084a3f4504b362dd30eace347bdf4
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dedd9d99881bf7f043ac14c79ad2b71fea8e93f166482597bfe5a3a09849b627
+size 30227360
diff --git a/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o b/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o
new file mode 100644
index 0000000000000000000000000000000000000000..1b3dc32e2af79fd55ef2f2123d3f204448f61d67
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef366a1da5c5f81e3aa761f5cd37bc90c046b17068b504191043faaa162230e5
+size 377648
diff --git a/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o b/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o
new file mode 100644
index 0000000000000000000000000000000000000000..1caa3061396365bf5e2cfbc5f146b0fca6ec6322
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e581d37f9a1a942c98bdb8b2986540fab333e42092bcdf19e211c12fcc347bdb
+size 22535976
diff --git a/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o b/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o
new file mode 100644
index 0000000000000000000000000000000000000000..ba054a6717e95ea43aeed852b5ee1ef1f9b0d629
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0f5650bf1e870dbbcaafaa14acbde28f03014ccaf73d8137ae5fa2967807af7
+size 6723096
diff --git a/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o b/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o
new file mode 100644
index 0000000000000000000000000000000000000000..4f5bfe9e6efb77a5d50ce32cb78229cb47a5bf23
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1391cc2ab0159135d0887ac6d659fa2ec85466bb6c5978722d55868054b12726
+size 910152
diff --git a/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..e933a3d307c4158492494dae393112800cfd6b36
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO
@@ -0,0 +1,29 @@
+Metadata-Version: 2.4
+Name: causal_conv1d
+Version: 1.0.0
+Summary: Causal depthwise conv1d in CUDA, with a PyTorch interface
+Home-page: https://github.com/Dao-AILab/causal-conv1d
+Author: Tri Dao
+Author-email: tri@tridao.me
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: Unix
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+License-File: AUTHORS
+Requires-Dist: torch
+Requires-Dist: packaging
+Requires-Dist: ninja
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license-file
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary
+
+# Causal depthwise conv1d in CUDA with a PyTorch interface
diff --git a/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..085f8c1e16070f5d8c191ccba0365e90512ae180
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt
@@ -0,0 +1,16 @@
+AUTHORS
+LICENSE
+README.md
+setup.py
+causal_conv1d/__init__.py
+causal_conv1d/causal_conv1d_interface.py
+causal_conv1d.egg-info/PKG-INFO
+causal_conv1d.egg-info/SOURCES.txt
+causal_conv1d.egg-info/dependency_links.txt
+causal_conv1d.egg-info/requires.txt
+causal_conv1d.egg-info/top_level.txt
+csrc/causal_conv1d.cpp
+csrc/causal_conv1d_bwd.cu
+csrc/causal_conv1d_fwd.cu
+csrc/causal_conv1d_update.cu
+tests/test_causal_conv1d.py
\ No newline at end of file
diff --git a/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4abdfa4e1eeb60695a9dc850226f9ca2cf8d3c94
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt
@@ -0,0 +1,3 @@
+torch
+packaging
+ninja
diff --git a/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4e7fccded0b0646bbe9f67acaba866f9861f3333
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt
@@ -0,0 +1,2 @@
+causal_conv1d
+causal_conv1d_cuda
diff --git a/PRISM/SegMamba/causal-conv1d/causal_conv1d/__init__.py b/PRISM/SegMamba/causal-conv1d/causal_conv1d/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc4d610a1e557cabd723fb6e33438f03c5c4bf66
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/causal_conv1d/__init__.py
@@ -0,0 +1,3 @@
+__version__ = "1.0.0"
+
+from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update
diff --git a/PRISM/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py b/PRISM/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..f66143c39e767572ca12112811a384239b8beb63
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2023, Tri Dao.
+
+import torch
+import torch.nn.functional as F
+
+
+import causal_conv1d_cuda
+
+
+class CausalConv1dFn(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x, weight, bias=None, activation=None):
+ if activation not in [None, "silu", "swish"]:
+ raise NotImplementedError("activation must be None, silu, or swish")
+ if x.stride(2) != 1 and x.stride(1) != 1:
+ x = x.contiguous()
+ bias = bias.contiguous() if bias is not None else None
+ ctx.save_for_backward(x, weight, bias)
+ ctx.activation = activation in ["silu", "swish"]
+ out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation)
+ return out
+
+ @staticmethod
+ def backward(ctx, dout):
+ x, weight, bias = ctx.saved_tensors
+ if dout.stride(2) != 1 and dout.stride(1) != 1:
+ dout = dout.contiguous()
+ # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
+ # backward of conv1d with the backward of chunk).
+ # Here we just pass in None and dx will be allocated in the C++ code.
+ dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd(
+ x, weight, bias, dout, None, ctx.activation
+ )
+ return dx, dweight, dbias if bias is not None else None, None
+
+
+def causal_conv1d_fn(x, weight, bias=None, activation=None):
+ """
+ x: (batch, dim, seqlen)
+ weight: (dim, width)
+ bias: (dim,)
+ activation: either None or "silu" or "swish"
+
+ out: (batch, dim, seqlen)
+ """
+ return CausalConv1dFn.apply(x, weight, bias, activation)
+
+
+def causal_conv1d_ref(x, weight, bias=None, activation=None):
+ """
+ x: (batch, dim, seqlen)
+ weight: (dim, width)
+ bias: (dim,)
+
+ out: (batch, dim, seqlen)
+ """
+ if activation not in [None, "silu", "swish"]:
+ raise NotImplementedError("activation must be None, silu, or swish")
+ dtype_in = x.dtype
+ x = x.to(weight.dtype)
+ seqlen = x.shape[-1]
+ dim, width = weight.shape
+ out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim)
+ out = out[..., :seqlen]
+ return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
+
+
+def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None):
+ """
+ x: (batch, dim)
+ conv_state: (batch, dim, width)
+ weight: (dim, width)
+ bias: (dim,)
+
+ out: (batch, dim)
+ """
+ if activation not in [None, "silu", "swish"]:
+ raise NotImplementedError("activation must be None, silu, or swish")
+ activation = activation in ["silu", "swish"]
+ return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation)
+
+
+def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None):
+ """
+ x: (batch, dim)
+ conv_state: (batch, dim, width)
+ weight: (dim, width)
+ bias: (dim,)
+
+ out: (batch, dim)
+ """
+ if activation not in [None, "silu", "swish"]:
+ raise NotImplementedError("activation must be None, silu, or swish")
+ dtype_in = x.dtype
+ batch, dim = x.shape
+ width = weight.shape[1]
+ assert conv_state.shape == (batch, dim, width)
+ assert weight.shape == (dim, width)
+ conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W)
+ conv_state[:, :, -1] = x
+ out = torch.sum(conv_state * weight, dim=-1) # (B D)
+ if bias is not None:
+ out += bias
+ return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
diff --git a/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1c80516ac8599d4d80910a1d4d85c4c435cf1e4f
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp
@@ -0,0 +1,333 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/extension.h>
#include <vector>

#include "causal_conv1d.h"
+
+#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
+
+#define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) \
+ if (ITYPE == at::ScalarType::Half) { \
+ using input_t = at::Half; \
+ __VA_ARGS__(); \
+ } else if (ITYPE == at::ScalarType::BFloat16) { \
+ using input_t = at::BFloat16; \
+ __VA_ARGS__(); \
+ } else if (ITYPE == at::ScalarType::Float) { \
+ using input_t = float; \
+ __VA_ARGS__(); \
+ } else { \
+ AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \
+ }
+
+#define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) \
+ if (WTYPE == at::ScalarType::Half) { \
+ using weight_t = at::Half; \
+ __VA_ARGS__(); \
+ } else if (WTYPE == at::ScalarType::BFloat16) { \
+ using weight_t = at::BFloat16; \
+ __VA_ARGS__(); \
+ } else if (WTYPE == at::ScalarType::Float) { \
+ using weight_t = float; \
+ __VA_ARGS__(); \
+ } else { \
+ AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \
+ }
+
+template
+void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream);
+template
+void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream);
+
+template
+void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream);
+template
+void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream);
+
+template
+void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream);
+
+void set_conv_params_fwd(ConvParamsBase ¶ms,
+ // sizes
+ const size_t batch,
+ const size_t dim,
+ const size_t seqlen,
+ const size_t width,
+ // device pointers
+ const at::Tensor x,
+ const at::Tensor weight,
+ const at::Tensor out,
+ void* bias_ptr,
+ bool silu_activation) {
+
+ // Reset the parameters
+ memset(¶ms, 0, sizeof(params));
+
+ params.batch = batch;
+ params.dim = dim;
+ params.seqlen = seqlen;
+ params.width = width;
+
+ params.silu_activation = silu_activation;
+
+ // Set the pointers and strides.
+ params.x_ptr = x.data_ptr();
+ params.weight_ptr = weight.data_ptr();
+ params.bias_ptr = bias_ptr;
+ params.out_ptr = out.data_ptr();
+ // All stride are in elements, not bytes.
+ params.x_batch_stride = x.stride(0);
+ params.x_c_stride = x.stride(1);
+ params.x_l_stride = x.stride(-1);
+ params.weight_c_stride = weight.stride(0);
+ params.weight_width_stride = weight.stride(1);
+ params.out_batch_stride = out.stride(0);
+ params.out_c_stride = out.stride(1);
+ params.out_l_stride = out.stride(-1);
+}
+
+
+void set_conv_params_bwd(ConvParamsBwd ¶ms,
+ // sizes
+ const size_t batch,
+ const size_t dim,
+ const size_t seqlen,
+ const size_t width,
+ // device pointers
+ const at::Tensor x,
+ const at::Tensor weight,
+ void* bias_ptr,
+ const at::Tensor dout,
+ const at::Tensor dx,
+ const at::Tensor dweight,
+ void* dbias_ptr,
+ bool silu_activation) {
+ // Pass in "dout" instead of "out", we're not gonna use "out" at all.
+ set_conv_params_fwd(params, batch, dim, seqlen, width,
+ x, weight, dout, bias_ptr, silu_activation);
+
+ // Set the pointers and strides.
+ params.dout_ptr = dout.data_ptr();
+ params.dx_ptr = dx.data_ptr();
+ params.dweight_ptr = dweight.data_ptr();
+ params.dbias_ptr = dbias_ptr;
+ // All stride are in elements, not bytes.
+ params.dout_batch_stride = dout.stride(0);
+ params.dout_c_stride = dout.stride(1);
+ params.dout_l_stride = dout.stride(2);
+ params.dweight_c_stride = dweight.stride(0);
+ params.dweight_width_stride = dweight.stride(1);
+ params.dx_batch_stride = dx.stride(0);
+ params.dx_c_stride = dx.stride(1);
+ params.dx_l_stride = dx.stride(2);
+}
+
+at::Tensor
+causal_conv1d_fwd(const at::Tensor &x, const at::Tensor &weight,
+ const c10::optional &bias_,
+ bool silu_activation) {
+ auto input_type = x.scalar_type();
+ auto weight_type = weight.scalar_type();
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
+
+ TORCH_CHECK(x.is_cuda());
+ TORCH_CHECK(weight.is_cuda());
+
+ const auto sizes = x.sizes();
+ const int batch_size = sizes[0];
+ const int dim = sizes[1];
+ const int seqlen = sizes[2];
+ const int width = weight.size(-1);
+
+ CHECK_SHAPE(x, batch_size, dim, seqlen);
+ CHECK_SHAPE(weight, dim, width);
+
+ TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1);
+ const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1;
+
+ if (is_channel_last) {
+ TORCH_CHECK(dim % 8 == 0, "causal_conv1d only supports channel dimension divisible by 8 for now");
+ }
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
+
+
+ if (bias_.has_value()) {
+ auto bias = bias_.value();
+ TORCH_CHECK(bias.scalar_type() == weight_type);
+ TORCH_CHECK(bias.is_cuda());
+ TORCH_CHECK(bias.stride(-1) == 1);
+ CHECK_SHAPE(bias, dim);
+ }
+
+ at::Tensor out = torch::empty_like(x);
+
+ ConvParamsBase params;
+ set_conv_params_fwd(params, batch_size, dim, seqlen, width, x, weight, out,
+ bias_.has_value() ? bias_.value().data_ptr() : nullptr,
+ silu_activation);
+
+ // Otherwise the kernel will be launched from cuda:0 device
+ // Cast to char to avoid compiler warning about narrowing
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_fwd", [&] {
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_fwd", [&] {
+ if (!is_channel_last) {
+ causal_conv1d_fwd_cuda(params, stream);
+ } else {
+ causal_conv1d_channellast_fwd_cuda(params, stream);
+ }
+ });
+ });
+ return out;
+}
+
+std::vector
+causal_conv1d_bwd(const at::Tensor &x, const at::Tensor &weight,
+ const c10::optional &bias_,
+ at::Tensor &dout,
+ c10::optional &dx_,
+ bool silu_activation) {
+ auto input_type = x.scalar_type();
+ auto weight_type = weight.scalar_type();
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
+
+ TORCH_CHECK(x.is_cuda());
+ TORCH_CHECK(weight.is_cuda());
+ TORCH_CHECK(dout.is_cuda());
+
+ const auto sizes = x.sizes();
+ const int batch_size = sizes[0];
+ const int dim = sizes[1];
+ const int seqlen = sizes[2];
+ const int width = weight.size(-1);
+
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
+
+ CHECK_SHAPE(x, batch_size, dim, seqlen);
+ CHECK_SHAPE(weight, dim, width);
+ CHECK_SHAPE(dout, batch_size, dim, seqlen);
+
+ TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1);
+ const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1;
+ if (!is_channel_last && dout.stride(2) != 1) { dout = dout.contiguous(); }
+ if (is_channel_last && dout.stride(1) != 1) { dout = dout.transpose(-1, -2).contiguous().transpose(-1, -2); }
+
+ if (bias_.has_value()) {
+ auto bias = bias_.value();
+ TORCH_CHECK(bias.scalar_type() == weight_type);
+ TORCH_CHECK(bias.is_cuda());
+ TORCH_CHECK(bias.stride(-1) == 1);
+ CHECK_SHAPE(bias, dim);
+ }
+
+ at::Tensor dx;
+ if (dx_.has_value()) {
+ dx = dx_.value();
+ TORCH_CHECK(dx.scalar_type() == input_type);
+ TORCH_CHECK(dx.is_cuda());
+ CHECK_SHAPE(dx, batch_size, dim, seqlen);
+ if (!is_channel_last) { TORCH_CHECK(dx.stride(2) == 1); }
+ if (is_channel_last) { TORCH_CHECK(dx.stride(1) == 1); }
+ } else {
+ dx = torch::empty_like(x);
+ }
+
+ // Otherwise the kernel will be launched from cuda:0 device
+ // Cast to char to avoid compiler warning about narrowing
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
+
+ at::Tensor dweight = torch::zeros_like(weight, weight.options().dtype(at::kFloat));
+ at::Tensor dbias;
+ if (bias_.has_value()) { dbias = torch::zeros_like(bias_.value(), bias_.value().options().dtype(at::kFloat)); }
+
+ ConvParamsBwd params;
+ set_conv_params_bwd(params, batch_size, dim, seqlen, width,
+ x, weight, bias_.has_value() ? bias_.value().data_ptr() : nullptr,
+ dout, dx, dweight, bias_.has_value() ? dbias.data_ptr() : nullptr,
+ silu_activation);
+
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_bwd", [&] {
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_bwd", [&] {
+ if (!is_channel_last) {
+ causal_conv1d_bwd_cuda(params, stream);
+ } else {
+ causal_conv1d_channellast_bwd_cuda(params, stream);
+ }
+ });
+ });
+ return {dx, dweight.to(weight.dtype()), bias_.has_value() ? dbias.to(bias_.value().dtype()) : dbias};
+}
+
+at::Tensor
+causal_conv1d_update(const at::Tensor &x,
+ const at::Tensor &conv_state,
+ const at::Tensor &weight,
+ const c10::optional &bias_,
+ bool silu_activation) {
+ auto input_type = x.scalar_type();
+ auto weight_type = weight.scalar_type();
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
+ TORCH_CHECK(conv_state.scalar_type() == input_type);
+
+ TORCH_CHECK(x.is_cuda());
+ TORCH_CHECK(conv_state.is_cuda());
+ TORCH_CHECK(weight.is_cuda());
+
+ const auto sizes = x.sizes();
+ const int batch_size = sizes[0];
+ const int dim = sizes[1];
+ const int width = weight.size(-1);
+
+ CHECK_SHAPE(x, batch_size, dim);
+ CHECK_SHAPE(conv_state, batch_size, dim, width);
+ CHECK_SHAPE(weight, dim, width);
+
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
+
+ if (bias_.has_value()) {
+ auto bias = bias_.value();
+ TORCH_CHECK(bias.scalar_type() == weight_type);
+ TORCH_CHECK(bias.is_cuda());
+ TORCH_CHECK(bias.stride(-1) == 1);
+ CHECK_SHAPE(bias, dim);
+ }
+
+ at::Tensor out = torch::empty_like(x);
+
+ ConvParamsBase params;
+ set_conv_params_fwd(params, batch_size, dim, /*seqlen=*/1, width, x, weight, out,
+ bias_.has_value() ? bias_.value().data_ptr() : nullptr,
+ silu_activation);
+ params.conv_state_ptr = conv_state.data_ptr();
+ // All stride are in elements, not bytes.
+ params.conv_state_batch_stride = conv_state.stride(0);
+ params.conv_state_c_stride = conv_state.stride(1);
+ params.conv_state_l_stride = conv_state.stride(2);
+
+ // Otherwise the kernel will be launched from cuda:0 device
+ // Cast to char to avoid compiler warning about narrowing
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_update", [&] {
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_update", [&] {
+ causal_conv1d_update_cuda(params, stream);
+ });
+ });
+ return out;
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("causal_conv1d_fwd", &causal_conv1d_fwd, "Causal conv1d forward");
+ m.def("causal_conv1d_bwd", &causal_conv1d_bwd, "Causal conv1d backward");
+ m.def("causal_conv1d_update", &causal_conv1d_update, "Causal conv1d update");
+}
diff --git a/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d.h b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d.h
new file mode 100644
index 0000000000000000000000000000000000000000..844ed92cfc91a881e58fccfca001a13ebcc434cc
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d.h
@@ -0,0 +1,53 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+#pragma once
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
// Plain-old-data parameter pack shared by the forward/update kernels.
// Strides are in elements, not bytes (see the setters in causal_conv1d.cpp).
struct ConvParamsBase {
    using index_t = uint32_t;

    int batch, dim, seqlen, width;
    bool silu_activation;   // apply SiLU to the conv output

    index_t x_batch_stride;
    index_t x_c_stride;
    index_t x_l_stride;
    index_t weight_c_stride;
    index_t weight_width_stride;
    index_t out_batch_stride;
    index_t out_c_stride;
    index_t out_l_stride;

    // Only used by causal_conv1d_update.
    index_t conv_state_batch_stride;
    index_t conv_state_c_stride;
    index_t conv_state_l_stride;

    // Common data pointers.
    void *__restrict__ x_ptr;
    void *__restrict__ weight_ptr;
    void *__restrict__ bias_ptr;     // nullptr when there is no bias
    void *__restrict__ out_ptr;

    void *__restrict__ conv_state_ptr;
};
+
+struct ConvParamsBwd: public ConvParamsBase {
+ index_t dx_batch_stride;
+ index_t dx_c_stride;
+ index_t dx_l_stride;
+ index_t dweight_c_stride;
+ index_t dweight_width_stride;
+ index_t dout_batch_stride;
+ index_t dout_c_stride;
+ index_t dout_l_stride;
+
+ // Common data pointers.
+ void *__restrict__ dx_ptr;
+ void *__restrict__ dweight_ptr;
+ void *__restrict__ dbias_ptr;
+ void *__restrict__ dout_ptr;
+};
+
diff --git a/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu
new file mode 100644
index 0000000000000000000000000000000000000000..66609750a30a86a284451871ca163d79a0529047
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu
@@ -0,0 +1,525 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+#include
+#include
+#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
+
+#include
+#include
+#include
+
+#include "causal_conv1d.h"
+#include "causal_conv1d_common.h"
+#include "static_switch.h"
+
template<int kNThreads_, int kWidth_, bool kSiluAct_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
struct Causal_conv1d_bwd_kernel_traits {
    using input_t = input_t_;
    using weight_t = weight_t_;
    static constexpr int kNThreads = kNThreads_;
    static constexpr int kWidth = kWidth_;
    static constexpr bool kSiluAct = kSiluAct_;
    static constexpr int kNBytes = sizeof(input_t);
    static_assert(kNBytes == 2 || kNBytes == 4);
    // 16 bytes per thread per load: 8 halves or 4 floats.
    static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
    static_assert(kWidth <= kNElts);
    // It's possible that we need to do 2 rounds of exchange if input_t is 16 bits
    // (since then we'd have 8 values of float, and each round we can exchange 4 floats).
    static constexpr int kNExchangeRounds = sizeof(float) / sizeof(input_t);
    static constexpr bool kIsVecLoad = kIsVecLoad_;
    using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
    using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNElts, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
    using BlockLoadVecT = cub::BlockLoad<vec_t, kNThreads, 1, cub::BLOCK_LOAD_DIRECT>;
    using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNElts, cub::BLOCK_STORE_WARP_TRANSPOSE>;
    using BlockStoreVecT = cub::BlockStore<vec_t, kNThreads, 1, cub::BLOCK_STORE_DIRECT>;
    using BlockReduceFloatT = cub::BlockReduce<float, kNThreads>;
    static constexpr int kSmemIOSize = kIsVecLoad
        ? 0
        : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)});
    static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts * (!kSiluAct ? 1 : kNExchangeRounds + 1);
    static constexpr int kSmemSize = std::max({kSmemExchangeSize,
                                               int(sizeof(typename BlockReduceFloatT::TempStorage))}) + (kIsVecLoad ? 0 : kSmemIOSize);
};
+
// Backward kernel for the seqlen-contiguous layout. One block handles one
// (batch, channel) pair, sweeping the sequence chunk-by-chunk from the end.
template<typename Ktraits>
__global__ __launch_bounds__(Ktraits::kNThreads)
void causal_conv1d_bwd_kernel(ConvParamsBwd params) {
    constexpr int kWidth = Ktraits::kWidth;
    constexpr int kNThreads = Ktraits::kNThreads;
    constexpr bool kSiluAct = Ktraits::kSiluAct;
    constexpr int kNElts = Ktraits::kNElts;
    constexpr int kNExchangeRounds = Ktraits::kNExchangeRounds;
    constexpr bool kIsVecLoad = Ktraits::kIsVecLoad;
    using input_t = typename Ktraits::input_t;
    using vec_t = typename Ktraits::vec_t;
    using weight_t = typename Ktraits::weight_t;

    // Shared memory.
    extern __shared__ char smem_[];
    auto& smem_load = reinterpret_cast<typename Ktraits::BlockLoadT::TempStorage&>(smem_);
    auto& smem_load_vec = reinterpret_cast<typename Ktraits::BlockLoadVecT::TempStorage&>(smem_);
    auto& smem_store = reinterpret_cast<typename Ktraits::BlockStoreT::TempStorage&>(smem_);
    auto& smem_store_vec = reinterpret_cast<typename Ktraits::BlockStoreVecT::TempStorage&>(smem_);
    vec_t *smem_exchange = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize);
    vec_t *smem_exchange_x = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize) + kNThreads * kNExchangeRounds;
    auto& smem_reduce_float = *reinterpret_cast<typename Ktraits::BlockReduceFloatT::TempStorage*>(smem_ + Ktraits::kSmemIOSize);

    const int tidx = threadIdx.x;
    const int batch_id = blockIdx.x;
    const int dim_id = blockIdx.y;
    input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
        + dim_id * params.x_c_stride;
    weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + dim_id * params.weight_c_stride;
    input_t *dout = reinterpret_cast<input_t *>(params.dout_ptr) + batch_id * params.dout_batch_stride
        + dim_id * params.dout_c_stride;
    input_t *dx = reinterpret_cast<input_t *>(params.dx_ptr) + batch_id * params.dx_batch_stride
        + dim_id * params.dx_c_stride;
    float *dweight = reinterpret_cast<float *>(params.dweight_ptr) + dim_id * params.dweight_c_stride;
    float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[dim_id]);

    // Thread kNThreads - 1 will load the first elements of the next chunk so we initialize those to 0.
    if (tidx == 0) {
        if constexpr (!kSiluAct) {
            input_t zeros[kNElts] = {0};
            smem_exchange[0] = reinterpret_cast<vec_t *>(zeros)[0];
        } else {
            float zeros[kNElts] = {0};
            #pragma unroll
            for (int r = 0; r < kNExchangeRounds; ++r) {
                smem_exchange[r * kNThreads] = reinterpret_cast<vec_t *>(zeros)[r];
            }
        }
    }

    float weight_vals[kWidth];
    #pragma unroll
    for (int i = 0; i < kWidth; ++i) { weight_vals[i] = weight[i * params.weight_width_stride]; }

    float dweight_vals[kWidth] = {0};
    float dbias_val = 0;

    constexpr int kChunkSize = kNThreads * kNElts;
    const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize;
    x += (n_chunks - 1) * kChunkSize;
    dout += (n_chunks - 1) * kChunkSize;
    dx += (n_chunks - 1) * kChunkSize;
    for (int chunk = n_chunks - 1; chunk >= 0; --chunk) {
        input_t x_vals_load[2 * kNElts] = {0};
        input_t dout_vals_load[2 * kNElts] = {0};
        if constexpr (kIsVecLoad) {
            Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(x), *reinterpret_cast<vec_t (*)[1]>(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts);
            Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(dout), *reinterpret_cast<vec_t (*)[1]>(&dout_vals_load[0]), (params.seqlen - chunk * kChunkSize) / kNElts);
        } else {
            __syncthreads();
            Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast<input_t (*)[kNElts]>(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize);
            __syncthreads();
            Ktraits::BlockLoadT(smem_load).Load(dout, *reinterpret_cast<input_t (*)[kNElts]>(&dout_vals_load[0]), params.seqlen - chunk * kChunkSize);
        }
        float dout_vals[2 * kNElts], x_vals[2 * kNElts];
        if constexpr (!kSiluAct) {
            __syncthreads();
            // Thread 0 don't write yet, so that thread kNThreads - 1 can read
            // the first elements of the next chunk.
            if (tidx > 0) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(dout_vals_load)[0]; }
            __syncthreads();
            reinterpret_cast<vec_t *>(dout_vals_load)[1] = smem_exchange[tidx < kNThreads - 1 ? tidx + 1 : 0];
            __syncthreads();
            // Now thread 0 can write the first elements of the current chunk.
            if (tidx == 0) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(dout_vals_load)[0]; }
            #pragma unroll
            for (int i = 0; i < 2 * kNElts; ++i) {
                dout_vals[i] = float(dout_vals_load[i]);
                x_vals[i] = float(x_vals_load[i]);
            }
        } else {
            if (tidx == 0 && chunk > 0) {
                if constexpr (kIsVecLoad) {
                    reinterpret_cast<vec_t *>(x_vals_load)[0] = reinterpret_cast<vec_t *>(x)[-1];
                } else {
                    #pragma unroll
                    for (int i = 0; i < kNElts; ++i) {
                        if (chunk * kChunkSize + i < params.seqlen) { x_vals_load[i] = x[-kNElts + i]; }
                    }
                }
            }
            __syncthreads();
            smem_exchange_x[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1];
            __syncthreads();
            if (tidx > 0) { reinterpret_cast<vec_t *>(x_vals_load)[0] = smem_exchange_x[tidx - 1]; }
            #pragma unroll
            for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); }
            // Recompute the output
            #pragma unroll
            for (int i = 0; i < kNElts; ++i) {
                float out_val = bias_val;
                #pragma unroll
                for (int w = 0; w < kWidth; ++w) {
                    out_val += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)];
                }
                float out_sigmoid_val = 1.0f / (1.0f + expf(-out_val));
                // d/dz silu(z) = sigmoid(z) * (1 + z * (1 - sigmoid(z)))
                dout_vals[i] = float(dout_vals_load[i]) * out_sigmoid_val
                    * (1.0f + out_val * (1.0f - out_sigmoid_val));
            }
            // Exchange the dout_vals. It's possible that we need to do 2 rounds of exchange
            // if input_t is 16 bits (since then we'd have 8 values of float)
            __syncthreads();
            // Thread 0 don't write yet, so that thread kNThreads - 1 can read
            // the first elements of the next chunk.
            if (tidx > 0) {
                #pragma unroll
                for (int r = 0; r < kNExchangeRounds; ++r) {
                    smem_exchange[r * kNThreads + tidx] = reinterpret_cast<vec_t *>(dout_vals)[r];
                }
            }
            __syncthreads();
            #pragma unroll
            for (int r = 0; r < kNExchangeRounds; ++r) {
                reinterpret_cast<vec_t *>(dout_vals)[kNExchangeRounds + r]
                    = smem_exchange[r * kNThreads + (tidx < kNThreads - 1 ? tidx + 1 : 0)];
            }
            __syncthreads();
            // Now thread 0 can write the first elements of the current chunk.
            if (tidx == 0) {
                #pragma unroll
                for (int r = 0; r < kNExchangeRounds; ++r) {
                    smem_exchange[r * kNThreads + tidx] = reinterpret_cast<vec_t *>(dout_vals)[r];
                }
            }
        }
        dout -= kChunkSize;
        x -= kChunkSize;

        #pragma unroll
        for (int i = 0; i < kNElts; ++i) { dbias_val += dout_vals[i]; }

        float dx_vals[kNElts] = {0};
        #pragma unroll
        for (int i = 0; i < kNElts; ++i) {
            #pragma unroll
            for (int w = 0; w < kWidth; ++w) {
                dx_vals[i] += weight_vals[w] * dout_vals[i + kWidth - w - 1];
            }
        }

        input_t dx_vals_store[kNElts];
        #pragma unroll
        for (int i = 0; i < kNElts; ++i) { dx_vals_store[i] = dx_vals[i]; }
        if constexpr (kIsVecLoad) {
            Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast<vec_t*>(dx), reinterpret_cast<vec_t (&)[1]>(dx_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts);
        } else {
            Ktraits::BlockStoreT(smem_store).Store(dx, dx_vals_store, params.seqlen - chunk * kChunkSize);
        }
        dx -= kChunkSize;

        #pragma unroll
        for (int w = 0; w < kWidth; ++w) {
            #pragma unroll
            for (int i = 0; i < kNElts; ++i) {
                dweight_vals[w] += x_vals[kNElts + i] * dout_vals[i + kWidth - w - 1];
            }
        }
    }

    #pragma unroll
    for (int w = 0; w < kWidth; ++w) {
        __syncthreads();
        dweight_vals[w] = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dweight_vals[w]);
        if (tidx == 0) {
            atomicAdd(&reinterpret_cast<float *>(dweight)[w * params.dweight_width_stride], dweight_vals[w]);
        }
    }
    if (params.bias_ptr != nullptr) {
        __syncthreads();
        dbias_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dbias_val);
        if (tidx == 0) {
            atomicAdd(&reinterpret_cast<float *>(params.dbias_ptr)[dim_id], dbias_val);
        }
    }
}
+
// Launch helper: picks vectorized loads when seqlen is a multiple of the
// per-thread element count, and opts into >48KB dynamic smem when needed.
template<int kNThreads, int kWidth, typename input_t, typename weight_t>
void causal_conv1d_bwd_launch(ConvParamsBwd &params, cudaStream_t stream) {
    static constexpr int kNElts = sizeof(input_t) == 4 ? 4 : 8;
    BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] {
        BOOL_SWITCH(params.silu_activation, kSiluAct, [&] {
            using Ktraits = Causal_conv1d_bwd_kernel_traits<kNThreads, kWidth, kSiluAct, kIsVecLoad, input_t, weight_t>;
            constexpr int kSmemSize = Ktraits::kSmemSize;
            dim3 grid(params.batch, params.dim);
            auto kernel = &causal_conv1d_bwd_kernel<Ktraits>;
            if (kSmemSize >= 48 * 1024) {
                C10_CUDA_CHECK(cudaFuncSetAttribute(
                    kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
            }
            kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
        });
    });
}
+
// Dispatch on the (compile-time) filter width; only widths 2-4 are
// supported (enforced by the host-side TORCH_CHECKs).
template<typename input_t, typename weight_t>
void causal_conv1d_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream) {
    if (params.width == 2) {
        causal_conv1d_bwd_launch<128, 2, input_t, weight_t>(params, stream);
    } else if (params.width == 3) {
        causal_conv1d_bwd_launch<128, 3, input_t, weight_t>(params, stream);
    } else if (params.width == 4) {
        causal_conv1d_bwd_launch<128, 4, input_t, weight_t>(params, stream);
    }
}
+
template<int kNThreads_, int kWidth_, int kChunkSizeL_, bool kSiluAct_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
struct Causal_conv1d_channellast_bwd_kernel_traits {
    // The cache line is 128 bytes, and we try to read 16 bytes per thread.
    // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension.
    // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128
    // threads). Each each load is 16 x 32|64 elements in the L x C dimensions.
    using input_t = input_t_;
    using weight_t = weight_t_;
    static constexpr bool kSiluAct = kSiluAct_;
    static constexpr int kNThreads = kNThreads_;
    static_assert(kNThreads % 32 == 0);
    static constexpr int kNWarps = kNThreads / 32;
    static constexpr int kWidth = kWidth_;
    static constexpr int kChunkSizeL = kChunkSizeL_;
    static constexpr int kNBytes = sizeof(input_t);
    static_assert(kNBytes == 2 || kNBytes == 4);
    static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
    static constexpr int kNEltsPerRow = 128 / kNBytes;
    static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts;  // Always 8 for now
    static_assert(kNThreadsPerRow * kNBytes * kNElts == 128);
    static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow;  // Always 4 for now
    static_assert(kNColsPerWarp * kNThreadsPerRow == 32);
    static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps;
    static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad;
    static_assert(kNLoads * kNColsPerLoad == kChunkSizeL);
    static constexpr bool kIsVecLoad = kIsVecLoad_;
    using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
    // using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNItems, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
    // using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNItems, cub::BLOCK_STORE_WARP_TRANSPOSE>;
    // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage),
    //                                            sizeof(typename BlockStoreT::TempStorage)});
    // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes;
};
+
// Backward kernel for the channels-last layout. Each block handles a
// (batch, L-chunk, C-chunk) tile: loads x/dout tiles (plus halo rows)
// into shared memory, then each thread reduces one (channel, L-slice).
template<typename Ktraits>
__global__ __launch_bounds__(Ktraits::kNThreads)
void causal_conv1d_channellast_bwd_kernel(ConvParamsBwd params) {
    constexpr int kWidth = Ktraits::kWidth;
    constexpr int kNThreads = Ktraits::kNThreads;
    constexpr bool kSiluAct = Ktraits::kSiluAct;
    constexpr int kNElts = Ktraits::kNElts;
    constexpr int kNWarp = Ktraits::kNWarps;
    constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow;
    constexpr int kLPerLoad = Ktraits::kNColsPerLoad;
    constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
    constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
    using input_t = typename Ktraits::input_t;
    using vec_t = typename Ktraits::vec_t;
    using weight_t = typename Ktraits::weight_t;

    // Shared memory.
    __shared__ input_t dout_smem[kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts];
    __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts];

    const int tid = threadIdx.x;
    const int l_idx = tid / kNThreadsPerC;
    const int c_idx = tid % kNThreadsPerC;
    const int batch_id = blockIdx.x;
    const int chunk_l_id = blockIdx.y;
    const int chunk_c_id = blockIdx.z;
    input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
        + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
    weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr)
        + chunk_c_id * kChunkSizeC * params.weight_c_stride;
    input_t *dout = reinterpret_cast<input_t *>(params.dout_ptr) + batch_id * params.dout_batch_stride
        + (chunk_l_id * kChunkSizeL + l_idx) * params.dout_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
    input_t *dx = reinterpret_cast<input_t *>(params.dx_ptr) + batch_id * params.dx_batch_stride
        + (chunk_l_id * kChunkSizeL + l_idx) * params.dx_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
    float *dweight = reinterpret_cast<float *>(params.dweight_ptr)
        + chunk_c_id * kChunkSizeC * params.dweight_c_stride;

    #pragma unroll
    for (int l = 0; l < Ktraits::kNLoads; ++l) {
        input_t dout_vals_load[kNElts] = {0};
        input_t x_vals_load[kNElts] = {0};
        if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
            reinterpret_cast<vec_t *>(dout_vals_load)[0] = *reinterpret_cast<vec_t *>(dout + l * kLPerLoad * params.dout_l_stride);
            reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + l * kLPerLoad * params.x_l_stride);
        }
        reinterpret_cast<vec_t *>(dout_smem[l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(dout_vals_load)[0];
        reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
    }
    // Load the elements from the previous chunk or next chunk that are needed for convolution.
    if (l_idx < kWidth - 1) {
        input_t dout_vals_load[kNElts] = {0};
        input_t x_vals_load[kNElts] = {0};
        if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen
            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
            reinterpret_cast<vec_t *>(dout_vals_load)[0] = *reinterpret_cast<vec_t *>(dout + kChunkSizeL * params.dout_l_stride);
        }
        if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0
            && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen
            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
            reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x - (kWidth - 1) * params.x_l_stride);
        }
        reinterpret_cast<vec_t *>(dout_smem[kChunkSizeL + l_idx])[c_idx] = reinterpret_cast<vec_t *>(dout_vals_load)[0];
        reinterpret_cast<vec_t *>(x_smem[l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
    }
    // Need to load (kWidth - 1) extra x's on the right to recompute the (kChunkSizeL + kWidth - 1) outputs
    if constexpr (kSiluAct) {
        if (l_idx < kWidth - 1) {
            input_t x_vals_load[kNElts] = {0};
            if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen
                && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
                reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + kChunkSizeL * params.x_l_stride);
            }
            reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + kChunkSizeL + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
        }
    }

    __syncthreads();

    constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL);
    static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC);
    constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread;
    static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL);
    // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity
    static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0);
    static_assert((kLPerThread & (kLPerThread - 1)) == 0);
    static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0);
    static_assert(kNThreadsPerRow <= 32);

    const int row_idx = tid / kNThreadsPerRow;
    const int col_idx = tid % kNThreadsPerRow;

    float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]);
    float weight_vals[kWidth] = {0};
    if (chunk_c_id * kChunkSizeC + row_idx < params.dim) {
        #pragma unroll
        for (int w = 0; w < kWidth; ++w) {
            weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride];
        }
    }
    float dout_vals[kLPerThread + kWidth - 1];
    float x_vals[kWidth - 1 + kLPerThread + kWidth - 1];
    #pragma unroll
    for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) {
        dout_vals[i] = float(dout_smem[col_idx * kLPerThread + i][row_idx]);
        x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
    }

    if constexpr (kSiluAct) {  // Recompute the output
        #pragma unroll
        for (int i = kWidth - 1 + kLPerThread; i < kWidth - 1 + kLPerThread + kWidth - 1; ++i) {
            x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
        }
        #pragma unroll
        for (int i = 0; i < kLPerThread + kWidth - 1; ++i) {
            float out_val = bias_val;
            #pragma unroll
            for (int w = 0; w < kWidth; ++w) { out_val += weight_vals[w] * x_vals[i + w]; }
            float out_val_sigmoid = 1.f / (1.f + expf(-out_val));
            // Chain rule through SiLU: d/dz silu(z) = sig(z) * (1 + z * (1 - sig(z)))
            dout_vals[i] *= out_val_sigmoid * (1 + out_val * (1 - out_val_sigmoid));
        }
    }

    float dweight_vals[kWidth] = {0};
    SumOp<float> sum_op;
    #pragma unroll
    for (int w = 0; w < kWidth; ++w) {
        #pragma unroll
        for (int i = 0; i < kLPerThread; ++i) { dweight_vals[w] += x_vals[i + w] * dout_vals[i]; }
        dweight_vals[w] = Allreduce<kNThreadsPerRow>::run(dweight_vals[w], sum_op);
        if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) {
            atomicAdd(&reinterpret_cast<float *>(dweight)[row_idx * params.dweight_c_stride + w * params.dweight_width_stride], dweight_vals[w]);
        }
    }

    if (params.bias_ptr != nullptr) {
        float dbias_val = 0.f;
        for (int i = 0; i < kLPerThread; ++i) { dbias_val += dout_vals[i]; }
        dbias_val = Allreduce<kNThreadsPerRow>::run(dbias_val, sum_op);
        if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) {
            atomicAdd(&reinterpret_cast<float *>(params.dbias_ptr)[chunk_c_id * kChunkSizeC + row_idx], dbias_val);
        }
    }

    float dx_vals[kLPerThread] = {0};
    #pragma unroll
    for (int i = 0; i < kLPerThread; ++i) {
        #pragma unroll
        for (int w = 0; w < kWidth; ++w) { dx_vals[i] += weight_vals[kWidth - 1 - w] * dout_vals[i + w]; }
    }
    // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads.
    __syncwarp();
    // Reuse x_smem to stage dx before the coalesced store below.
    #pragma unroll
    for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = dx_vals[i]; }
    __syncthreads();

    #pragma unroll
    for (int l = 0; l < Ktraits::kNLoads; ++l) {
        input_t dx_vals_store[kNElts];
        reinterpret_cast<vec_t *>(dx_vals_store)[0] = reinterpret_cast<vec_t *>(x_smem[l * kLPerLoad + l_idx])[c_idx];
        if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
            *reinterpret_cast<vec_t *>(dx + l * kLPerLoad * params.dx_l_stride) = reinterpret_cast<vec_t *>(dx_vals_store)[0];
        }
    }

}
+
+// Host-side launcher for the channel-last backward kernel.
+// NOTE(review): all `<...>` template-argument spans and the `<<<...>>>` launch
+// configuration were stripped from the checked-in text (and `&params` was
+// mangled to `¶ms`); restored here from the upstream Dao-AILab/causal-conv1d
+// sources -- confirm against a pristine copy.
+template<int kNThreads, int kWidth, typename input_t, typename weight_t>
+void causal_conv1d_channellast_bwd_launch(ConvParamsBwd &params, cudaStream_t stream) {
+    BOOL_SWITCH(params.silu_activation, kSiluAct, [&] {
+        using Ktraits = Causal_conv1d_channellast_bwd_kernel_traits<kNThreads, kWidth, 64, kSiluAct, true, input_t, weight_t>;
+        // constexpr int kSmemSize = Ktraits::kSmemSize;
+        constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
+        constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
+        const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL;
+        const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC;
+        dim3 grid(params.batch, n_chunks_L, n_chunks_C);
+        dim3 block(Ktraits::kNThreads);
+        auto kernel = &causal_conv1d_channellast_bwd_kernel<Ktraits>;
+        // if (kSmemSize >= 48 * 1024) {
+        //     C10_CUDA_CHECK(cudaFuncSetAttribute(
+        //         kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
+        // }
+        // kernel<<<grid, block, kSmemSize, stream>>>(params);
+        kernel<<<grid, block, 0, stream>>>(params);
+        C10_CUDA_KERNEL_LAUNCH_CHECK();
+    });
+}
+
+// Dispatch on the (runtime) convolution width to a compile-time kWidth.
+// NOTE(review): restores the stripped `template<...>` header and the
+// `&params` reference that extraction mangled to `¶ms`.
+template<typename input_t, typename weight_t>
+void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream) {
+    if (params.width == 2) {
+        causal_conv1d_channellast_bwd_launch<128, 2, input_t, weight_t>(params, stream);
+    } else if (params.width == 3) {
+        causal_conv1d_channellast_bwd_launch<128, 3, input_t, weight_t>(params, stream);
+    } else if (params.width == 4) {
+        causal_conv1d_channellast_bwd_launch<128, 4, input_t, weight_t>(params, stream);
+    }
+}
+
+// Explicit instantiations for every supported (input dtype, weight dtype) pair.
+// NOTE(review): the explicit `<input_t, weight_t>` argument lists were
+// stripped from the checked-in text; reconstructed from upstream, which
+// instantiates all 9 combinations of {float, at::Half, at::BFloat16}.
+template void causal_conv1d_bwd_cuda<float, float>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_bwd_cuda<at::Half, float>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_bwd_cuda<at::BFloat16, float>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_bwd_cuda<float, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_bwd_cuda<at::Half, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_bwd_cuda<at::BFloat16, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_bwd_cuda<float, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_bwd_cuda<at::Half, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_bwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
+
+template void causal_conv1d_channellast_bwd_cuda<float, float>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_channellast_bwd_cuda<at::Half, float>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, float>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_channellast_bwd_cuda<float, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_channellast_bwd_cuda<at::Half, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_channellast_bwd_cuda<float, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_channellast_bwd_cuda<at::Half, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
+template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..8dd6a333b52163986c085f71475709706ce8f9c3
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h
@@ -0,0 +1,64 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+#pragma once
+
+#include
+#include
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Maps a byte count to an unsigned integer type of exactly that size, used
+// for vectorized loads/stores. Only the listed sizes are valid; any other
+// BYTES value fails at compile time (primary template has no ::Type).
+// NOTE(review): restores the stripped `template<int BYTES>` parameter list.
+template<int BYTES> struct BytesToType {};
+
+template<> struct BytesToType<16> {
+    using Type = uint4;  // CUDA built-in 16-byte vector type
+    static_assert(sizeof(Type) == 16);
+};
+
+template<> struct BytesToType<8> {
+    using Type = uint64_t;
+    static_assert(sizeof(Type) == 8);
+};
+
+template<> struct BytesToType<4> {
+    using Type = uint32_t;
+    static_assert(sizeof(Type) == 4);
+};
+
+template<> struct BytesToType<2> {
+    using Type = uint16_t;
+    static_assert(sizeof(Type) == 2);
+};
+
+template<> struct BytesToType<1> {
+    using Type = uint8_t;
+    static_assert(sizeof(Type) == 1);
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Binary addition functor, usable as the Operator of Allreduce below.
+// NOTE(review): restores the stripped `template<typename T>` header.
+template<typename T>
+struct SumOp {
+__device__ inline T operator()(T const & x, T const & y) { return x + y; }
+};
+
+// Butterfly all-reduce across THREADS lanes of a warp using __shfl_xor_sync:
+// each recursion level halves the XOR offset until the <2> base case.
+// After run() returns, every participating lane holds the reduced value.
+// NOTE(review): restores the stripped template parameter lists and the
+// `Allreduce<OFFSET>` recursion argument.
+template<int THREADS>
+struct Allreduce {
+    static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4);
+    template<typename T, typename Operator>
+    static __device__ inline T run(T x, Operator &op) {
+        constexpr int OFFSET = THREADS / 2;
+        x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET));
+        return Allreduce<OFFSET>::run(x, op);
+    }
+};
+
+template<>
+struct Allreduce<2> {
+template<typename T, typename Operator>
+static __device__ inline T run(T x, Operator &op) {
+    x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1));
+    return x;
+}
+};
diff --git a/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu
new file mode 100644
index 0000000000000000000000000000000000000000..74a1459f88a87ef427075a25e5081899e382efc0
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu
@@ -0,0 +1,350 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+#include
+#include
+#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
+
+#include
+#include
+
+#include "causal_conv1d.h"
+#include "causal_conv1d_common.h"
+#include "static_switch.h"
+
+// Compile-time configuration for the channel-first forward kernel.
+// NOTE(review): the `template<...>`, `BytesToType<...>` and cub
+// `BlockLoad/BlockStore<...>` argument lists were stripped from the
+// checked-in text; reconstructed from upstream -- confirm against a
+// pristine copy.
+template<int kNThreads_, int kWidth_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
+struct Causal_conv1d_fwd_kernel_traits {
+    using input_t = input_t_;
+    using weight_t = weight_t_;
+    static constexpr int kNThreads = kNThreads_;
+    static constexpr int kWidth = kWidth_;
+    static constexpr int kNBytes = sizeof(input_t);
+    static_assert(kNBytes == 2 || kNBytes == 4);
+    // Each thread processes 16 bytes worth of elements per chunk.
+    static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
+    static_assert(kWidth <= kNElts);
+    static constexpr bool kIsVecLoad = kIsVecLoad_;
+    using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
+    using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNElts, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
+    using BlockLoadVecT = cub::BlockLoad<vec_t, kNThreads, 1>;
+    using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNElts, cub::BLOCK_STORE_WARP_TRANSPOSE>;
+    using BlockStoreVecT = cub::BlockStore<vec_t, kNThreads, 1>;
+    // Vectorized IO bypasses cub temp storage; otherwise load/store share it.
+    static constexpr int kSmemIOSize = kIsVecLoad
+        ? 0
+        : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)});
+    static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts;
+    static constexpr int kSmemSize = kSmemIOSize + kSmemExchangeSize;
+};
+
+// Channel-first causal depthwise conv1d forward: one block per (batch, channel),
+// the sequence processed in chunks of kNThreads * kNElts elements; smem_exchange
+// passes each thread's last kNElts elements to its right neighbor so the
+// convolution window can reach back across thread boundaries.
+// NOTE(review): all stripped `<...>` spans (casts, template header) restored
+// from upstream -- confirm against a pristine copy.
+template<typename Ktraits>
+__global__ __launch_bounds__(Ktraits::kNThreads)
+void causal_conv1d_fwd_kernel(ConvParamsBase params) {
+    constexpr int kWidth = Ktraits::kWidth;
+    constexpr int kNThreads = Ktraits::kNThreads;
+    constexpr int kNElts = Ktraits::kNElts;
+    constexpr bool kIsVecLoad = Ktraits::kIsVecLoad;
+    using input_t = typename Ktraits::input_t;
+    using vec_t = typename Ktraits::vec_t;
+    using weight_t = typename Ktraits::weight_t;
+
+    // Shared memory: cub IO temp storage first, then the exchange buffer.
+    extern __shared__ char smem_[];
+    auto& smem_load = reinterpret_cast<typename Ktraits::BlockLoadT::TempStorage&>(smem_);
+    auto& smem_load_vec = reinterpret_cast<typename Ktraits::BlockLoadVecT::TempStorage&>(smem_);
+    auto& smem_store = reinterpret_cast<typename Ktraits::BlockStoreT::TempStorage&>(smem_);
+    auto& smem_store_vec = reinterpret_cast<typename Ktraits::BlockStoreVecT::TempStorage&>(smem_);
+    vec_t *smem_exchange = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize);
+
+    const int tidx = threadIdx.x;
+    const int batch_id = blockIdx.x;
+    const int channel_id = blockIdx.y;
+    input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
+        + channel_id * params.x_c_stride;
+    weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + channel_id * params.weight_c_stride;
+    input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
+        + channel_id * params.out_c_stride;
+    float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[channel_id]);
+
+    // Thread 0 will load the last elements of the previous chunk, so we initialize those to 0.
+    if (tidx == 0) {
+        input_t zeros[kNElts] = {0};
+        smem_exchange[kNThreads - 1] = reinterpret_cast<vec_t *>(zeros)[0];
+    }
+
+    float weight_vals[kWidth];
+    #pragma unroll
+    for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); }
+
+    constexpr int kChunkSize = kNThreads * kNElts;
+    const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize;
+    for (int chunk = 0; chunk < n_chunks; ++chunk) {
+        // First kNElts slots hold the previous thread's tail; this thread's
+        // fresh data goes in the upper half.
+        input_t x_vals_load[2 * kNElts] = {0};
+        if constexpr(kIsVecLoad) {
+            Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(x), *reinterpret_cast<vec_t (*)[1]>(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts);
+        } else {
+            __syncthreads();
+            Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast<input_t (*)[kNElts]>(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize);
+        }
+        x += kChunkSize;
+        __syncthreads();
+        // Thread kNThreads - 1 don't write yet, so that thread 0 can read
+        // the last elements of the previous chunk.
+        if (tidx < kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1]; }
+        __syncthreads();
+        reinterpret_cast<vec_t *>(x_vals_load)[0] = smem_exchange[tidx > 0 ? tidx - 1 : kNThreads - 1];
+        __syncthreads();
+        // Now thread kNThreads - 1 can write the last elements of the current chunk.
+        if (tidx == kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1]; }
+
+        float x_vals[2 * kNElts];
+        #pragma unroll
+        for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); }
+
+        float out_vals[kNElts];
+        #pragma unroll
+        for (int i = 0; i < kNElts; ++i) {
+            out_vals[i] = bias_val;
+            #pragma unroll
+            for (int w = 0; w < kWidth; ++w) {
+                out_vals[i] += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)];
+            }
+        }
+
+        if (params.silu_activation) {
+            // SiLU: x * sigmoid(x).
+            #pragma unroll
+            for (int i = 0; i < kNElts; ++i) {
+                out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i]));
+            }
+        }
+
+        input_t out_vals_store[kNElts];
+        #pragma unroll
+        for (int i = 0; i < kNElts; ++i) { out_vals_store[i] = out_vals[i]; }
+        if constexpr(kIsVecLoad) {
+            Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast<vec_t*>(out), reinterpret_cast<vec_t (&)[1]>(out_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts);
+        } else {
+            Ktraits::BlockStoreT(smem_store).Store(out, out_vals_store, params.seqlen - chunk * kChunkSize);
+        }
+        out += kChunkSize;
+    }
+}
+
+// Host-side launcher for the channel-first forward kernel; picks vectorized
+// IO when seqlen is a multiple of the per-thread element count.
+// NOTE(review): stripped template arguments, `<<<...>>>` launch config and
+// `&params` restored from upstream.
+template<int kNThreads, int kWidth, typename input_t, typename weight_t>
+void causal_conv1d_fwd_launch(ConvParamsBase &params, cudaStream_t stream) {
+    static constexpr int kNElts = sizeof(input_t) == 4 ? 4 : 8;
+    BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] {
+        using Ktraits = Causal_conv1d_fwd_kernel_traits<kNThreads, kWidth, kIsVecLoad, input_t, weight_t>;
+        constexpr int kSmemSize = Ktraits::kSmemSize;
+        dim3 grid(params.batch, params.dim);
+        auto kernel = &causal_conv1d_fwd_kernel<Ktraits>;
+        if (kSmemSize >= 48 * 1024) {
+            // Opt in to >48KB dynamic shared memory where the device allows it.
+            C10_CUDA_CHECK(cudaFuncSetAttribute(
+                kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
+        }
+        kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
+        C10_CUDA_KERNEL_LAUNCH_CHECK();
+    });
+}
+
+// Dispatch on the (runtime) convolution width to a compile-time kWidth.
+// NOTE(review): restores the stripped `template<...>` header and `&params`.
+template<typename input_t, typename weight_t>
+void causal_conv1d_fwd_cuda(ConvParamsBase &params, cudaStream_t stream) {
+    if (params.width == 2) {
+        causal_conv1d_fwd_launch<128, 2, input_t, weight_t>(params, stream);
+    } else if (params.width == 3) {
+        causal_conv1d_fwd_launch<128, 3, input_t, weight_t>(params, stream);
+    } else if (params.width == 4) {
+        causal_conv1d_fwd_launch<128, 4, input_t, weight_t>(params, stream);
+    }
+}
+
+// Compile-time configuration for the channel-last forward kernel.
+// NOTE(review): the stripped `template<...>` parameter list and the
+// `BytesToType<...>` / commented cub argument lists are restored from
+// upstream -- confirm against a pristine copy.
+template<int kNThreads_, int kWidth_, int kChunkSizeL_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
+struct Causal_conv1d_channellast_fwd_kernel_traits {
+    // The cache line is 128 bytes, and we try to read 16 bytes per thread.
+    // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension.
+    // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128
+    // threads). Each each load is 16 x 32|64 elements in the L x C dimensions.
+    using input_t = input_t_;
+    using weight_t = weight_t_;
+    static constexpr int kNThreads = kNThreads_;
+    static_assert(kNThreads % 32 == 0);
+    static constexpr int kNWarps = kNThreads / 32;
+    static constexpr int kWidth = kWidth_;
+    static constexpr int kChunkSizeL = kChunkSizeL_;
+    static constexpr int kNBytes = sizeof(input_t);
+    static_assert(kNBytes == 2 || kNBytes == 4);
+    static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
+    static constexpr int kNEltsPerRow = 128 / kNBytes;
+    static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts;  // Always 8 for now
+    static_assert(kNThreadsPerRow * kNBytes * kNElts == 128);
+    static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow;  // Always 4 for now
+    static_assert(kNColsPerWarp * kNThreadsPerRow == 32);
+    static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps;
+    static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad;
+    static_assert(kNLoads * kNColsPerLoad == kChunkSizeL);
+    static constexpr bool kIsVecLoad = kIsVecLoad_;
+    using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
+    // using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNItems, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
+    // using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNItems, cub::BLOCK_STORE_WARP_TRANSPOSE>;
+    // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage),
+    //                                            sizeof(typename BlockStoreT::TempStorage)});
+    // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes;
+};
+
+// Channel-last causal depthwise conv1d forward: each block handles one
+// (batch, L-chunk, C-chunk) tile; the tile is staged in shared memory in
+// (L, C) layout, then threads are re-partitioned so each owns kLPerThread
+// consecutive L positions of one channel row.
+// NOTE(review): stripped `<...>` spans restored from upstream. Also fixes a
+// real bug in the weight-load guard: `chunk_c_id + kChunkSizeC + row_idx`
+// must be `chunk_c_id * kChunkSizeC + row_idx` -- the `+` form disagrees
+// with every other bound check in this kernel (and with the bwd kernel) and
+// would read out-of-bounds weights for later channel chunks.
+template<typename Ktraits>
+__global__ __launch_bounds__(Ktraits::kNThreads)
+void causal_conv1d_channellast_fwd_kernel(ConvParamsBase params) {
+    constexpr int kWidth = Ktraits::kWidth;
+    constexpr int kNThreads = Ktraits::kNThreads;
+    constexpr int kNElts = Ktraits::kNElts;
+    constexpr int kNWarp = Ktraits::kNWarps;
+    constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow;
+    constexpr int kLPerLoad = Ktraits::kNColsPerLoad;
+    constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
+    constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
+    using input_t = typename Ktraits::input_t;
+    using vec_t = typename Ktraits::vec_t;
+    using weight_t = typename Ktraits::weight_t;
+
+    // Shared memory: kWidth-1 extra rows hold the tail of the previous L-chunk;
+    // the +kNElts column padding presumably avoids bank conflicts -- confirm.
+    __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL][kChunkSizeC + kNElts];
+
+    const int tid = threadIdx.x;
+    const int l_idx = tid / kNThreadsPerC;
+    const int c_idx = tid % kNThreadsPerC;
+    const int batch_id = blockIdx.x;
+    const int chunk_l_id = blockIdx.y;
+    const int chunk_c_id = blockIdx.z;
+    input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
+        + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
+    weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr)
+        + chunk_c_id * kChunkSizeC * params.weight_c_stride;
+    input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
+        + (chunk_l_id * kChunkSizeL + l_idx) * params.out_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
+
+    #pragma unroll
+    for (int l = 0; l < Ktraits::kNLoads; ++l) {
+        input_t x_vals_load[kNElts] = {0};
+        if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
+            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
+            reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + l * kLPerLoad * params.x_l_stride);
+        }
+        reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
+    }
+    // Load the elements from the previous chunk that are needed for convolution.
+    if (l_idx < kWidth - 1) {
+        input_t x_vals_load[kNElts] = {0};
+        if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0
+            && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen
+            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
+            reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x - (kWidth - 1) * params.x_l_stride);
+        }
+        reinterpret_cast<vec_t *>(x_smem[l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
+    }
+
+    __syncthreads();
+
+    constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL);
+    static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC);
+    constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread;
+    static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL);
+    // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity
+    static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0);
+    static_assert((kLPerThread & (kLPerThread - 1)) == 0);
+    static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0);
+    static_assert(kNThreadsPerRow <= 32);
+
+    const int row_idx = tid / kNThreadsPerRow;
+    const int col_idx = tid % kNThreadsPerRow;
+
+    float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]);
+    float weight_vals[kWidth] = {0};
+    if (chunk_c_id * kChunkSizeC + row_idx < params.dim) {  // FIX: was `chunk_c_id + kChunkSizeC + row_idx`
+        #pragma unroll
+        for (int w = 0; w < kWidth; ++w) {
+            weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride];
+        }
+    }
+    float x_vals[kWidth - 1 + kLPerThread];
+    #pragma unroll
+    for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) {
+        x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
+    }
+
+    float out_vals[kLPerThread];
+    #pragma unroll
+    for (int i = 0; i < kLPerThread; ++i) {
+        out_vals[i] = bias_val;
+        #pragma unroll
+        for (int w = 0; w < kWidth; ++w) { out_vals[i] += weight_vals[w] * x_vals[i + w]; }
+        if (params.silu_activation) {out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i])); }
+    }
+
+    // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads.
+    __syncwarp();
+    #pragma unroll
+    for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = out_vals[i]; }
+    __syncthreads();
+
+    #pragma unroll
+    for (int l = 0; l < Ktraits::kNLoads; ++l) {
+        input_t out_vals_store[kNElts];
+        reinterpret_cast<vec_t *>(out_vals_store)[0] = reinterpret_cast<vec_t *>(x_smem[l * kLPerLoad + l_idx])[c_idx];
+        if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
+            && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
+            *reinterpret_cast<vec_t *>(out + l * kLPerLoad * params.out_l_stride) = reinterpret_cast<vec_t *>(out_vals_store)[0];
+        }
+    }
+
+}
+
+// Host-side launcher for the channel-last forward kernel; grid covers
+// (batch, L-chunks, C-chunks) tiles.
+// NOTE(review): stripped template arguments, `<<<...>>>` launch config and
+// `&params` restored from upstream (kChunkSizeL = 64, vectorized IO).
+template<int kNThreads, int kWidth, typename input_t, typename weight_t>
+void causal_conv1d_channellast_fwd_launch(ConvParamsBase &params, cudaStream_t stream) {
+    using Ktraits = Causal_conv1d_channellast_fwd_kernel_traits<kNThreads, kWidth, 64, true, input_t, weight_t>;
+    // constexpr int kSmemSize = Ktraits::kSmemSize;
+    constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
+    constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
+    const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL;
+    const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC;
+    // printf("n_chunks_L: %d, n_chunks_C: %d\n", n_chunks_L, n_chunks_C);
+    dim3 grid(params.batch, n_chunks_L, n_chunks_C);
+    dim3 block(Ktraits::kNThreads);
+    auto kernel = &causal_conv1d_channellast_fwd_kernel<Ktraits>;
+    // if (kSmemSize >= 48 * 1024) {
+    //     C10_CUDA_CHECK(cudaFuncSetAttribute(
+    //         kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
+    // }
+    // kernel<<<grid, block, kSmemSize, stream>>>(params);
+    kernel<<<grid, block, 0, stream>>>(params);
+    C10_CUDA_KERNEL_LAUNCH_CHECK();
+}
+
+// Dispatch on the (runtime) convolution width to a compile-time kWidth.
+// NOTE(review): restores the stripped `template<...>` header and `&params`.
+template<typename input_t, typename weight_t>
+void causal_conv1d_channellast_fwd_cuda(ConvParamsBase &params, cudaStream_t stream) {
+    if (params.width == 2) {
+        causal_conv1d_channellast_fwd_launch<128, 2, input_t, weight_t>(params, stream);
+    } else if (params.width == 3) {
+        causal_conv1d_channellast_fwd_launch<128, 3, input_t, weight_t>(params, stream);
+    } else if (params.width == 4) {
+        causal_conv1d_channellast_fwd_launch<128, 4, input_t, weight_t>(params, stream);
+    }
+}
+
+// Explicit instantiations for every supported (input dtype, weight dtype) pair.
+// NOTE(review): the explicit `<input_t, weight_t>` argument lists were
+// stripped; reconstructed from upstream (all 9 combinations of
+// {float, at::Half, at::BFloat16}).
+template void causal_conv1d_fwd_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_fwd_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_fwd_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_fwd_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_fwd_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_fwd_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_fwd_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_fwd_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_fwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+
+template void causal_conv1d_channellast_fwd_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_channellast_fwd_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_channellast_fwd_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_channellast_fwd_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_channellast_fwd_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_channellast_fwd_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu
new file mode 100644
index 0000000000000000000000000000000000000000..713e0ac883853491f9bdb0015b578657c228c1e7
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu
@@ -0,0 +1,96 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+#include
+#include
+#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
+
+#include
+#include
+
+#include "causal_conv1d.h"
+#include "causal_conv1d_common.h"
+#include "static_switch.h"
+
+// Compile-time configuration for the single-step (decoding) update kernel.
+// NOTE(review): restores the stripped `template<...>` parameter list.
+template<int kNThreads_, int kWidth_, typename input_t_, typename weight_t_>
+struct Causal_conv1d_update_kernel_traits {
+    using input_t = input_t_;
+    using weight_t = weight_t_;
+    static constexpr int kNThreads = kNThreads_;
+    static constexpr int kWidth = kWidth_;
+    static constexpr int kNBytes = sizeof(input_t);
+    static_assert(kNBytes == 2 || kNBytes == 4);
+};
+
+// Single-timestep update for autoregressive decoding: one thread per channel.
+// Shifts the rolling conv_state left by one, appends the new input x, and
+// emits one output element per channel.
+// NOTE(review): stripped `<...>` spans (casts, template header) restored
+// from upstream -- confirm against a pristine copy.
+template<typename Ktraits>
+__global__ __launch_bounds__(Ktraits::kNThreads)
+void causal_conv1d_update_kernel(ConvParamsBase params) {
+    constexpr int kWidth = Ktraits::kWidth;
+    constexpr int kNThreads = Ktraits::kNThreads;
+    using input_t = typename Ktraits::input_t;
+    using weight_t = typename Ktraits::weight_t;
+
+    const int tidx = threadIdx.x;
+    const int batch_id = blockIdx.x;
+    const int channel_id = blockIdx.y * kNThreads + tidx;
+    input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
+        + channel_id * params.x_c_stride;
+    input_t *conv_state = reinterpret_cast<input_t *>(params.conv_state_ptr) + batch_id * params.conv_state_batch_stride
+        + channel_id * params.conv_state_c_stride;
+    weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + channel_id * params.weight_c_stride;
+    input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
+        + channel_id * params.out_c_stride;
+    float bias_val = params.bias_ptr == nullptr || channel_id >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[channel_id]);
+
+    float weight_vals[kWidth] = {0};
+    if (channel_id < params.dim) {
+        #pragma unroll
+        for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); }
+    }
+
+    float x_vals[kWidth] = {0};
+    if (channel_id < params.dim) {
+        // Shift the state window by one step and append the new input.
+        #pragma unroll
+        for (int i = 0; i < kWidth - 1; ++i) { x_vals[i] = float(conv_state[(i + 1) * params.conv_state_l_stride]); }
+        x_vals[kWidth - 1] = float(x[0]);
+        #pragma unroll
+        for (int i = 0; i < kWidth; ++i) { conv_state[i * params.conv_state_l_stride] = input_t(x_vals[i]); }
+    }
+
+    float out_val = bias_val;
+    #pragma unroll
+    for (int i = 0; i < kWidth; ++i) { out_val += weight_vals[i] * x_vals[i]; }
+    if (params.silu_activation) { out_val = out_val / (1 + expf(-out_val)); }
+    if (channel_id < params.dim) { out[0] = input_t(out_val); }
+}
+
+// Host-side launcher for the update kernel: one thread per channel, grid
+// split over (batch, channel blocks).
+// NOTE(review): stripped template arguments, `<<<...>>>` launch config and
+// `&params` restored from upstream.
+template<int kNThreads, int kWidth, typename input_t, typename weight_t>
+void causal_conv1d_update_launch(ConvParamsBase &params, cudaStream_t stream) {
+    using Ktraits = Causal_conv1d_update_kernel_traits<kNThreads, kWidth, input_t, weight_t>;
+    dim3 grid(params.batch, (params.dim + kNThreads - 1) / kNThreads);
+    auto kernel = &causal_conv1d_update_kernel<Ktraits>;
+    kernel<<<grid, Ktraits::kNThreads, 0, stream>>>(params);
+    C10_CUDA_KERNEL_LAUNCH_CHECK();
+}
+
+// Dispatch on the (runtime) convolution width to a compile-time kWidth.
+// NOTE(review): restores the stripped `template<...>` header and `&params`.
+template<typename input_t, typename weight_t>
+void causal_conv1d_update_cuda(ConvParamsBase &params, cudaStream_t stream) {
+    if (params.width == 2) {
+        causal_conv1d_update_launch<64, 2, input_t, weight_t>(params, stream);
+    } else if (params.width == 3) {
+        causal_conv1d_update_launch<64, 3, input_t, weight_t>(params, stream);
+    } else if (params.width == 4) {
+        causal_conv1d_update_launch<64, 4, input_t, weight_t>(params, stream);
+    }
+}
+
+// Explicit instantiations for every supported (input dtype, weight dtype) pair.
+// NOTE(review): the explicit `<input_t, weight_t>` argument lists were
+// stripped; reconstructed from upstream (all 9 combinations of
+// {float, at::Half, at::BFloat16}).
+template void causal_conv1d_update_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_update_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_update_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_update_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_update_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_update_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_update_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_update_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+template void causal_conv1d_update_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/PRISM/SegMamba/causal-conv1d/csrc/static_switch.h b/PRISM/SegMamba/causal-conv1d/csrc/static_switch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f4ad3eb62235443d15c454b6691c2ec63645219
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/csrc/static_switch.h
@@ -0,0 +1,25 @@
+// Inspired by https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
+// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
+
+#pragma once
+
+/// @param COND - a boolean expression to switch by
+/// @param CONST_NAME - a name given for the constexpr bool variable.
+/// @param ... - code to execute for true and false
+///
+/// Usage:
+/// ```
+/// BOOL_SWITCH(flag, BoolConst, [&] {
+/// some_function(...);
+/// });
+/// ```
+// Expands to an immediately-invoked lambda so the macro can "return" the
+// callback's value; both branches instantiate __VA_ARGS__ with a distinct
+// constexpr bool, giving compile-time specialization selected by a runtime
+// condition.
+#define BOOL_SWITCH(COND, CONST_NAME, ...) \
+    [&] { \
+        if (COND) { \
+            static constexpr bool CONST_NAME = true; \
+            return __VA_ARGS__(); \
+        } else { \
+            static constexpr bool CONST_NAME = false; \
+            return __VA_ARGS__(); \
+        } \
+    }()
diff --git a/PRISM/SegMamba/causal-conv1d/setup.py b/PRISM/SegMamba/causal-conv1d/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..12e36bf988215a4c536278026e6f4401e66534da
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/setup.py
@@ -0,0 +1,264 @@
+# Copyright (c) 2023, Tri Dao.
+import sys
+import warnings
+import os
+import re
+import ast
+from pathlib import Path
+from packaging.version import parse, Version
+import platform
+
+from setuptools import setup, find_packages
+import subprocess
+
+import urllib.request
+import urllib.error
+from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
+
+import torch
+from torch.utils.cpp_extension import (
+ BuildExtension,
+ CppExtension,
+ CUDAExtension,
+ CUDA_HOME,
+)
+
+
+with open("README.md", "r", encoding="utf-8") as fh:
+ long_description = fh.read()
+
+
+# ninja build does not work unless include_dirs are abs path
+this_dir = os.path.dirname(os.path.abspath(__file__))
+
+PACKAGE_NAME = "causal_conv1d"
+
+BASE_WHEEL_URL = "https://github.com/Dao-AILab/causal-conv1d/releases/download/{tag_name}/{wheel_name}"
+
+# FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels
+# SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation
+FORCE_BUILD = os.getenv("CAUSAL_CONV1D_FORCE_BUILD", "FALSE") == "TRUE"
+SKIP_CUDA_BUILD = os.getenv("CAUSAL_CONV1D_SKIP_CUDA_BUILD", "FALSE") == "TRUE"
+# For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI
+FORCE_CXX11_ABI = os.getenv("CAUSAL_CONV1D_FORCE_CXX11_ABI", "FALSE") == "TRUE"
+
+
+def get_platform():
+ """
+ Returns the platform name as used in wheel filenames.
+ """
+ if sys.platform.startswith("linux"):
+ return "linux_x86_64"
+ elif sys.platform == "darwin":
+ mac_version = ".".join(platform.mac_ver()[0].split(".")[:2])
+ return f"macosx_{mac_version}_x86_64"
+ elif sys.platform == "win32":
+ return "win_amd64"
+ else:
+ raise ValueError("Unsupported platform: {}".format(sys.platform))
+
+
+def get_cuda_bare_metal_version(cuda_dir):
+ raw_output = subprocess.check_output(
+ [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
+ )
+ output = raw_output.split()
+ release_idx = output.index("release") + 1
+ bare_metal_version = parse(output[release_idx].split(",")[0])
+
+ return raw_output, bare_metal_version
+
+
+def check_if_cuda_home_none(global_option: str) -> None:
+ if CUDA_HOME is not None:
+ return
+ # warn instead of error because user could be downloading prebuilt wheels, so nvcc won't be necessary
+ # in that case.
+ warnings.warn(
+ f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
+ "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
+ "only images whose names contain 'devel' will provide nvcc."
+ )
+
+
+def append_nvcc_threads(nvcc_extra_args):
+ return nvcc_extra_args + ["--threads", "4"]
+
+
+cmdclass = {}
+ext_modules = []
+
+if not SKIP_CUDA_BUILD:
+ print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
+ TORCH_MAJOR = int(torch.__version__.split(".")[0])
+ TORCH_MINOR = int(torch.__version__.split(".")[1])
+
+ check_if_cuda_home_none("causal_conv1d")
+ # Check, if CUDA11 is installed for compute capability 8.0
+ cc_flag = []
+ if CUDA_HOME is not None:
+ _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
+ if bare_metal_version < Version("11.6"):
+ raise RuntimeError(
+ "causal_conv1d is only supported on CUDA 11.6 and above. "
+ "Note: make sure nvcc has a supported version by running nvcc -V."
+ )
+
+ cc_flag.append("-gencode")
+ cc_flag.append("arch=compute_70,code=sm_70")
+ cc_flag.append("-gencode")
+ cc_flag.append("arch=compute_80,code=sm_80")
+ if bare_metal_version >= Version("11.8"):
+ cc_flag.append("-gencode")
+ cc_flag.append("arch=compute_90,code=sm_90")
+
+ # HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as
+ # torch._C._GLIBCXX_USE_CXX11_ABI
+ # https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920
+ if FORCE_CXX11_ABI:
+ torch._C._GLIBCXX_USE_CXX11_ABI = True
+
+ ext_modules.append(
+ CUDAExtension(
+ name="causal_conv1d_cuda",
+ sources=[
+ "csrc/causal_conv1d.cpp",
+ "csrc/causal_conv1d_fwd.cu",
+ "csrc/causal_conv1d_bwd.cu",
+ "csrc/causal_conv1d_update.cu",
+ ],
+ extra_compile_args={
+ "cxx": ["-O3"],
+ "nvcc": append_nvcc_threads(
+ [
+ "-O3",
+ "-U__CUDA_NO_HALF_OPERATORS__",
+ "-U__CUDA_NO_HALF_CONVERSIONS__",
+ "-U__CUDA_NO_BFLOAT16_OPERATORS__",
+ "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
+ "-U__CUDA_NO_BFLOAT162_OPERATORS__",
+ "-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
+ "--expt-relaxed-constexpr",
+ "--expt-extended-lambda",
+ "--use_fast_math",
+ "--ptxas-options=-v",
+ "-lineinfo",
+ ]
+ + cc_flag
+ ),
+ },
+ include_dirs=[this_dir],
+ )
+ )
+
+
+def get_package_version():
+ with open(Path(this_dir) / "causal_conv1d" / "__init__.py", "r") as f:
+ version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
+ public_version = ast.literal_eval(version_match.group(1))
+ local_version = os.environ.get("CAUSAL_CONV1D_LOCAL_VERSION")
+ if local_version:
+ return f"{public_version}+{local_version}"
+ else:
+ return str(public_version)
+
+
+def get_wheel_url():
+ # Determine the version numbers that will be used to determine the correct wheel
+ # We're using the CUDA version used to build torch, not the one currently installed
+ # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME)
+ torch_cuda_version = parse(torch.version.cuda)
+ torch_version_raw = parse(torch.__version__)
+ # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2
+ # to save CI time. Minor versions should be compatible.
+ torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2")
+ python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
+ platform_name = get_platform()
+ causal_conv1d_version = get_package_version()
+ # cuda_version = f"{cuda_version_raw.major}{cuda_version_raw.minor}"
+ cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}"
+ torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}"
+ cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper()
+
+ # Determine wheel URL based on CUDA version, torch version, python version and OS
+ wheel_filename = f"{PACKAGE_NAME}-{causal_conv1d_version}+cu{cuda_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl"
+ wheel_url = BASE_WHEEL_URL.format(
+ tag_name=f"v{causal_conv1d_version}", wheel_name=wheel_filename
+ )
+ return wheel_url, wheel_filename
+
+
+class CachedWheelsCommand(_bdist_wheel):
+ """
+ The CachedWheelsCommand plugs into the default bdist wheel, which is ran by pip when it cannot
+ find an existing wheel (which is currently the case for all installs). We use
+ the environment parameters to detect whether there is already a pre-built version of a compatible
+ wheel available and short-circuits the standard full build pipeline.
+ """
+
+ def run(self):
+ if FORCE_BUILD:
+ return super().run()
+
+ wheel_url, wheel_filename = get_wheel_url()
+ print("Guessing wheel URL: ", wheel_url)
+ try:
+ urllib.request.urlretrieve(wheel_url, wheel_filename)
+
+ # Make the archive
+ # Lifted from the root wheel processing command
+ # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85
+ if not os.path.exists(self.dist_dir):
+ os.makedirs(self.dist_dir)
+
+ impl_tag, abi_tag, plat_tag = self.get_tag()
+ archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"
+
+ wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
+ print("Raw wheel path", wheel_path)
+ os.rename(wheel_filename, wheel_path)
+ except urllib.error.HTTPError:
+ print("Precompiled wheel not found. Building from source...")
+ # If the wheel could not be downloaded, build from source
+ super().run()
+
+
+setup(
+ name=PACKAGE_NAME,
+ version=get_package_version(),
+ packages=find_packages(
+ exclude=(
+ "build",
+ "csrc",
+ "include",
+ "tests",
+ "dist",
+ "docs",
+ "benchmarks",
+ "causal_conv1d.egg-info",
+ )
+ ),
+ author="Tri Dao",
+ author_email="tri@tridao.me",
+ description="Causal depthwise conv1d in CUDA, with a PyTorch interface",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url="https://github.com/Dao-AILab/causal-conv1d",
+ classifiers=[
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: BSD License",
+ "Operating System :: Unix",
+ ],
+ ext_modules=ext_modules,
+ cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension}
+ if ext_modules
+ else {
+ "bdist_wheel": CachedWheelsCommand,
+ },
+ python_requires=">=3.7",
+ install_requires=[
+ "torch",
+ "packaging",
+ "ninja",
+ ],
+)
diff --git a/PRISM/SegMamba/causal-conv1d/tests/test_causal_conv1d.py b/PRISM/SegMamba/causal-conv1d/tests/test_causal_conv1d.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e5985cfb0582e6656afb1d8b5c1de78f24f4276
--- /dev/null
+++ b/PRISM/SegMamba/causal-conv1d/tests/test_causal_conv1d.py
@@ -0,0 +1,173 @@
+# Copyright (C) 2023, Tri Dao.
+
+import math
+
+import torch
+import pytest
+
+from einops import rearrange
+
+from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_ref
+from causal_conv1d.causal_conv1d_interface import causal_conv1d_update, causal_conv1d_update_ref
+
+
+@pytest.mark.parametrize("channel_last", [False, True])
+# @pytest.mark.parametrize('channel_last', [True])
+@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
+# @pytest.mark.parametrize('itype', [torch.float16])
+@pytest.mark.parametrize("silu_activation", [False, True])
+# @pytest.mark.parametrize('silu_activation', [True])
+@pytest.mark.parametrize("has_bias", [False, True])
+# @pytest.mark.parametrize('has_bias', [True])
+@pytest.mark.parametrize("width", [2, 3, 4])
+# @pytest.mark.parametrize('width', [2])
+@pytest.mark.parametrize(
+ "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096]
+)
+# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 512, 784, 1024, 2048, 4096])
+# @pytest.mark.parametrize('seqlen', [128])
+def test_causal_conv1d(seqlen, width, has_bias, silu_activation, itype, channel_last):
+ device = "cuda"
+ rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3)
+ if itype == torch.bfloat16:
+ rtol, atol = 1e-2, 5e-2
+ rtolw, atolw = (1e-3, 1e-3)
+ # set seed
+ torch.random.manual_seed(0)
+ batch_size = 2
+ # batch_size = 1
+ dim = 4096 + 32 # Try dim not divisible by 64
+ # dim = 64
+ if not channel_last:
+ x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_()
+ else:
+ x = rearrange(
+ torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s"
+ ).requires_grad_()
+ weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
+ if has_bias:
+ bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
+ else:
+ bias = None
+ x_ref = x.detach().clone().requires_grad_()
+ weight_ref = weight.detach().clone().requires_grad_()
+ bias_ref = bias.detach().clone().requires_grad_() if bias is not None else None
+ activation = None if not silu_activation else "silu"
+ out = causal_conv1d_fn(x, weight, bias, activation=activation)
+ out_ref = causal_conv1d_ref(x_ref, weight_ref, bias_ref, activation=activation)
+
+ print(f"Output max diff: {(out - out_ref).abs().max().item()}")
+ print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
+ assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
+
+ g = torch.randn_like(out)
+ out_ref.backward(g)
+ out.backward(g)
+
+ print(f"dx max diff: {(x.grad - x_ref.grad).abs().max().item()}")
+ print(f"dweight max diff: {(weight.grad - weight_ref.grad).abs().max().item()}")
+ if has_bias:
+ print(f"dbias max diff: {(bias.grad - bias_ref.grad).abs().max().item()}")
+
+ assert torch.allclose(x.grad, x_ref.grad.to(dtype=itype), rtol=rtol, atol=atol)
+ assert torch.allclose(weight.grad, weight_ref.grad, rtol=rtolw, atol=atolw)
+ if has_bias:
+ assert torch.allclose(bias.grad, bias_ref.grad, rtol=rtolw, atol=atolw)
+
+
+@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
+# @pytest.mark.parametrize('itype', [torch.float16])
+@pytest.mark.parametrize("silu_activation", [False, True])
+# @pytest.mark.parametrize('silu_activation', [False])
+@pytest.mark.parametrize("has_bias", [False, True])
+# @pytest.mark.parametrize('has_bias', [True])
+@pytest.mark.parametrize("width", [2, 3, 4])
+# @pytest.mark.parametrize('width', [2])
+@pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096])
+# @pytest.mark.parametrize("dim", [2048])
+def test_causal_conv1d_update(dim, width, has_bias, silu_activation, itype):
+ device = "cuda"
+ rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3)
+ if itype == torch.bfloat16:
+ rtol, atol = 1e-2, 5e-2
+ rtolw, atolw = (1e-3, 1e-3)
+ # set seed
+ torch.random.manual_seed(0)
+ batch_size = 2
+ # batch_size = 1
+ # dim = 64
+ x = torch.randn(batch_size, dim, device=device, dtype=itype)
+ conv_state = torch.randn(batch_size, dim, width, device=device, dtype=itype)
+ weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
+ if has_bias:
+ bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
+ else:
+ bias = None
+ conv_state_ref = conv_state.detach().clone()
+ activation = None if not silu_activation else "silu"
+ out = causal_conv1d_update(x, conv_state, weight, bias, activation=activation)
+ out_ref = causal_conv1d_update_ref(x, conv_state_ref, weight, bias, activation=activation)
+
+ print(f"Output max diff: {(out - out_ref).abs().max().item()}")
+ print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
+ assert torch.equal(conv_state, conv_state_ref)
+ assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
+
+
+# @pytest.mark.parametrize("channel_last", [False, True])
+@pytest.mark.parametrize('channel_last', [True])
+# @pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
+@pytest.mark.parametrize('itype', [torch.bfloat16])
+# @pytest.mark.parametrize("silu_activation", [False, True])
+@pytest.mark.parametrize('silu_activation', [True])
+# @pytest.mark.parametrize("has_bias", [False, True])
+@pytest.mark.parametrize('has_bias', [True])
+# @pytest.mark.parametrize("width", [2, 3, 4])
+@pytest.mark.parametrize('width', [4])
+@pytest.mark.parametrize(
+ # "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096]
+ "seqlen", [2048]
+)
+# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 512, 784, 1024, 2048, 4096])
+# @pytest.mark.parametrize('seqlen', [128])
+def test_causal_conv1d_race_condition(seqlen, width, has_bias, silu_activation, itype, channel_last):
+ device = "cuda"
+ # set seed
+ torch.random.manual_seed(0)
+ batch_size = 2
+ # batch_size = 1
+ dim = 4096 + 32 # Try dim not divisible by 64
+ # dim = 64
+ if not channel_last:
+ x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_()
+ else:
+ x = rearrange(
+ torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s"
+ ).requires_grad_()
+ weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
+ if has_bias:
+ bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
+ else:
+ bias = None
+ activation = None if not silu_activation else "silu"
+ out0 = causal_conv1d_fn(x, weight, bias, activation=activation)
+ g = torch.randn_like(out0)
+ dx0, dw0, db0 = torch.autograd.grad(out0, (x, weight, bias), g)
+ dw_atol = 1e-4
+ db_atol = 1e-4
+
+ for i in range(10000):
+ out = causal_conv1d_fn(x, weight, bias, activation=activation)
+ dx, dw, db = torch.autograd.grad(out, (x, weight, bias), g)
+ dw_equal = torch.allclose(dw, dw0, atol=dw_atol)
+ # if not dw_equal:
+ # breakpoint()
+ if has_bias:
+ db_equal = torch.allclose(db, db0, atol=db_atol)
+ # if not db_equal:
+ # breakpoint()
+ assert torch.equal(out, out0)
+ assert torch.equal(dx, dx0)
+ assert dw_equal
+ if has_bias:
+            assert db_equal
diff --git a/PRISM/SegMamba/images/data_structure.jpg b/PRISM/SegMamba/images/data_structure.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2b5f2715d2295080924d8fdf64d3c849639538e0
--- /dev/null
+++ b/PRISM/SegMamba/images/data_structure.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:811073efa09d8196f0b0dd9a37418025d5969f204f226b055be9349dec8117db
+size 45010
diff --git a/PRISM/SegMamba/images/method_figure.jpg b/PRISM/SegMamba/images/method_figure.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..682d984ff8c8f6e15104ea4bcf6cf88baf5b60fa
--- /dev/null
+++ b/PRISM/SegMamba/images/method_figure.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91fc1b545acec5d4f48af3a5d4498e25bc5c973639b7527076eebab46e78e2ba
+size 215324
diff --git a/PRISM/SegMamba/images/modules.jpg b/PRISM/SegMamba/images/modules.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d2dc0fcdcc8a392473ee01dfb739941c28eceeb3
--- /dev/null
+++ b/PRISM/SegMamba/images/modules.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e511d5a09ef804053ea85d19075f7a8d922ea59aba475f8b223f2c9a6a15c23c
+size 152643
diff --git a/PRISM/SegMamba/images/segmamba_ablation.jpg b/PRISM/SegMamba/images/segmamba_ablation.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fe655bb596358f285f12aeced37ba79c161cce71
--- /dev/null
+++ b/PRISM/SegMamba/images/segmamba_ablation.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bbaaa25995dd1f3b7a3b1c326dd939dce40c80722c74f4104ae787ed6d936db
+size 99994
diff --git a/PRISM/SegMamba/light_training/.DS_Store b/PRISM/SegMamba/light_training/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..bbf3ff2ee2fe133356efb34f5891a28e41a58550
Binary files /dev/null and b/PRISM/SegMamba/light_training/.DS_Store differ
diff --git a/PRISM/SegMamba/light_training/augment/multi_processor.py b/PRISM/SegMamba/light_training/augment/multi_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..84efb9bd7d8b65f8f05020cf2bfef3db396eaa2c
--- /dev/null
+++ b/PRISM/SegMamba/light_training/augment/multi_processor.py
@@ -0,0 +1,10 @@
+from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter
+
+
+class LimitedLenWrapper(NonDetMultiThreadedAugmenter):
+ def __init__(self, my_imaginary_length, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.len = my_imaginary_length
+
+ def __len__(self):
+ return self.len
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/augment/train_augment.py b/PRISM/SegMamba/light_training/augment/train_augment.py
new file mode 100644
index 0000000000000000000000000000000000000000..086f133487cc8e4920531b0284edc166b3b20c79
--- /dev/null
+++ b/PRISM/SegMamba/light_training/augment/train_augment.py
@@ -0,0 +1,279 @@
+import inspect
+import multiprocessing
+import os
+import shutil
+import sys
+import warnings
+from copy import deepcopy
+from datetime import datetime
+from time import time, sleep
+from typing import Union, Tuple, List
+import numpy as np
+import torch
+from batchgenerators.dataloading.single_threaded_augmenter import SingleThreadedAugmenter
+from batchgenerators.transforms.abstract_transforms import AbstractTransform, Compose
+from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, \
+ ContrastAugmentationTransform, GammaTransform
+from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
+from batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform
+from batchgenerators.transforms.spatial_transforms import SpatialTransform, MirrorTransform
+from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor
+
+
+def get_train_transforms(patch_size, mirror_axes=None):
+ tr_transforms = []
+ patch_size_spatial = patch_size
+ ignore_axes = None
+ angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
+
+ tr_transforms.append(SpatialTransform(
+ patch_size_spatial, patch_center_dist_from_border=None,
+ do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
+ do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
+ p_rot_per_axis=1, # todo experiment with this
+ do_scale=True, scale=(0.7, 1.4),
+ border_mode_data="constant", border_cval_data=0, order_data=3,
+ border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
+ random_crop=False, # random cropping is part of our dataloaders
+ p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
+ independent_scale_for_each_axis=False # todo experiment with this
+ ))
+
+ tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
+ tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
+ p_per_channel=0.5))
+ tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
+ tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
+ tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
+ p_per_channel=0.5,
+ order_downsample=0, order_upsample=3, p_per_sample=0.25,
+ ignore_axes=ignore_axes))
+ tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
+ tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
+
+ if mirror_axes is not None and len(mirror_axes) > 0:
+ tr_transforms.append(MirrorTransform(mirror_axes))
+
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
+
+ tr_transforms = Compose(tr_transforms)
+
+ return tr_transforms
+
+def get_train_transforms_nomirror(patch_size, mirror_axes=None):
+ tr_transforms = []
+ patch_size_spatial = patch_size
+ ignore_axes = None
+ angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
+
+ tr_transforms.append(SpatialTransform(
+ patch_size_spatial, patch_center_dist_from_border=None,
+ do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
+ do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
+ p_rot_per_axis=1, # todo experiment with this
+ do_scale=True, scale=(0.7, 1.4),
+ border_mode_data="constant", border_cval_data=0, order_data=3,
+ border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
+ random_crop=False, # random cropping is part of our dataloaders
+ p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
+ independent_scale_for_each_axis=False # todo experiment with this
+ ))
+
+ tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
+ tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
+ p_per_channel=0.5))
+ tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
+ tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
+ tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
+ p_per_channel=0.5,
+ order_downsample=0, order_upsample=3, p_per_sample=0.25,
+ ignore_axes=ignore_axes))
+ tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
+ tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
+
+ # if mirror_axes is not None and len(mirror_axes) > 0:
+ # tr_transforms.append(MirrorTransform(mirror_axes))
+
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
+
+ tr_transforms = Compose(tr_transforms)
+
+ return tr_transforms
+
+def get_train_transforms_onlymirror(patch_size, mirror_axes=None):
+ tr_transforms = []
+ patch_size_spatial = patch_size
+ ignore_axes = None
+ angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
+
+ # tr_transforms.append(SpatialTransform(
+ # patch_size_spatial, patch_center_dist_from_border=None,
+ # do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
+ # do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
+ # p_rot_per_axis=1, # todo experiment with this
+ # do_scale=True, scale=(0.7, 1.4),
+ # border_mode_data="constant", border_cval_data=0, order_data=3,
+ # border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
+ # random_crop=False, # random cropping is part of our dataloaders
+ # p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
+ # independent_scale_for_each_axis=False # todo experiment with this
+ # ))
+
+ tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
+ tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
+ p_per_channel=0.5))
+ tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
+ tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
+ tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
+ p_per_channel=0.5,
+ order_downsample=0, order_upsample=3, p_per_sample=0.25,
+ ignore_axes=ignore_axes))
+ tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
+ tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
+
+ if mirror_axes is not None and len(mirror_axes) > 0:
+ tr_transforms.append(MirrorTransform(mirror_axes))
+
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
+
+ tr_transforms = Compose(tr_transforms)
+
+ return tr_transforms
+
+def get_train_transforms_onlyspatial(patch_size, mirror_axes=None):
+ tr_transforms = []
+ patch_size_spatial = patch_size
+ ignore_axes = None
+ angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
+
+ tr_transforms.append(SpatialTransform(
+ patch_size_spatial, patch_center_dist_from_border=None,
+ do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
+ do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
+ p_rot_per_axis=1, # todo experiment with this
+ do_scale=True, scale=(0.7, 1.4),
+ border_mode_data="constant", border_cval_data=0, order_data=3,
+ border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
+ random_crop=False, # random cropping is part of our dataloaders
+ p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
+ independent_scale_for_each_axis=False # todo experiment with this
+ ))
+
+ # tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
+ # tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
+ # p_per_channel=0.5))
+ # tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
+ # tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
+ # tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
+ # p_per_channel=0.5,
+ # order_downsample=0, order_upsample=3, p_per_sample=0.25,
+ # ignore_axes=ignore_axes))
+ # tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
+ # tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
+
+ if mirror_axes is not None and len(mirror_axes) > 0:
+ tr_transforms.append(MirrorTransform(mirror_axes))
+
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
+
+ tr_transforms = Compose(tr_transforms)
+
+ return tr_transforms
+
+def get_train_transforms_noaug(patch_size, mirror_axes=None):
+ tr_transforms = []
+ # patch_size_spatial = patch_size
+ # ignore_axes = None
+ # angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
+
+ # tr_transforms.append(SpatialTransform(
+ # patch_size_spatial, patch_center_dist_from_border=None,
+ # do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
+ # do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
+ # p_rot_per_axis=1, # todo experiment with this
+ # do_scale=True, scale=(0.7, 1.4),
+ # border_mode_data="constant", border_cval_data=0, order_data=3,
+ # border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
+ # random_crop=False, # random cropping is part of our dataloaders
+ # p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
+ # independent_scale_for_each_axis=False # todo experiment with this
+ # ))
+
+ # tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
+ # tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
+ # p_per_channel=0.5))
+ # tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
+ # tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
+ # tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
+ # p_per_channel=0.5,
+ # order_downsample=0, order_upsample=3, p_per_sample=0.25,
+ # ignore_axes=ignore_axes))
+ # tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
+ # tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
+
+ # if mirror_axes is not None and len(mirror_axes) > 0:
+ # tr_transforms.append(MirrorTransform(mirror_axes))
+
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
+
+ tr_transforms = Compose(tr_transforms)
+
+ return tr_transforms
+
+def get_validation_transforms() -> AbstractTransform:
+ val_transforms = []
+ val_transforms.append(RemoveLabelTransform(-1, 0))
+
+ # val_transforms.append(RenameTransform('seg', 'target', True))
+
+ val_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
+ val_transforms = Compose(val_transforms)
+ return val_transforms
+
+# import SimpleITK as sitk
+# import matplotlib.pyplot as plt
+
+# image = sitk.ReadImage("/Users/xingzhaohu/Documents/工作/code/medical_image_processing/SSL/BraTS20_Training_365/BraTS20_Training_365_flair.nii.gz")
+# label = sitk.ReadImage("/Users/xingzhaohu/Documents/工作/code/medical_image_processing/SSL/BraTS20_Training_365/BraTS20_Training_365_seg.nii.gz")
+
+# # image = sitk.ReadImage("./AIIB/image/AIIB23_171.nii.gz")
+# # label = sitk.ReadImage("./AIIB/gt/AIIB23_171.nii.gz")
+
+# image_arr = sitk.GetArrayFromImage(image)
+# label_arr = sitk.GetArrayFromImage(label)
+# intensityproperties = {}
+
+# norm = RescaleTo01Normalization(intensityproperties=intensityproperties)
+# image_arr = image_arr[0:128, 0:128, 0:128][None, None]
+# label_arr = label_arr[0:128, 0:128, 0:128][None, None]
+
+
+# image_arr = norm.run(image_arr, label_arr)
+
+# print(image_arr.shape, label_arr.shape)
+
+# tr_transforms = Compose(tr_transforms)
+
+# trans_out = tr_transforms(data=image_arr, seg=label_arr)
+
+# image_arr_aug = trans_out["data"]
+# label_arr_aug = trans_out["seg"]
+
+# print(image_arr_aug.shape, label_arr_aug.shape)
+
+
+# for i in range(40, 128):
+# plt.subplot(1, 4, 1)
+# plt.imshow(image_arr[0, 0, i], cmap="gray")
+# plt.subplot(1, 4, 2)
+# plt.imshow(label_arr[0, 0, i], cmap="gray")
+# plt.subplot(1, 4, 3)
+# plt.imshow(image_arr_aug[0, 0, i], cmap="gray")
+# plt.subplot(1, 4, 4)
+# plt.imshow(label_arr_aug[0, 0, i], cmap="gray")
+# plt.show()
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/dataloading/__init__.py b/PRISM/SegMamba/light_training/dataloading/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/dataloading/base_data_loader.py b/PRISM/SegMamba/light_training/dataloading/base_data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..e22a4438c6f0f2e2ac6715caff55eefcf2855a1a
--- /dev/null
+++ b/PRISM/SegMamba/light_training/dataloading/base_data_loader.py
@@ -0,0 +1,213 @@
+import numpy as np
+from typing import Union, Tuple
+import time
+
class DataLoaderMultiProcess:
    """Patch-based batch generator (adapted from nnU-Net's DataLoader3D).

    Draws ``batch_size`` random cases from ``dataset``, crops one
    ``patch_size`` patch from each (optionally forcing foreground into a
    fraction of the patches) and zero-pads crops that extend past the
    volume border.

    ``dataset[i]`` must return a dict with ``data`` (c, x, y, z), ``seg``
    (c, x, y, z) and ``properties`` (containing ``class_locations``);
    optional ``data_global`` / ``seg_global`` entries are passed through.
    """

    def __init__(self, dataset,
                 patch_size,
                 batch_size=2,
                 oversample_foreground_percent=0.33,
                 probabilistic_oversampling=False,
                 print_time=False):
        self.dataset = dataset
        self.patch_size = patch_size
        self.batch_size = batch_size
        self.keys = [i for i in range(len(dataset))]
        self.thread_id = 0
        self.oversample_foreground_percent = oversample_foreground_percent
        # extra padding on top of the data shape; grown in get_bbox() when a
        # volume is smaller than the patch size
        self.need_to_pad = (np.array([0, 0, 0])).astype(int)

        # choose the oversampling strategy once, up front
        self.get_do_oversample = self._oversample_last_XX_percent if not probabilistic_oversampling \
            else self._probabilistic_oversampling
        # batch shapes are determined lazily on the first batch
        self.data_shape = None
        self.seg_shape = None
        self.print_time = print_time

    def determine_shapes(self):
        """Read one case to derive the (b, c, x, y, z) batch array shapes."""
        item = self.dataset.__getitem__(0)
        data, seg, properties = item["data"], item["seg"], item["properties"]
        num_color_channels = data.shape[0]
        num_output_channels = seg.shape[0]
        patch_size = self.patch_size
        data_shape = (self.batch_size, num_color_channels, patch_size[0], patch_size[1], patch_size[2])
        seg_shape = (self.batch_size, num_output_channels, patch_size[0], patch_size[1], patch_size[2])
        return data_shape, seg_shape

    def generate_train_batch(self):
        """Assemble one batch of cropped-and-padded patches."""
        # sample with replacement so batch_size may exceed the dataset size
        selected_keys = np.random.choice(self.keys, self.batch_size, True, None)
        if self.data_shape is None:
            self.data_shape, self.seg_shape = self.determine_shapes()

        data_all = np.zeros(self.data_shape, dtype=np.float32)
        data_all_global = np.zeros(self.data_shape, dtype=np.float32)
        seg_all_global = np.zeros(self.seg_shape, dtype=np.float32)
        data_global = None
        seg_global = None
        seg_all = np.zeros(self.seg_shape, dtype=np.float32)

        case_properties = []

        for j, key in enumerate(selected_keys):
            force_fg = self.get_do_oversample(j)
            s = time.time()
            item = self.dataset.__getitem__(key)
            e = time.time()
            if self.print_time:
                print(f"read single data time is {e - s}")
            data, seg, properties = item["data"], item["seg"], item["properties"]

            if "data_global" in item:
                data_global = item["data_global"]

            if "seg_global" in item:
                seg_global = item["seg_global"]

            case_properties.append(properties)

            shape = data.shape[1:]
            dim = len(shape)

            s = time.time()
            bbox_lbs, bbox_ubs = self.get_bbox(shape, force_fg, properties['class_locations'])
            e = time.time()
            if self.print_time:
                print(f"get bbox time is {e - s}")
            # First crop to the part of the bbox that actually lies within the
            # data; the (smaller) crop is then cheaper to pad. valid_bbox_* are
            # the bbox coordinates clipped to the data cube; the crop is padded
            # back up to the patch size below.
            valid_bbox_lbs = [max(0, bbox_lbs[i]) for i in range(dim)]
            valid_bbox_ubs = [min(shape[i], bbox_ubs[i]) for i in range(dim)]

            this_slice = tuple([slice(0, data.shape[0])] + [slice(lb, ub) for lb, ub in zip(valid_bbox_lbs, valid_bbox_ubs)])
            data = data[this_slice]

            this_slice = tuple([slice(0, seg.shape[0])] + [slice(lb, ub) for lb, ub in zip(valid_bbox_lbs, valid_bbox_ubs)])
            seg = seg[this_slice]

            s = time.time()
            padding = [(-min(0, bbox_lbs[i]), max(bbox_ubs[i] - shape[i], 0)) for i in range(dim)]
            # NOTE(review): unlike upstream nnU-Net, the segmentation is padded
            # with 0 here (not -1) — confirm this is intended downstream
            data_all[j] = np.pad(data, ((0, 0), *padding), 'constant', constant_values=0)
            seg_all[j] = np.pad(seg, ((0, 0), *padding), 'constant', constant_values=0)

            if data_global is not None:
                data_all_global[j] = data_global

            if seg_global is not None:
                seg_all_global[j] = seg_global

            e = time.time()
            if self.print_time:
                print(f"box is {bbox_lbs, bbox_ubs}, padding is {padding}")
                print(f"setting data value time is {e - s}")

        if data_global is None:
            return {'data': data_all,
                    'seg': seg_all, 'properties': case_properties,
                    'keys': selected_keys}

        return {'data': data_all, "data_global": data_all_global,
                "seg_global": seg_all_global,
                'seg': seg_all, 'properties': case_properties,
                'keys': selected_keys}

    def __next__(self):
        return self.generate_train_batch()

    def set_thread_id(self, thread_id):
        self.thread_id = thread_id

    def _oversample_last_XX_percent(self, sample_idx: int) -> bool:
        """
        determines whether sample sample_idx in a minibatch needs to be guaranteed foreground
        """
        return not sample_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))

    def _probabilistic_oversampling(self, sample_idx: int) -> bool:
        # independent coin flip per sample instead of "last XX percent"
        return np.random.uniform() < self.oversample_foreground_percent

    def get_bbox(self, data_shape: np.ndarray, force_fg: bool, class_locations: Union[dict, None],
                 overwrite_class: Union[int, Tuple[int, ...]] = None, verbose: bool = False):
        """Pick a patch bounding box, optionally centered on a foreground voxel.

        Returns (bbox_lbs, bbox_ubs); bounds may lie outside the volume, in
        which case the caller pads the crop.
        """
        need_to_pad = self.need_to_pad.copy()
        dim = len(data_shape)

        for d in range(dim):
            # if data_shape + need_to_pad is still < patch size we need to pad
            # more; padding is always applied on both sides
            if need_to_pad[d] + data_shape[d] < self.patch_size[d]:
                need_to_pad[d] = self.patch_size[d] - data_shape[d]

        # the bbox lower bound may range from -need_to_pad // 2 to
        # shape - patch_size + need_to_pad // 2; sample from [lbs, ubs] below
        lbs = [- need_to_pad[i] // 2 for i in range(dim)]
        ubs = [data_shape[i] + need_to_pad[i] // 2 + need_to_pad[i] % 2 - self.patch_size[i] for i in range(dim)]

        # without force_fg we just sample the bbox uniformly; otherwise we must
        # make sure at least one foreground class lands inside the patch
        if not force_fg:
            bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]
        else:
            assert class_locations is not None, 'if force_fg is set class_locations cannot be None'
            if overwrite_class is not None:
                assert overwrite_class in class_locations.keys(), 'desired class ("overwrite_class") does not ' \
                                                                  'have class_locations (missing key)'
            # preprocessing already recorded voxel locations per class, which
            # saves an np.unique here; keys can also be tuples of classes
            eligible_classes_or_regions = [i for i in class_locations.keys() if len(class_locations[i]) > 0]

            if len(eligible_classes_or_regions) == 0:
                # only happens when an image contains no foreground voxels
                selected_class = None
                if verbose:
                    print('case does not contain any foreground classes')
            else:
                # pick a random eligible class unless a valid overwrite_class
                # was requested
                selected_class = eligible_classes_or_regions[np.random.choice(len(eligible_classes_or_regions))] if \
                    (overwrite_class is None or (overwrite_class not in eligible_classes_or_regions)) else overwrite_class

            voxels_of_that_class = class_locations[selected_class] if selected_class is not None else None

            if voxels_of_that_class is not None and len(voxels_of_that_class) > 0:
                selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))]
                # the selected voxel becomes the patch center: subtract half
                # the patch size, clamped to [lbs, ...]. i + 1 skips the
                # channel dimension stored at index 0.
                bbox_lbs = [max(lbs[i], selected_voxel[i + 1] - self.patch_size[i] // 2) for i in range(dim)]
            else:
                # no foreground available: fall back to random cropping
                bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]

        bbox_ubs = [bbox_lbs[i] + self.patch_size[i] for i in range(dim)]

        return bbox_lbs, bbox_ubs
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/dataloading/dataset.py b/PRISM/SegMamba/light_training/dataloading/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..51a9d9631ff7eb35651eb4acf140c684f83b44fc
--- /dev/null
+++ b/PRISM/SegMamba/light_training/dataloading/dataset.py
@@ -0,0 +1,319 @@
+
+# Copyright 2020 - 2022 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from sklearn.model_selection import KFold ## K折交叉验证
+import pickle
+import os
+import json
+import math
+import numpy as np
+import torch
+from monai import transforms
+import SimpleITK as sitk
+from tqdm import tqdm
+from torch.utils.data import Dataset
+import glob
+from light_training.dataloading.utils import unpack_dataset
+import random
+
class MedicalDataset(Dataset):
    """Dataset over preprocessed cases stored as ``<case>.npz`` archives.

    For every case, ``<case>.pkl`` holds its properties (loaded eagerly and
    cached); the arrays themselves are read from the unpacked
    ``<case>.npy`` / ``<case>_seg.npy`` files via memory mapping.
    """

    def __init__(self, datalist, test=False) -> None:
        """
        datalist: list of .npz case paths.
        test: when True, segmentation files are not loaded.
        """
        super().__init__()

        self.datalist = datalist
        self.test = test

        # the properties pickles are small; cache them all up front
        self.data_cached = []
        for p in tqdm(self.datalist, total=len(self.datalist)):
            info = self.load_pkl(p)
            self.data_cached.append(info)

        # unpack every folder that contributes cases exactly once
        print(f"unpacking data ....")
        folder = []
        for p in self.datalist:
            f = os.path.dirname(p)
            if f not in folder:
                folder.append(f)
        for f in folder:
            unpack_dataset(f,
                           unpack_segmentation=True,
                           overwrite_existing=False,
                           num_processes=8)

        print(f"data length is {len(self.datalist)}")

    def load_pkl(self, data_path):
        """Load the .pkl properties file that accompanies a .npz path."""
        properties_path = f"{data_path[:-4]}.pkl"
        # context manager: the original left the file handle open
        with open(properties_path, "rb") as df:
            info = pickle.load(df)
        return info

    def post(self, batch_data):
        # identity hook; subclasses may override to post-process a batch
        return batch_data

    def read_data(self, data_path):
        """Memory-map the unpacked image (and segmentation) for one case."""
        image_path = data_path.replace(".npz", ".npy")
        seg_path = data_path.replace(".npz", "_seg.npy")
        image_data = np.load(image_path, "r+")

        seg_data = None
        if not self.test:
            seg_data = np.load(seg_path, "r+")
        return image_data, seg_data

    def __getitem__(self, i):
        image, seg = self.read_data(self.datalist[i])
        properties = self.data_cached[i]

        if seg is None:
            return {
                "data": image,
                "properties": properties
            }
        return {
            "data": image,
            "seg": seg,
            "properties": properties
        }

    def __len__(self):
        return len(self.datalist)
+
def get_train_test_loader_from_test_list(data_dir, test_list):
    """Split the .npz cases in data_dir into train/test datasets.

    test_list holds case file names (optionally with a .nii.gz suffix);
    every case whose stem appears in it goes to the test set.
    Returns [train_ds, test_ds].
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    # strip the .nii.gz suffix so entries become bare case stems
    test_stems = [t.replace(".nii.gz", "") for t in test_list]

    test_datalist = []
    train_datalist = []
    for p in all_paths:
        # os.path.basename instead of split("/") so this also works on
        # Windows, and strip only ".npz" so stems containing '.' survive
        # (the old split(".")[0] truncated them)
        stem = os.path.basename(p)[:-len(".npz")]
        if stem in test_stems:
            test_datalist.append(p)
        else:
            train_datalist.append(p)

    print(f"training data is {len(train_datalist)}")
    print(f"test data is {len(test_datalist)}", test_datalist)

    train_ds = MedicalDataset(train_datalist)
    test_ds = MedicalDataset(test_datalist)

    loader = [train_ds, test_ds]

    return loader
+
def get_kfold_data(data_paths, n_splits, shuffle=False):
    """Partition data_paths into n_splits cross-validation folds.

    Returns a list with one {"train_data": [...], "val_data": [...]} dict
    per fold (kfold is an instance of sklearn's KFold class).
    """
    indices = np.arange(len(data_paths))
    splitter = KFold(n_splits=n_splits, shuffle=shuffle)
    folds = []
    for train_idx, val_idx in splitter.split(indices):
        folds.append({
            "train_data": [data_paths[i] for i in train_idx],
            "val_data": [data_paths[i] for i in val_idx],
        })
    return folds
+
def get_kfold_loader(data_dir, fold=0, test_dir=None):
    """Build [train_ds, val_ds, test_ds] for one fold of a 5-fold split."""
    all_paths = glob.glob(f"{data_dir}/*.npz")
    fold_data = get_kfold_data(all_paths, 5)[fold]

    train_datalist = fold_data["train_data"]
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is None:
        test_ds = None
    else:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)

    return [train_ds, val_ds, test_ds]
+
def get_all_training_loader(data_dir, fold=0, test_dir=None):
    """Train on ALL labeled cases; `fold` only selects the validation subset."""
    all_paths = glob.glob(f"{data_dir}/*.npz")
    val_datalist = get_kfold_data(all_paths, 5)[fold]["val_data"]
    train_datalist = all_paths

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
+
def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
    """Build datasets from three separate directories.

    NOTE(review): "seperate" is a typo, kept because callers use this name.
    """
    train_datalist = glob.glob(f"{train_dir}/*.npz")
    val_datalist = glob.glob(f"{val_dir}/*.npz")

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    if test_dir is None:
        test_ds = None
    else:
        test_datalist = glob.glob(f"{test_dir}/*.npz")
        print(f"test data is {len(test_datalist)}")
        test_ds = MedicalDataset(test_datalist, test=True)

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    return [train_ds, val_ds, test_ds]
+
def get_train_val_test_loader_from_split_json(data_dir, split_json_file):
    """Build datasets from a JSON file with "train"/"validation"/"test" name lists."""
    with open(split_json_file, "r") as f:
        datalist = json.loads(f.read())

    def to_abs(names):
        # names stored in the JSON are relative to data_dir
        return [os.path.join(data_dir, name) for name in names]

    train_datalist = to_abs(datalist["train"])
    val_datalist = to_abs(datalist["validation"])
    test_datalist = to_abs(datalist["test"])

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    return [MedicalDataset(train_datalist),
            MedicalDataset(val_datalist),
            MedicalDataset(test_datalist)]
+
+
def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
    """Randomly split the cases in data_dir into train/val/test datasets.

    The shuffle is seeded for reproducibility. Note the test split is taken
    from the END of the shuffled list (all_paths[-test_number:]), so rates
    that do not sum to 1 can make the splits overlap.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    train_number = int(len(all_paths) * train_rate)
    val_number = int(len(all_paths) * val_rate)
    test_number = int(len(all_paths) * test_rate)

    random.seed(seed)
    random.shuffle(all_paths)

    train_datalist = all_paths[:train_number]
    val_datalist = all_paths[train_number: train_number + val_number]
    test_datalist = all_paths[-test_number:]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    return [MedicalDataset(train_datalist),
            MedicalDataset(val_datalist),
            MedicalDataset(test_datalist)]
+
def get_train_loader_from_train(data_dir):
    """Dataset over every labeled .npz case in data_dir (no split)."""
    return MedicalDataset(glob.glob(f"{data_dir}/*.npz"))
+
def get_test_loader_from_test(data_dir):
    """Dataset over every .npz case in data_dir.

    NOTE(review): this constructs the dataset with test=False, so
    segmentation files are still required and loaded — presumably the test
    set here is labeled; confirm against the callers.
    """
    return MedicalDataset(glob.glob(f"{data_dir}/*.npz"))
+
def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
    """Like get_all_training_loader, but data_dir is a list of directories."""
    all_paths = []
    for directory in data_dir:
        all_paths.extend(glob.glob(f"{directory}/*.npz"))

    val_datalist = get_kfold_data(all_paths, 5)[fold]["val_data"]
    train_datalist = all_paths

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)
    else:
        test_ds = None

    return [train_ds, val_ds, test_ds]
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/dataloading/dataset_sdm_edge.py b/PRISM/SegMamba/light_training/dataloading/dataset_sdm_edge.py
new file mode 100644
index 0000000000000000000000000000000000000000..496d906b6b50b5fc2dde0b265ce4684b9ebc2394
--- /dev/null
+++ b/PRISM/SegMamba/light_training/dataloading/dataset_sdm_edge.py
@@ -0,0 +1,331 @@
+
+# Copyright 2020 - 2022 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from sklearn.model_selection import KFold ## K折交叉验证
+import pickle
+import os
+import json
+import math
+import numpy as np
+import torch
+from monai import transforms
+import SimpleITK as sitk
+from tqdm import tqdm
+from torch.utils.data import Dataset
+import glob
+from light_training.dataloading.utils import unpack_dataset
+import random
+import torch
+import numpy as np
+from scipy.ndimage import distance_transform_edt as distance
+from skimage import segmentation as skimage_seg
+from skimage.morphology import dilation, disk
+import scipy.ndimage as ndimage
+
def get_edge_points(img):
    """
    get edge points of a binary segmentation result

    The mask is eroded once with a rank-1 connectivity structure and the
    erosion is subtracted from the input, leaving only the edge voxels
    (uint8 output).
    """
    rank = 2 if len(img.shape) == 2 else 3
    structure = ndimage.generate_binary_structure(rank, 1)
    eroded = ndimage.binary_erosion(img, structure)
    return np.asarray(img, np.uint8) - np.asarray(eroded, np.uint8)
+
def edge_3d(image_3d):
    """Apply get_edge_points to every (batch, channel) sub-volume of a 5D array."""
    edges = np.zeros_like(image_3d)
    for b in range(image_3d.shape[0]):
        for c in range(image_3d.shape[1]):
            edges[b, c] = get_edge_points(image_3d[b, c])
    return edges
+
def compute_sdf(img_gt, out_shape):
    """
    compute the signed distance map of binary mask
    input: segmentation, shape = (batch_size, c, x, y, z)
    output: the Signed Distance Map (SDM), normalized to [-1, 1]
        sdf(x) = 0              for x on the segmentation boundary
               < 0 (down to -1) for x inside the segmentation
               > 0 (up to +1)   for x outside the segmentation

    Channels whose mask is empty are left all-zero.
    """
    img_gt = img_gt.astype(np.uint8)
    normalized_sdf = np.zeros(out_shape)

    for b in range(out_shape[0]):  # batch size
        for c in range(out_shape[1]):  # channels
            posmask = img_gt[b, c].astype(np.bool_)
            if posmask.any():
                negmask = ~posmask
                posdis = distance(posmask)
                negdis = distance(negmask)
                boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
                # min/max-normalize both distance fields to [0, 1]; their
                # difference then lies in [-1, 1]
                sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) - \
                      (posdis - np.min(posdis)) / (np.max(posdis) - np.min(posdis))
                sdf[boundary == 1] = 0
                normalized_sdf[b][c] = sdf
                # the original wrote `assert cond, print(...)`: print() returns
                # None, so the assertion message was always None; use real
                # messages instead
                assert np.min(sdf) == -1.0, \
                    f"sdf min != -1 (pos {np.min(posdis)}/{np.max(posdis)}, neg {np.min(negdis)}/{np.max(negdis)})"
                assert np.max(sdf) == 1.0, \
                    f"sdf max != 1 (pos {np.min(posdis)}/{np.max(posdis)}, neg {np.min(negdis)}/{np.max(negdis)})"

    return normalized_sdf
+
def convert_labels(labels):
    """Convert a BraTS label volume to three overlapping channels: TC, WT, ET.

    labels: integer tensor of shape (x, y, z) with values {0, 1, 2, 3}.
    Returns a float tensor of shape (1, 3, x, y, z).
    """
    labels = labels[None, None]
    tc = (labels == 1) | (labels == 3)
    wt = tc | (labels == 2)
    et = labels == 3
    return torch.cat([tc, wt, et], dim=1).float()
+
class MedicalDataset(Dataset):
    """Dataset variant that appends a precomputed SDM channel to the seg.

    Same .npz/.pkl/.npy layout as the base dataset; additionally loads
    ``<case>_seg_sdm.npy`` (precomputed offline) and concatenates it to the
    segmentation channels in __getitem__.
    """

    def __init__(self, datalist, test=False) -> None:
        """
        datalist: list of .npz case paths.
        test: when True, segmentation files are not loaded.
        """
        super().__init__()

        self.datalist = datalist
        self.test = test

        # the properties pickles are small; cache them all up front
        self.data_cached = []
        for p in tqdm(self.datalist, total=len(self.datalist)):
            self.data_cached.append(self.load_pkl(p))

        # unpack every folder that contributes cases exactly once
        print(f"unpacking data ....")
        folder = []
        for p in self.datalist:
            f = os.path.dirname(p)
            if f not in folder:
                folder.append(f)
        for f in folder:
            unpack_dataset(f,
                           unpack_segmentation=True,
                           overwrite_existing=False,
                           num_processes=8)

        print(f"data length is {len(self.datalist)}")

    def load_pkl(self, data_path):
        """Load the .pkl properties file that accompanies a .npz path."""
        properties_path = f"{data_path[:-4]}.pkl"
        # context manager: the original left the file handle open
        with open(properties_path, "rb") as df:
            info = pickle.load(df)
        return info

    def read_data(self, data_path):
        """Memory-map (read-only) the unpacked arrays of one case."""
        image_path = data_path.replace(".npz", ".npy")
        seg_path = data_path.replace(".npz", "_seg.npy")
        image_data = np.load(image_path, "r")

        seg_data = None
        if not self.test:
            seg_data = np.load(seg_path, "r")
        return image_data, seg_data

    def __getitem__(self, i):
        image, seg = self.read_data(self.datalist[i])
        properties = self.data_cached[i]
        case_name = properties["name"]

        if seg is not None:
            # SDMs are generated offline into this fixed directory
            # TODO(review): make the sdm directory configurable
            sdm = np.load(os.path.join("./data/fullres/train_sdm/", f"{case_name}_seg_sdm.npy"), "r")
            sdm = sdm[0]  # drop the leading singleton axis
            seg = np.concatenate([seg, sdm], axis=0)

            return {
                "data": image,
                "seg": seg,
                "properties": properties
            }

        return {
            "data": image,
            "properties": properties
        }

    def __len__(self):
        return len(self.datalist)
+
def get_kfold_data(data_paths, n_splits, shuffle=False):
    """Return one {"train_data", "val_data"} dict per KFold split.

    (kfold is an instance of sklearn's KFold class.)
    """
    kfold = KFold(n_splits=n_splits, shuffle=shuffle)
    folds = []
    for train_indices, val_indices in kfold.split(np.arange(len(data_paths))):
        folds.append({
            "train_data": [data_paths[int(i)] for i in train_indices],
            "val_data": [data_paths[int(i)] for i in val_indices],
        })
    return folds
+
def get_kfold_loader(data_dir, fold=0, test_dir=None):
    """Build [train_ds, val_ds, test_ds] for one fold of a 5-fold split."""
    fold_data = get_kfold_data(glob.glob(f"{data_dir}/*.npz"), 5)[fold]
    train_datalist = fold_data["train_data"]
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    test_ds = None
    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)

    return [train_ds, val_ds, test_ds]
+
def get_all_training_loader(data_dir, fold=0, test_dir=None):
    """Train on ALL labeled cases; `fold` only picks the validation subset."""
    all_paths = glob.glob(f"{data_dir}/*.npz")
    fold_data = get_kfold_data(all_paths, 5)[fold]

    train_datalist = all_paths
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    test_ds = None
    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)

    return [train_ds, val_ds, test_ds]
+
def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
    """Build datasets from three separate directories.

    NOTE(review): "seperate" is a typo, kept because callers use this name.
    """
    train_datalist = glob.glob(f"{train_dir}/*.npz")
    val_datalist = glob.glob(f"{val_dir}/*.npz")

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    if test_dir is None:
        test_ds = None
    else:
        test_datalist = glob.glob(f"{test_dir}/*.npz")
        print(f"test data is {len(test_datalist)}")
        test_ds = MedicalDataset(test_datalist, test=True)

    return [MedicalDataset(train_datalist), MedicalDataset(val_datalist), test_ds]
+
def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=None):
    """Randomly split the cases in data_dir into train/val/test datasets.

    seed: optional shuffle seed, added for parity with the variant in
    light_training/dataloading/dataset.py; default None keeps the original
    (unseeded, irreproducible) behavior.
    Note the test split is taken from the END of the shuffled list, so
    rates that do not sum to 1 can make the splits overlap.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    train_number = int(len(all_paths) * train_rate)
    val_number = int(len(all_paths) * val_rate)
    test_number = int(len(all_paths) * test_rate)

    if seed is not None:
        random.seed(seed)
    random.shuffle(all_paths)

    train_datalist = all_paths[:train_number]
    val_datalist = all_paths[train_number: train_number + val_number]
    test_datalist = all_paths[-test_number:]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    return [train_ds, val_ds, test_ds]
+
def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
    """Like get_all_training_loader, but data_dir is a list of directories."""
    all_paths = []
    for directory in data_dir:
        all_paths.extend(glob.glob(f"{directory}/*.npz"))

    val_datalist = get_kfold_data(all_paths, 5)[fold]["val_data"]
    train_datalist = all_paths

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    test_ds = None
    if test_dir is not None:
        test_ds = MedicalDataset(glob.glob(f"{test_dir}/*.npz"), test=True)

    return [train_ds, val_ds, test_ds]
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/dataloading/get_train_val_test_datalist.py b/PRISM/SegMamba/light_training/dataloading/get_train_val_test_datalist.py
new file mode 100644
index 0000000000000000000000000000000000000000..22edcd46c83c6347fc8dbcc59c4cd5bb0789515a
--- /dev/null
+++ b/PRISM/SegMamba/light_training/dataloading/get_train_val_test_datalist.py
@@ -0,0 +1,36 @@
+
import glob
import json
import os
import random
+
def get_train_val_test_list_from_fulldata(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
    """Write a reproducible train/val/test split of the .npz case names in
    data_dir to ./data_split.json.

    Only base file names are stored so the JSON stays portable across hosts.
    Note: the test split is taken from the end of the shuffled list, so
    rates summing to more than 1 produce overlapping splits.
    """
    all_paths = glob.glob(f"{data_dir}/*.npz")

    # keep only the file name (os.path.basename is portable, unlike split("/"))
    all_paths = [os.path.basename(p) for p in all_paths]

    train_number = int(len(all_paths) * train_rate)
    val_number = int(len(all_paths) * val_rate)
    test_number = int(len(all_paths) * test_rate)

    random.seed(seed)
    random.shuffle(all_paths)

    train_datalist = all_paths[:train_number]
    val_datalist = all_paths[train_number: train_number + val_number]
    test_datalist = all_paths[-test_number:]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    datalist = {
        "train": train_datalist,
        "validation": val_datalist,
        "test": test_datalist
    }

    with open("./data_split.json", "w") as f:
        f.write(json.dumps(datalist))
diff --git a/PRISM/SegMamba/light_training/dataloading/utils.py b/PRISM/SegMamba/light_training/dataloading/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cc60e7b992fb915c01bb7a7a5f6857bb1fb4dc8
--- /dev/null
+++ b/PRISM/SegMamba/light_training/dataloading/utils.py
@@ -0,0 +1,25 @@
+import numpy as np
+import os
+from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles
+import multiprocessing
+
+def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None:
+ # try:
+ a = np.load(npz_file) # inexpensive, no compression is done here. This just reads metadata
+ if overwrite_existing or not isfile(npz_file[:-3] + "npy"):
+ np.save(npz_file[:-3] + "npy", a['data'])
+
+ if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")):
+ np.save(npz_file[:-4] + "_seg.npy", a['seg'])
+
def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False,
                   num_processes: int = 8):
    """
    all npz files in this folder belong to the dataset, unpack them all
    """
    npz_files = subfiles(folder, True, None, ".npz", True)
    args = [(f, unpack_segmentation, overwrite_existing) for f in npz_files]
    with multiprocessing.get_context("spawn").Pool(num_processes) as pool:
        pool.starmap(_convert_to_npy, args)
diff --git a/PRISM/SegMamba/light_training/dataloading_global/__init__.py b/PRISM/SegMamba/light_training/dataloading_global/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/dataloading_global/dataset.py b/PRISM/SegMamba/light_training/dataloading_global/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fbf3646921db9d0a390fd250099ab76e2296d1b
--- /dev/null
+++ b/PRISM/SegMamba/light_training/dataloading_global/dataset.py
@@ -0,0 +1,329 @@
+
+# Copyright 2020 - 2022 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from sklearn.model_selection import KFold  # K-fold cross-validation
+import pickle
+import os
+import json
+import math
+import numpy as np
+import torch
+from monai import transforms
+import SimpleITK as sitk
+from tqdm import tqdm
+from torch.utils.data import Dataset
+import glob
+from light_training.dataloading_global.utils import unpack_dataset
+import random
+
+class MedicalDataset(Dataset):
+ def __init__(self, datalist, test=False) -> None:
+ super().__init__()
+
+ self.datalist = datalist
+ self.test = test
+
+ self.data_cached = []
+ for p in tqdm(self.datalist, total=len(self.datalist)):
+ info = self.load_pkl(p)
+
+ self.data_cached.append(info)
+
+ ## unpacking
+ print(f"unpacking data ....")
+ # for
+ folder = []
+ for p in self.datalist:
+ f = os.path.dirname(p)
+ if f not in folder:
+ folder.append(f)
+ for f in folder:
+ unpack_dataset(f,
+ unpack_segmentation=True,
+ overwrite_existing=False,
+ num_processes=8)
+
+
+ print(f"data length is {len(self.datalist)}")
+
+ def load_pkl(self, data_path):
+ pass
+ properties_path = f"{data_path[:-4]}.pkl"
+ df = open(properties_path, "rb")
+ info = pickle.load(df)
+
+ return info
+
+ def post(self, batch_data):
+ return batch_data
+
+ def read_data(self, data_path):
+
+ image_path = data_path.replace(".npz", ".npy")
+ seg_path = data_path.replace(".npz", "_seg.npy")
+ image_global_path = data_path.replace(".npz", "_global.npy")
+ seg_global_path = data_path.replace(".npz", "_global_seg.npy")
+
+ image_data = np.load(image_path, "r+")
+ image_data_global = np.load(image_global_path, "r+")
+
+ seg_data = None
+ if not self.test:
+ seg_data = np.load(seg_path, "r+")
+ seg_global_data = np.load(seg_global_path, "r+")
+
+ return image_data, image_data_global, seg_data, seg_global_data
+
+
+ def __getitem__(self, i):
+
+ image, image_data_global, seg, seg_global = self.read_data(self.datalist[i])
+
+ # print(image_data_global.shape)
+ properties = self.data_cached[i]
+
+ if seg is None:
+ return {
+ "data": image,
+ "data_global": image_data_global,
+ "properties": properties
+ }
+ else :
+ return {
+ "data": image,
+ "data_global": image_data_global,
+ "seg": seg,
+ "seg_global": seg_global,
+ "properties": properties
+ }
+
+ def __len__(self):
+ return len(self.datalist)
+
+def get_train_test_loader_from_test_list(data_dir, test_list):
+    """Split the .npz cases in `data_dir` into train/test datasets.
+
+    `test_list` holds case names (optionally ending in ".nii.gz"); a
+    case whose file stem matches goes to test, everything else to train.
+    Returns [train_ds, test_ds].
+    """
+    all_paths = glob.glob(f"{data_dir}/*.npz")
+
+    test_datalist = []
+    train_datalist = []
+
+    # Strip ".nii.gz" so names compare against .npz file stems.
+    test_list_1 = []
+    for t in test_list:
+        test_list_1.append(t.replace(".nii.gz", ""))
+
+    test_list = test_list_1
+    for p in all_paths:
+        # NOTE(review): splitting on "/" is POSIX-only and ".".split
+        # keeps only the part before the FIRST dot — confirm case names
+        # contain no extra dots and this never runs on Windows.
+        p2 = p.split("/")[-1].split(".")[0]
+        if p2 in test_list:
+            test_datalist.append(p)
+        else :
+            train_datalist.append(p)
+
+    print(f"training data is {len(train_datalist)}")
+    print(f"test data is {len(test_datalist)}", test_datalist)
+
+    train_ds = MedicalDataset(train_datalist)
+    test_ds = MedicalDataset(test_datalist)
+
+    loader = [train_ds, test_ds]
+
+    return loader
+
+def get_kfold_data(data_paths, n_splits, shuffle=False):
+    """Partition `data_paths` into `n_splits` cross-validation folds.
+
+    Returns a list of dicts, one per fold, each with "train_data" and
+    "val_data" lists of paths.
+    """
+    X = np.arange(len(data_paths))
+    kfold = KFold(n_splits=n_splits, shuffle=shuffle)  # sklearn KFold over the index array
+    return_res = []
+    for a, b in kfold.split(X):
+        # a: training indices, b: validation indices for this fold.
+        fold_train = []
+        fold_val = []
+        for i in a:
+            fold_train.append(data_paths[i])
+        for j in b:
+            fold_val.append(data_paths[j])
+        return_res.append({"train_data": fold_train, "val_data": fold_val})
+
+    return return_res
+
+def get_kfold_loader(data_dir, fold=0, test_dir=None):
+    """Build [train_ds, val_ds, test_ds] for fold `fold` of a 5-fold CV.
+
+    test_ds is None unless `test_dir` is given (then loaded test=True,
+    i.e. without segmentations).
+    """
+    all_paths = glob.glob(f"{data_dir}/*.npz")
+    fold_data = get_kfold_data(all_paths, 5)[fold]
+
+    train_datalist = fold_data["train_data"]
+    val_datalist = fold_data["val_data"]
+
+    print(f"training data is {len(train_datalist)}")
+    print(f"validation data is {len(val_datalist)}")
+    train_ds = MedicalDataset(train_datalist)
+
+    val_ds = MedicalDataset(val_datalist)
+
+    if test_dir is not None:
+        test_paths = glob.glob(f"{test_dir}/*.npz")
+        test_ds = MedicalDataset(test_paths, test=True)
+    else:
+        test_ds = None
+
+    loader = [train_ds, val_ds, test_ds]
+
+    return loader
+
+def get_all_training_loader(data_dir, fold=0, test_dir=None):
+    """Train on ALL labelled cases; `fold` only selects the validation
+    subset, which therefore overlaps the training data (intentional per
+    the original comments). Returns [train_ds, val_ds, test_ds];
+    test_ds is None without `test_dir`.
+    """
+    ## train all labeled data
+    ## fold denote the validation data in training data
+    all_paths = glob.glob(f"{data_dir}/*.npz")
+    fold_data = get_kfold_data(all_paths, 5)[fold]
+
+    train_datalist = all_paths
+    val_datalist = fold_data["val_data"]
+
+    print(f"training data is {len(train_datalist)}")
+    print(f"validation data is {len(val_datalist)}")
+    train_ds = MedicalDataset(train_datalist)
+
+    val_ds = MedicalDataset(val_datalist)
+
+    if test_dir is not None:
+        test_paths = glob.glob(f"{test_dir}/*.npz")
+        test_ds = MedicalDataset(test_paths, test=True)
+    else:
+        test_ds = None
+
+    loader = [train_ds, val_ds, test_ds]
+
+    return loader
+
+def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
+    """Build datasets from three separate directories.
+
+    Returns [train_ds, val_ds, test_ds]; test_ds is None when no
+    test_dir is given. ("seperate" typo kept: the name is public API.)
+    """
+    train_datalist = glob.glob(f"{train_dir}/*.npz")
+    val_datalist = glob.glob(f"{val_dir}/*.npz")
+
+    print(f"training data is {len(train_datalist)}")
+    print(f"validation data is {len(val_datalist)}")
+
+    if test_dir is not None:
+        test_datalist = glob.glob(f"{test_dir}/*.npz")
+        print(f"test data is {len(test_datalist)}")
+        test_ds = MedicalDataset(test_datalist, test=True)
+    else :
+        test_ds = None
+
+    train_ds = MedicalDataset(train_datalist)
+    val_ds = MedicalDataset(val_datalist)
+
+    loader = [train_ds, val_ds, test_ds]
+
+    return loader
+
+def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
+    """Random, seed-deterministic train/val/test split of all .npz cases.
+
+    NOTE(review): counts are truncated with int() and the test slice is
+    taken from the END of the shuffled list, so a few cases between the
+    val slice and the test slice can be left unused — and slices could
+    overlap if the rates sum to more than 1. Confirm this is acceptable.
+    """
+    all_paths = glob.glob(f"{data_dir}/*.npz")
+
+    train_number = int(len(all_paths) * train_rate)
+    val_number = int(len(all_paths) * val_rate)
+    test_number = int(len(all_paths) * test_rate)
+    random.seed(seed)  # fixed seed => reproducible split
+    random.shuffle(all_paths)
+    train_datalist = all_paths[:train_number]
+    val_datalist = all_paths[train_number: train_number + val_number]
+    test_datalist = all_paths[-test_number:]
+
+    print(f"training data is {len(train_datalist)}")
+    print(f"validation data is {len(val_datalist)}")
+    print(f"test data is {len(test_datalist)}", sorted(test_datalist))
+
+    train_ds = MedicalDataset(train_datalist)
+    val_ds = MedicalDataset(val_datalist)
+    test_ds = MedicalDataset(test_datalist)
+
+    loader = [train_ds, val_ds, test_ds]
+
+    return loader
+
+def get_train_val_test_loader_from_split_json(data_dir, split_json_file):
+    """Build [train_ds, val_ds, test_ds] from a split JSON file whose
+    "train" / "validation" / "test" entries are file names relative to
+    `data_dir`.
+    """
+    import json  # shadows the module-level import; kept as-is
+
+    with open(split_json_file, "r") as f:
+
+        datalist = json.loads(f.read())
+
+        train_datalist = datalist["train"]
+        val_datalist = datalist["validation"]
+        test_datalist = datalist["test"]
+
+    def add_pre(datalist):
+        # Prefix every relative name with data_dir, in place.
+        for i in range(len(datalist)):
+            datalist[i] = os.path.join(data_dir, datalist[i])
+
+    add_pre(train_datalist)
+    add_pre(val_datalist)
+    add_pre(test_datalist)
+
+    print(f"training data is {len(train_datalist)}")
+    print(f"validation data is {len(val_datalist)}")
+    print(f"test data is {len(test_datalist)}", sorted(test_datalist))
+
+    train_ds = MedicalDataset(train_datalist)
+    val_ds = MedicalDataset(val_datalist)
+    test_ds = MedicalDataset(test_datalist)
+
+    loader = [train_ds, val_ds, test_ds]
+
+    return loader
+
+def get_train_loader_from_train(data_dir):
+ ## train all labeled data
+ ## fold denote the validation data in training data
+ all_paths = glob.glob(f"{data_dir}/*.npz")
+ # fold_data = get_kfold_data(all_paths, 5)[fold]
+
+ train_ds = MedicalDataset(all_paths)
+
+ return train_ds
+
+def get_test_loader_from_test(data_dir):
+ all_paths = glob.glob(f"{data_dir}/*.npz")
+
+ test_ds = MedicalDataset(all_paths)
+
+ return test_ds
+
+def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
+    """Like get_all_training_loader, but `data_dir` is a LIST of
+    directories; trains on every case from all of them and validates on
+    fold `fold` (the validation subset overlaps training by design).
+    """
+    ## train all labeled data
+    ## fold denote the validation data in training data
+    all_paths = []
+    for p in data_dir:
+        paths = glob.glob(f"{p}/*.npz")
+        for pp in paths:
+            all_paths.append(pp)
+
+    fold_data = get_kfold_data(all_paths, 5)[fold]
+
+    train_datalist = all_paths
+    val_datalist = fold_data["val_data"]
+
+    print(f"training data is {len(train_datalist)}")
+    print(f"validation data is {len(val_datalist)}")
+    train_ds = MedicalDataset(train_datalist)
+
+    val_ds = MedicalDataset(val_datalist)
+
+    if test_dir is not None:
+        test_paths = glob.glob(f"{test_dir}/*.npz")
+        test_ds = MedicalDataset(test_paths, test=True)
+    else:
+        test_ds = None
+
+    loader = [train_ds, val_ds, test_ds]
+
+    return loader
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/dataloading_global/utils.py b/PRISM/SegMamba/light_training/dataloading_global/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf25e09c6e722316c21b4fefddaf146de1cd4358
--- /dev/null
+++ b/PRISM/SegMamba/light_training/dataloading_global/utils.py
@@ -0,0 +1,27 @@
+import numpy as np
+import os
+from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles
+import multiprocessing
+
+def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None:
+    """Unpack one .npz case into flat .npy files (data, global data,
+    global seg, and optionally seg) next to the archive.
+
+    NOTE(review): the _global / _global_seg outputs are only written
+    when the plain <case>.npy is missing (or overwrite is forced); if
+    <case>.npy already exists from an earlier run without globals, the
+    global files are never created — confirm this is intended.
+    """
+    a = np.load(npz_file)  # lazy NpzFile: arrays are decompressed only on access
+    if overwrite_existing or not isfile(npz_file[:-3] + "npy"):
+        np.save(npz_file[:-3] + "npy", a['data'])
+        np.save(npz_file[:-4] + "_global.npy", a['data_global'])
+        np.save(npz_file[:-4] + "_global_seg.npy", a['seg_global'])
+
+    if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")):
+        np.save(npz_file[:-4] + "_seg.npy", a['seg'])
+
+def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False,
+                   num_processes: int = 8):
+    """
+    all npz files in this folder belong to the dataset, unpack them all
+    (runs _convert_to_npy over every .npz in `folder` using a "spawn"
+    process pool; spawn avoids inheriting parent state on fork).
+    """
+    with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+        npz_files = subfiles(folder, True, None, ".npz", True)
+        p.starmap(_convert_to_npy, zip(npz_files,
+                                       [unpack_segmentation] * len(npz_files),
+                                       [overwrite_existing] * len(npz_files))
+                  )
diff --git a/PRISM/SegMamba/light_training/evaluation/metric.py b/PRISM/SegMamba/light_training/evaluation/metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..eed0b2c9debaad190b3807ff14b113920789059c
--- /dev/null
+++ b/PRISM/SegMamba/light_training/evaluation/metric.py
@@ -0,0 +1,406 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+from medpy import metric
+
+
+def assert_shape(test, reference):
+    """Assert that `test` and `reference` arrays have identical shapes."""
+    assert test.shape == reference.shape, "Shape mismatch: {} and {}".format(
+        test.shape, reference.shape)
+
+
+class ConfusionMatrix:
+    """Lazily-computed binary confusion matrix between `test` and `reference`.
+
+    Both inputs are treated as binary masks (non-zero == foreground).
+    Statistics are computed on demand by compute() and invalidated by
+    reset() whenever either input is replaced.
+    """
+
+    def __init__(self, test=None, reference=None):
+
+        self.tp = None
+        self.fp = None
+        self.tn = None
+        self.fn = None
+        self.size = None
+        self.reference_empty = None
+        self.reference_full = None
+        self.test_empty = None
+        self.test_full = None
+        self.set_reference(reference)
+        self.set_test(test)
+
+    def set_test(self, test):
+        # Replacing the test mask invalidates all cached statistics.
+        self.test = test
+        self.reset()
+
+    def set_reference(self, reference):
+        # Replacing the reference mask invalidates all cached statistics.
+        self.reference = reference
+        self.reset()
+
+    def reset(self):
+        # Drop every cached statistic; recomputed on next access.
+        self.tp = None
+        self.fp = None
+        self.tn = None
+        self.fn = None
+        self.size = None
+        self.test_empty = None
+        self.test_full = None
+        self.reference_empty = None
+        self.reference_full = None
+
+    def compute(self):
+        """Compute tp/fp/tn/fn, voxel count, and empty/full flags."""
+        if self.test is None or self.reference is None:
+            raise ValueError("'test' and 'reference' must both be set to compute confusion matrix.")
+
+        assert_shape(self.test, self.reference)
+
+        self.tp = int(((self.test != 0) * (self.reference != 0)).sum())
+        self.fp = int(((self.test != 0) * (self.reference == 0)).sum())
+        self.tn = int(((self.test == 0) * (self.reference == 0)).sum())
+        self.fn = int(((self.test == 0) * (self.reference != 0)).sum())
+        self.size = int(np.prod(self.reference.shape, dtype=np.int64))
+        self.test_empty = not np.any(self.test)
+        self.test_full = np.all(self.test)
+        self.reference_empty = not np.any(self.reference)
+        self.reference_full = np.all(self.reference)
+
+    def get_matrix(self):
+        """Return (tp, fp, tn, fn), computing them if not cached yet."""
+        for entry in (self.tp, self.fp, self.tn, self.fn):
+            if entry is None:
+                self.compute()
+                break
+
+        return self.tp, self.fp, self.tn, self.fn
+
+    def get_size(self):
+        """Return the total number of voxels, computing it if needed."""
+        if self.size is None:
+            self.compute()
+        return self.size
+
+    def get_existence(self):
+        """Return (test_empty, test_full, reference_empty, reference_full)."""
+        for case in (self.test_empty, self.test_full, self.reference_empty, self.reference_full):
+            if case is None:
+                self.compute()
+                break
+
+        return self.test_empty, self.test_full, self.reference_empty, self.reference_full
+
+
+def dice(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """2TP / (2TP + FP + FN); NaN (or 0) when both masks are empty."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if test_empty and reference_empty:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0.
+
+    return float(2. * tp / (2 * tp + fp + fn))
+
+
+def jaccard(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """TP / (TP + FP + FN); NaN (or 0) when both masks are empty."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if test_empty and reference_empty:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0.
+
+    return float(tp / (tp + fp + fn))
+
+
+def precision(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """TP / (TP + FP); NaN (or 0) when the prediction is empty."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if test_empty:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0.
+
+    return float(tp / (tp + fp))
+
+
+def sensitivity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """TP / (TP + FN); NaN (or 0) when the reference is empty."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if reference_empty:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0.
+
+    return float(tp / (tp + fn))
+
+
+def recall(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """TP / (TP + FN) — alias of sensitivity."""
+
+    return sensitivity(test, reference, confusion_matrix, nan_for_nonexisting, **kwargs)
+
+
+def specificity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """TN / (TN + FP); NaN (or 0) when the reference is all-foreground."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if reference_full:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0.
+
+    return float(tn / (tn + fp))
+
+
+def accuracy(test=None, reference=None, confusion_matrix=None, **kwargs):
+    """(TP + TN) / (TP + FP + FN + TN)
+
+    NOTE(review): divides by the voxel count — raises on zero-size
+    arrays, but those should not occur for real images.
+    """
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+
+    return float((tp + tn) / (tp + fp + tn + fn))
+
+
+def fscore(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, beta=1., **kwargs):
+ """(1 + b^2) * TP / ((1 + b^2) * TP + b^2 * FN + FP)"""
+
+ precision_ = precision(test, reference, confusion_matrix, nan_for_nonexisting)
+ recall_ = recall(test, reference, confusion_matrix, nan_for_nonexisting)
+
+ return (1 + beta*beta) * precision_ * recall_ /\
+ ((beta*beta * precision_) + recall_)
+
+
+def false_positive_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """FP / (FP + TN) — complement of specificity."""
+
+    return 1 - specificity(test, reference, confusion_matrix, nan_for_nonexisting)
+
+
+def false_omission_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """FN / (TN + FN); NaN (or 0) when the prediction is all-foreground."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if test_full:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0.
+
+    return float(fn / (fn + tn))
+
+
+def false_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """FN / (TP + FN) — complement of sensitivity."""
+
+    return 1 - sensitivity(test, reference, confusion_matrix, nan_for_nonexisting)
+
+
+def true_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """TN / (TN + FP) — alias of specificity."""
+
+    return specificity(test, reference, confusion_matrix, nan_for_nonexisting)
+
+
+def false_discovery_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """FP / (TP + FP) — complement of precision."""
+
+    return 1 - precision(test, reference, confusion_matrix, nan_for_nonexisting)
+
+
+def negative_predictive_value(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
+    """TN / (TN + FN) — complement of the false omission rate."""
+
+    return 1 - false_omission_rate(test, reference, confusion_matrix, nan_for_nonexisting)
+
+
+def total_positives_test(test=None, reference=None, confusion_matrix=None, **kwargs):
+    """TP + FP — number of foreground voxels in the prediction."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+
+    return tp + fp
+
+
+def total_negatives_test(test=None, reference=None, confusion_matrix=None, **kwargs):
+    """TN + FN — number of background voxels in the prediction."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+
+    return tn + fn
+
+
+def total_positives_reference(test=None, reference=None, confusion_matrix=None, **kwargs):
+    """TP + FN — number of foreground voxels in the reference."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+
+    return tp + fn
+
+
+def total_negatives_reference(test=None, reference=None, confusion_matrix=None, **kwargs):
+    """TN + FP — number of background voxels in the reference."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    tp, fp, tn, fn = confusion_matrix.get_matrix()
+
+    return tn + fp
+
+
+def hausdorff_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
+    """Hausdorff distance via medpy; NaN (or 0) when either mask is
+    empty or full, since the surface is then undefined."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if test_empty or test_full or reference_empty or reference_full:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0
+
+    test, reference = confusion_matrix.test, confusion_matrix.reference
+
+    return metric.hd(test, reference, voxel_spacing, connectivity)
+
+
+def hausdorff_distance_95(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
+    """95th-percentile Hausdorff distance via medpy; NaN (or 0) when
+    either mask is empty or full."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if test_empty or test_full or reference_empty or reference_full:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0
+
+    test, reference = confusion_matrix.test, confusion_matrix.reference
+
+    return metric.hd95(test, reference, voxel_spacing, connectivity)
+
+
+def avg_surface_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
+    """Average (one-sided) surface distance via medpy; NaN (or 0) when
+    either mask is empty or full."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if test_empty or test_full or reference_empty or reference_full:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0
+
+    test, reference = confusion_matrix.test, confusion_matrix.reference
+
+    return metric.asd(test, reference, voxel_spacing, connectivity)
+
+
+def avg_surface_distance_symmetric(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
+    """Average symmetric surface distance via medpy; NaN (or 0) when
+    either mask is empty or full."""
+
+    if confusion_matrix is None:
+        confusion_matrix = ConfusionMatrix(test, reference)
+
+    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()
+
+    if test_empty or test_full or reference_empty or reference_full:
+        if nan_for_nonexisting:
+            return float("NaN")
+        else:
+            return 0
+
+    test, reference = confusion_matrix.test, confusion_matrix.reference
+
+    return metric.assd(test, reference, voxel_spacing, connectivity)
+
+
+# Registry mapping human-readable metric names to their callables.
+ALL_METRICS = {
+    "False Positive Rate": false_positive_rate,
+    "Dice": dice,
+    "Jaccard": jaccard,
+    "Hausdorff Distance": hausdorff_distance,
+    "Hausdorff Distance 95": hausdorff_distance_95,
+    "Precision": precision,
+    "Recall": recall,
+    "Avg. Symmetric Surface Distance": avg_surface_distance_symmetric,
+    "Avg. Surface Distance": avg_surface_distance,
+    "Accuracy": accuracy,
+    "False Omission Rate": false_omission_rate,
+    "Negative Predictive Value": negative_predictive_value,
+    "False Negative Rate": false_negative_rate,
+    "True Negative Rate": true_negative_rate,
+    "False Discovery Rate": false_discovery_rate,
+    "Total Positives Test": total_positives_test,
+    "Total Negatives Test": total_negatives_test,
+    "Total Positives Reference": total_positives_reference,
+    # NOTE(review): lowercase "total" below looks like a typo, but the
+    # key is part of the public mapping — renaming would break lookups.
+    "total Negatives Reference": total_negatives_reference
+}
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py b/PRISM/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py
new file mode 100644
index 0000000000000000000000000000000000000000..223733edcf5f4c52c832b52df8c1a9d29513182d
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py
@@ -0,0 +1,27 @@
+
+
+
+import os
+
+# data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"
+data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-ValidationData/"
+
+# Strip the per-case prefix from every modality file, e.g.
+# "BraTS-GLI-00001-000-t2w.nii.gz" -> "t2w.nii.gz", so downstream
+# preprocessing can address modalities by a fixed name.
+all_cases = os.listdir(data_dir)
+
+for case_name in all_cases:
+    case_dir = os.path.join(data_dir, case_name)
+
+    for data_name in os.listdir(case_dir):
+
+        # Files without "-" are assumed to be renamed already.
+        if "-" not in data_name:
+            continue
+        new_name = data_name.split("-")[-1]  # keep only the last dash-separated token
+
+        new_path = os.path.join(case_dir, new_name)
+
+        old_path = os.path.join(case_dir, data_name)
+
+        os.rename(old_path, new_path)
+
+        print(f"{new_path} 命名成功")  # "renamed successfully" — runtime string kept as-is
diff --git a/PRISM/SegMamba/light_training/examples/2_preprocessing_AIIB23.py b/PRISM/SegMamba/light_training/examples/2_preprocessing_AIIB23.py
new file mode 100644
index 0000000000000000000000000000000000000000..77b52c2050af19bdbfe8a2998d9cf35d918cef95
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/2_preprocessing_AIIB23.py
@@ -0,0 +1,130 @@
+
+from light_training.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor
+import numpy as np
+import pickle
+import json
+
+
+def process_train():
+    """Preprocess the AIIB23 training set to ./data/fullres/train/,
+    resampling to the fullres spacing and normalising with the intensity
+    statistics from ./data_analysis_result.txt (written by plan())."""
+    # fullres spacing is [0.5 0.70410156 0.70410156]
+    # median_shape is [602.5 516.5 516.5]
+    base_dir = "./data/raw_data/AIIB23_Train_T1"
+    image_dir = "img"
+    label_dir = "gt"
+    preprocessor = DefaultPreprocessor(base_dir=base_dir,
+                                       image_dir=image_dir,
+                                       label_dir=label_dir,
+                                       )
+
+    out_spacing = [0.5, 0.70410156, 0.70410156]
+    output_dir = "./data/fullres/train/"
+
+    with open("./data_analysis_result.txt", "r") as f:
+        content = f.read().strip("\n")
+        print(content)
+        # NOTE(review): eval() on a file is unsafe if the file is not
+        # trusted; ast.literal_eval would be a safer parser here.
+        content = eval(content)
+        foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+    preprocessor.run(output_spacing=out_spacing,
+                     output_dir=output_dir,
+                     all_labels=[1, ],
+                     num_processes=16,
+                     foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)
+
+def process_val():
+    """Preprocess the unlabelled AIIB23 validation set (label_dir=None)
+    to ./data/fullres/val_test/ using the planned statistics."""
+    # fullres spacing is [0.5 0.70410156 0.70410156]
+    # median_shape is [602.5 516.5 516.5]
+    base_dir = "./data/raw_data/Val"
+    image_dir = "img"
+    preprocessor = DefaultPreprocessor(base_dir=base_dir,
+                                       image_dir=image_dir,
+                                       label_dir=None,
+                                       )
+
+    out_spacing = [0.5, 0.70410156, 0.70410156]
+
+    with open("./data_analysis_result.txt", "r") as f:
+        content = f.read().strip("\n")
+        print(content)
+        # NOTE(review): eval() on a data file — prefer ast.literal_eval.
+        content = eval(content)
+        foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+    output_dir = "./data/fullres/val_test/"
+    preprocessor.run(output_spacing=out_spacing,
+                     output_dir=output_dir,
+                     all_labels=[1, ],
+                     foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel,
+                     num_processes=16)
+
+def process_val_semi():
+    """Preprocess the semi-supervised validation set (post-processed
+    pseudo-labels in "gt") to ./data/fullres/val_semi_postprocess/."""
+    # fullres spacing is [0.5 0.70410156 0.70410156]
+    # median_shape is [602.5 516.5 516.5]
+    base_dir = "./data/raw_data/Val_semi_postprocess"
+    image_dir = "img"
+    preprocessor = DefaultPreprocessor(base_dir=base_dir,
+                                       image_dir=image_dir,
+                                       label_dir="gt",
+                                       )
+
+    out_spacing = [0.5, 0.70410156, 0.70410156]
+
+    with open("./data_analysis_result.txt", "r") as f:
+        content = f.read().strip("\n")
+        print(content)
+        # NOTE(review): eval() on a data file — prefer ast.literal_eval.
+        content = eval(content)
+        foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+    output_dir = "./data/fullres/val_semi_postprocess/"
+    preprocessor.run(output_spacing=out_spacing,
+                     output_dir=output_dir,
+                     all_labels=[1, ],
+                     foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)
+
+
+def plan():
+    """Run the experiment-planning step on the AIIB23 training data
+    (presumably producing ./data_analysis_result.txt consumed by the
+    process_* functions — TODO confirm run_plan's output)."""
+    base_dir = "./data/raw_data/AIIB23_Train_T1"
+    image_dir = "img"
+    label_dir = "gt"
+
+    preprocessor = DefaultPreprocessor(base_dir=base_dir,
+                                       image_dir=image_dir,
+                                       label_dir=label_dir,
+                                       )
+
+    preprocessor.run_plan()
+
+if __name__ == "__main__":
+
+    # Run plan() once before preprocessing to generate the statistics file.
+    # plan()
+
+    process_train()
+
+    # Timing / validation / exploration snippets kept for reference:
+    # import time
+    # s = time.time()
+    # process_val()
+    # e = time.time()
+
+    # print(f"preprocessing time is {e - s}")
+
+    # process_val_semi()
+
+    # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir)
+
+    # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz")
+
+    # image = data["data"]
+    # label = data["seg"]
+    # print(image.shape)
+    # print(label.shape)
+
+    # import matplotlib.pyplot as plt
+
+    # for i in range(20):
+    #     plt.imshow(image[0, i], cmap="gray")
+    #     plt.show()
+
+    # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb")
+
+    # info = pickle.load(df)
+    # print(info)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py b/PRISM/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac97dbdeea89eab4c281db066b23452599b638cd
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py
@@ -0,0 +1,94 @@
+
+from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor
+import numpy as np
+import pickle
+import json
+
+# Fixed modality file names (after 1_rename_mri_data_BraTS2023.py) and
+# the segmentation file name shared by the preprocessing entry points.
+data_filename = ["t2w.nii.gz",
+                 "t2f.nii.gz",
+                 "t1n.nii.gz",
+                 "t1c.nii.gz"]
+seg_filename = "seg.nii.gz"
+
+def process_train():
+    """Preprocess BraTS2023 training cases to ./data/fullres/train/ at
+    1 mm isotropic spacing, with labels {1, 2, 3}."""
+    base_dir = "./data/raw_data/BraTS2023/"
+    image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData"
+    preprocessor = MultiModalityPreprocessor(base_dir=base_dir,
+                                             image_dir=image_dir,
+                                             data_filenames=data_filename,
+                                             seg_filename=seg_filename
+                                             )
+
+    out_spacing = [1.0, 1.0, 1.0]
+    output_dir = "./data/fullres/train/"
+
+    preprocessor.run(output_spacing=out_spacing,
+                     output_dir=output_dir,
+                     all_labels=[1, 2, 3],
+                     )
+
+def process_val():
+    """Preprocess BraTS2023 validation cases (no segmentations:
+    seg_filename="") to ./data/fullres/val/."""
+    base_dir = "./data/raw_data/BraTS2023/"
+    image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-ValidationData"
+    preprocessor = MultiModalityPreprocessor(base_dir=base_dir,
+                                             image_dir=image_dir,
+                                             data_filenames=data_filename,
+                                             seg_filename=""
+                                             )
+
+    out_spacing = [1.0, 1.0, 1.0]
+    output_dir = "./data/fullres/val/"
+
+    preprocessor.run(output_spacing=out_spacing,
+                     output_dir=output_dir,
+                     all_labels=[1, 2, 3],
+                     )
+
+def process_test():
+    """Preprocess a WORD test split to ./data/fullres/test/.
+
+    NOTE(review): `DefaultPreprocessor` is never imported in this file
+    (only MultiModalityPreprocessor is) — calling this raises NameError.
+    The WORD paths also suggest this was copy-pasted from another
+    script; confirm and fix before use. Currently dead code (commented
+    out in __main__).
+    """
+    base_dir = "/home/xingzhaohu/sharefs/datasets/WORD-V0.1.0/"
+    image_dir = "imagesTs"
+    label_dir = "labelsTs"
+    preprocessor = DefaultPreprocessor(base_dir=base_dir,
+                                       image_dir=image_dir,
+                                       label_dir=label_dir,
+                                       )
+
+    out_spacing = [3.0, 0.9765625, 0.9765625]
+
+    output_dir = "./data/fullres/test/"
+    with open("./data_analysis_result.txt", "r") as f:
+        content = f.read().strip("\n")
+        print(content)
+        content = json.loads(content)
+        foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+    preprocessor.run(output_spacing=out_spacing,
+                     output_dir=output_dir,
+                     all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+                     foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)
+
+
+def plan():
+    """Run the experiment-planning step on the BraTS2023 training data
+    (presumably produces the data-analysis statistics — TODO confirm)."""
+    base_dir = "./data/raw_data/BraTS2023/"
+    image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData"
+    preprocessor = MultiModalityPreprocessor(base_dir=base_dir,
+                                             image_dir=image_dir,
+                                             data_filenames=data_filename,
+                                             seg_filename=seg_filename
+                                             )
+
+    preprocessor.run_plan()
+
+
+if __name__ == "__main__":
+
+    # Run plan() first if the analysis statistics do not exist yet.
+    # plan()
+
+    process_train()
+    # process_val()
+    # process_test()
diff --git a/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py b/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py
new file mode 100644
index 0000000000000000000000000000000000000000..00d5a587a8ee9b197990ee1f6538fce386062963
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py
@@ -0,0 +1,122 @@
+
+from light_training.preprocessing.preprocessors.default_preprocessor_AbdomenAtlas1_0Mini import DefaultPreprocessor
+import numpy as np
+import pickle
+import json
+
+
+def process_train():
+ # fullres spacing is [0.5 0.70410156 0.70410156]
+ # median_shape is [602.5 516.5 516.5]
+ base_dir = "/home/xingzhaohu/data/AbdomenAtlas1.0Mini"
+
+ preprocessor = DefaultPreprocessor(base_dir=base_dir)
+
+ out_spacing = [2.0, 0.8134765, 0.83007812]
+ output_dir = "./data/fullres/train/"
+
+ with open("./data_analysis_result.txt", "r") as f:
+ content = f.read().strip("\n")
+ print(content)
+ content = eval(content)
+ foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+ preprocessor.run(output_spacing=out_spacing,
+ output_dir=output_dir,
+ all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9],
+ num_processes=16,
+ foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)
+
+def process_val():
+ # fullres spacing is [0.5 0.70410156 0.70410156]
+ # median_shape is [602.5 516.5 516.5]
+ base_dir = "./data/raw_data/Val"
+ image_dir = "img"
+ preprocessor = DefaultPreprocessor(base_dir=base_dir,
+ image_dir=image_dir,
+ label_dir=None,
+ )
+
+ out_spacing = [0.5, 0.70410156, 0.70410156]
+
+ with open("./data_analysis_result.txt", "r") as f:
+ content = f.read().strip("\n")
+ print(content)
+ content = eval(content)
+ foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+ output_dir = "./data/fullres/val_test/"
+ preprocessor.run(output_spacing=out_spacing,
+ output_dir=output_dir,
+ all_labels=[1, ],
+ foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel,
+ num_processes=16)
+
+def process_test():
+ # fullres spacing is [0.5 0.70410156 0.70410156]
+ # median_shape is [602.5 516.5 516.5]
+ base_dir = "/home/xingzhaohu/data/AbdomenAtlas1.0Mini_test"
+ preprocessor = DefaultPreprocessor(base_dir=base_dir)
+
+ out_spacing = [2.0, 0.8134765, 0.83007812]
+ output_dir = "./data/fullres/test/"
+
+ with open("./data_analysis_result.txt", "r") as f:
+ content = f.read().strip("\n")
+ print(content)
+ content = eval(content)
+ foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+ preprocessor.run(output_spacing=out_spacing,
+ output_dir=output_dir,
+ all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9],
+ num_processes=16,
+ foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)
+
+
+def plan():
+ base_dir = "/home/xingzhaohu/data/AbdomenAtlas1.0Mini"
+
+ preprocessor = DefaultPreprocessor(base_dir=base_dir,
+
+ )
+
+ preprocessor.run_plan()
+
+if __name__ == "__main__":
+
+ # plan()
+
+ # process_train()
+
+ process_test()
+ # import time
+ # s = time.time()
+ # process_val()
+ # e = time.time()
+
+ # print(f"preprocessing time is {e - s}")
+
+ # process_val_semi()
+
+
+#
+ # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir)
+
+ # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz")
+
+ # image = data["data"]
+ # label = data["seg"]
+ # print(image.shape)
+ # print(label.shape)
+
+ # import matplotlib.pyplot as plt
+
+ # for i in range(20):
+ # plt.imshow(image[0, i], cmap="gray")
+ # plt.show()
+
+ # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb")
+
+ # info = pickle.load(df)
+ # print(info)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py b/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py
new file mode 100644
index 0000000000000000000000000000000000000000..eebd98d634e7dbd2efc4e8fc31c9745ee84b46da
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py
@@ -0,0 +1,215 @@
+import numpy as np
+from light_training.dataloading.dataset import get_train_val_test_loader_from_train
+# from dataset.brats_data_utils_resample128 import get_loader_brats
+import torch
+import torch.nn as nn
+# from ddim_seg.basic_unet import BasicUNet
+from monai.networks.nets.unetr import UNETR
+from monai.networks.nets.swin_unetr import SwinUNETR
+from monai.inferers import SlidingWindowInferer
+from light_training.evaluation.metric import dice
+from light_training.trainer import Trainer
+from monai.utils import set_determinism
+from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
+from light_training.utils.files_helper import save_new_model_and_delete_last
+from models.uent2d import UNet2D
+from models.uent3d import UNet3D
+from monai.networks.nets.segresnet import SegResNet
+# from ddim_seg.unet3d import DiffusionUNet
+# from ddim_seg.ddim import DDIM
+# from ddim_seg.nnunet3d_raw import Generic_UNet
+# from ddim_seg.basic_unet_denose import BasicUNetDe
+# from ddim_seg.basic_unet import BasicUNetEncoder
+from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
+import argparse
+from monai.losses.dice import DiceLoss
+# from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal
+
+# from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
+# from guided_diffusion.respace import SpacedDiffusion, space_timesteps
+# from guided_diffusion.resample import UniformSampler
+set_determinism(123)
+import os
+from scipy import ndimage
+
+
+os.environ["CUDA_VISIBLE_DEVICES"] = "6,7"
+data_dir = "./data/fullres/train"
+
+logdir = f"./logs_gpu4/diffunet_ep2000"
+
+model_save_path = os.path.join(logdir, "model")
+# augmentation = "nomirror"
+augmentation = True
+
+env = "pytorch"
+max_epoch = 2000
+batch_size = 2
+val_every = 2
+num_gpus = 1
+device = "cuda:0"
+roi_size = [128, 128, 128]
+
+def get_edge_points(img):
+ """
+ get edge points of a binary segmentation result
+ """
+ dim = len(img.shape)
+ if (dim == 2):
+ strt = ndimage.generate_binary_structure(2, 1)
+ else:
+ strt = ndimage.generate_binary_structure(3, 1)
+ ero = ndimage.binary_erosion(img, strt)
+ edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8)
+ return edge
+
+def edge_3d(image_3d):
+ # image_3d = torch.from_numpy(image_3d)
+ b, c, d, h, w = image_3d.shape
+
+ image_3d = image_3d[:, 0] > 0
+
+ return_edge = []
+
+ for i in range(image_3d.shape[0]):
+ return_edge.append(get_edge_points(image_3d[i])[None,])
+
+ return_edge = np.concatenate(return_edge, axis=0)
+
+ return return_edge
+
+class BraTSTrainer(Trainer):
+ def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
+ super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
+ self.window_infer = SlidingWindowInferer(roi_size=roi_size,
+ sw_batch_size=1,
+ overlap=0.5)
+ self.augmentation = augmentation
+
+ from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet
+ self.model = DiffUNet(1, 10, 3, 1, bta=True)
+
+ self.patch_size = roi_size
+ self.best_mean_dice = 0.0
+ self.ce = nn.CrossEntropyLoss()
+ self.mse = nn.MSELoss()
+ self.train_process = 20
+ self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5,
+ momentum=0.99, nesterov=True)
+
+ self.scheduler_type = "poly"
+ self.bce = nn.BCEWithLogitsLoss()
+ self.dice_loss = DiceLoss(sigmoid=True)
+ self.cross = nn.CrossEntropyLoss()
+
+ def training_step(self, batch):
+ image, label = self.get_input(batch)
+
+ pred, pred_edge = self.model(image, label)
+
+ loss_edge = self.cross(pred_edge, label)
+ loss_seg = self.cross(pred, label)
+
+ self.log("loss_seg", loss_seg, step=self.global_step)
+ self.log("loss_edge", loss_edge, step=self.global_step)
+
+ loss = loss_edge + loss_seg
+ return loss
+
+
+ def get_input(self, batch):
+ image = batch["data"]
+ label = batch["seg"]
+ # label = self.convert_labels(label)
+
+ # label = label.float()
+ label = label[:, 0].long()
+ return image, label
+
+ def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]):
+ if pred.sum() > 0 and gt.sum() > 0:
+ d = dice(pred, gt)
+ # hd95 = metric.binary.hd95(pred, gt)
+ return np.array([d, 50])
+
+ elif gt.sum() == 0 and pred.sum() == 0:
+ return np.array([1.0, 50])
+
+ else:
+ return np.array([0.0, 50])
+
+ def validation_step(self, batch):
+ image, label = self.get_input(batch)
+
+ output = self.model(image, ddim=True)
+
+ # output = output > 0
+ output = output.argmax(dim=1)
+
+ output = output.cpu().numpy()
+ target = label.cpu().numpy()
+
+ dices = []
+
+ c = 10
+ for i in range(1, c):
+ pred_c = output == i
+ target_c = target == i
+
+ cal_dice, _ = self.cal_metric(target_c, pred_c)
+ dices.append(cal_dice)
+
+ return dices
+
+ def validation_end(self, val_outputs):
+ dices = val_outputs
+
+ dices_mean = []
+ c = 9
+ for i in range(0, c):
+ dices_mean.append(dices[i].mean())
+
+ mean_dice = sum(dices_mean) / len(dices_mean)
+
+ self.log("0", dices_mean[0], step=self.epoch)
+ self.log("1", dices_mean[1], step=self.epoch)
+ self.log("2", dices_mean[2], step=self.epoch)
+ self.log("3", dices_mean[3], step=self.epoch)
+ self.log("4", dices_mean[4], step=self.epoch)
+ self.log("5", dices_mean[5], step=self.epoch)
+ self.log("6", dices_mean[6], step=self.epoch)
+ self.log("7", dices_mean[7], step=self.epoch)
+ self.log("8", dices_mean[8], step=self.epoch)
+
+ self.log("mean_dice", mean_dice, step=self.epoch)
+
+ if mean_dice > self.best_mean_dice:
+ self.best_mean_dice = mean_dice
+ save_new_model_and_delete_last(self.model,
+ os.path.join(model_save_path,
+ f"best_model_{mean_dice:.4f}.pt"),
+ delete_symbol="best_model")
+
+ save_new_model_and_delete_last(self.model,
+ os.path.join(model_save_path,
+ f"final_model_{mean_dice:.4f}.pt"),
+ delete_symbol="final_model")
+
+
+ print(f"mean_dice is {mean_dice}")
+
+if __name__ == "__main__":
+
+ trainer = BraTSTrainer(env_type=env,
+ max_epochs=max_epoch,
+ batch_size=batch_size,
+ device=device,
+ logdir=logdir,
+ val_every=val_every,
+ num_gpus=num_gpus,
+ master_port=17759,
+ training_script=__file__)
+
+ train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir)
+
+ trainer.train(train_dataset=train_ds, val_dataset=val_ds)
diff --git a/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py b/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py
new file mode 100644
index 0000000000000000000000000000000000000000..de03789c5e55196b542fdfe678e2e79e6466caa5
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py
@@ -0,0 +1,215 @@
+import numpy as np
+from light_training.dataloading.dataset import get_train_loader_from_train
+# from dataset.brats_data_utils_resample128 import get_loader_brats
+import torch
+import torch.nn as nn
+# from ddim_seg.basic_unet import BasicUNet
+from monai.networks.nets.unetr import UNETR
+from monai.networks.nets.swin_unetr import SwinUNETR
+from monai.inferers import SlidingWindowInferer
+from light_training.evaluation.metric import dice
+from light_training.trainer import Trainer
+from monai.utils import set_determinism
+from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
+from light_training.utils.files_helper import save_new_model_and_delete_last
+from models.uent2d import UNet2D
+from models.uent3d import UNet3D
+from monai.networks.nets.segresnet import SegResNet
+# from ddim_seg.unet3d import DiffusionUNet
+# from ddim_seg.ddim import DDIM
+# from ddim_seg.nnunet3d_raw import Generic_UNet
+# from ddim_seg.basic_unet_denose import BasicUNetDe
+# from ddim_seg.basic_unet import BasicUNetEncoder
+from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
+import argparse
+from monai.losses.dice import DiceLoss
+# from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal
+
+# from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
+# from guided_diffusion.respace import SpacedDiffusion, space_timesteps
+# from guided_diffusion.resample import UniformSampler
+set_determinism(123)
+import os
+from scipy import ndimage
+
+
+os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
+data_dir = "./data/fullres/train"
+
+logdir = f"./logs_gpu4/diffunet_ep2000_train_all_data"
+
+model_save_path = os.path.join(logdir, "model")
+# augmentation = "nomirror"
+augmentation = True
+
+env = "pytorch"
+max_epoch = 2000
+batch_size = 2
+val_every = 2
+num_gpus = 1
+device = "cuda:0"
+roi_size = [128, 128, 128]
+
+def get_edge_points(img):
+ """
+ get edge points of a binary segmentation result
+ """
+ dim = len(img.shape)
+ if (dim == 2):
+ strt = ndimage.generate_binary_structure(2, 1)
+ else:
+ strt = ndimage.generate_binary_structure(3, 1)
+ ero = ndimage.binary_erosion(img, strt)
+ edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8)
+ return edge
+
+def edge_3d(image_3d):
+ # image_3d = torch.from_numpy(image_3d)
+ b, c, d, h, w = image_3d.shape
+
+ image_3d = image_3d[:, 0] > 0
+
+ return_edge = []
+
+ for i in range(image_3d.shape[0]):
+ return_edge.append(get_edge_points(image_3d[i])[None,])
+
+ return_edge = np.concatenate(return_edge, axis=0)
+
+ return return_edge
+
+class BraTSTrainer(Trainer):
+ def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
+ super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
+ self.window_infer = SlidingWindowInferer(roi_size=roi_size,
+ sw_batch_size=1,
+ overlap=0.5)
+ self.augmentation = augmentation
+
+ from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet
+ self.model = DiffUNet(1, 10, 3, 1, bta=True)
+
+ self.patch_size = roi_size
+ self.best_mean_dice = 0.0
+ self.ce = nn.CrossEntropyLoss()
+ self.mse = nn.MSELoss()
+ self.train_process = 24
+ self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5,
+ momentum=0.99, nesterov=True)
+
+ self.scheduler_type = "poly"
+ self.bce = nn.BCEWithLogitsLoss()
+ self.dice_loss = DiceLoss(sigmoid=True)
+ self.cross = nn.CrossEntropyLoss()
+
+ def training_step(self, batch):
+ image, label = self.get_input(batch)
+
+ pred, pred_edge = self.model(image, label)
+
+ loss_edge = self.cross(pred_edge, label)
+ loss_seg = self.cross(pred, label)
+
+ self.log("loss_seg", loss_seg, step=self.global_step)
+ self.log("loss_edge", loss_edge, step=self.global_step)
+
+ loss = loss_edge + loss_seg
+ return loss
+
+
+ def get_input(self, batch):
+ image = batch["data"]
+ label = batch["seg"]
+ # label = self.convert_labels(label)
+
+ # label = label.float()
+ label = label[:, 0].long()
+ return image, label
+
+ def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]):
+ if pred.sum() > 0 and gt.sum() > 0:
+ d = dice(pred, gt)
+ # hd95 = metric.binary.hd95(pred, gt)
+ return np.array([d, 50])
+
+ elif gt.sum() == 0 and pred.sum() == 0:
+ return np.array([1.0, 50])
+
+ else:
+ return np.array([0.0, 50])
+
+ def validation_step(self, batch):
+ image, label = self.get_input(batch)
+
+ output = self.model(image, ddim=True)
+
+ # output = output > 0
+ output = output.argmax(dim=1)
+
+ output = output.cpu().numpy()
+ target = label.cpu().numpy()
+
+ dices = []
+
+ c = 10
+ for i in range(1, c):
+ pred_c = output == i
+ target_c = target == i
+
+ cal_dice, _ = self.cal_metric(target_c, pred_c)
+ dices.append(cal_dice)
+
+ return dices
+
+ def validation_end(self, val_outputs):
+ dices = val_outputs
+
+ dices_mean = []
+ c = 9
+ for i in range(0, c):
+ dices_mean.append(dices[i].mean())
+
+ mean_dice = sum(dices_mean) / len(dices_mean)
+
+ self.log("0", dices_mean[0], step=self.epoch)
+ self.log("1", dices_mean[1], step=self.epoch)
+ self.log("2", dices_mean[2], step=self.epoch)
+ self.log("3", dices_mean[3], step=self.epoch)
+ self.log("4", dices_mean[4], step=self.epoch)
+ self.log("5", dices_mean[5], step=self.epoch)
+ self.log("6", dices_mean[6], step=self.epoch)
+ self.log("7", dices_mean[7], step=self.epoch)
+ self.log("8", dices_mean[8], step=self.epoch)
+
+ self.log("mean_dice", mean_dice, step=self.epoch)
+
+ if mean_dice > self.best_mean_dice:
+ self.best_mean_dice = mean_dice
+ save_new_model_and_delete_last(self.model,
+ os.path.join(model_save_path,
+ f"best_model_{mean_dice:.4f}.pt"),
+ delete_symbol="best_model")
+
+ save_new_model_and_delete_last(self.model,
+ os.path.join(model_save_path,
+ f"final_model_{mean_dice:.4f}.pt"),
+ delete_symbol="final_model")
+
+
+ print(f"mean_dice is {mean_dice}")
+
+if __name__ == "__main__":
+
+ trainer = BraTSTrainer(env_type=env,
+ max_epochs=max_epoch,
+ batch_size=batch_size,
+ device=device,
+ logdir=logdir,
+ val_every=val_every,
+ num_gpus=num_gpus,
+ master_port=17759,
+ training_script=__file__)
+
+ train_ds = get_train_loader_from_train(data_dir)
+
+ trainer.train(train_dataset=train_ds, val_dataset=train_ds)
diff --git a/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py b/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py
new file mode 100644
index 0000000000000000000000000000000000000000..470b04e65af3d1032ee7f2fcb83dca8d8d441d37
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py
@@ -0,0 +1,141 @@
+import numpy as np
+from light_training.dataloading.dataset import get_test_loader_from_test
+import torch
+import torch.nn as nn
+from monai.networks.nets.basic_unet import BasicUNet
+from monai.networks.nets.swin_unetr import SwinUNETR
+from monai.inferers import SlidingWindowInferer
+from light_training.evaluation.metric import dice
+from light_training.trainer import Trainer
+from monai.utils import set_determinism
+from light_training.utils.files_helper import save_new_model_and_delete_last
+from models.uent3d import UNet3D
+from monai.networks.nets.segresnet import SegResNet
+from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
+from einops import rearrange
+from models.modelgenesis.unet3d import UNet3DModelGen
+from models.transvw.models.ynet3d import UNet3DTransVW
+from monai.networks.nets.basic_unet import BasicUNet
+from monai.networks.nets.attentionunet import AttentionUnet
+from light_training.loss.compound_losses import DC_and_CE_loss
+from light_training.loss.dice import MemoryEfficientSoftDiceLoss
+from light_training.evaluation.metric import dice
+set_determinism(123)
+from light_training.loss.compound_losses import DC_and_CE_loss
+import os
+from medpy import metric
+from light_training.prediction import Predictor
+
+
+data_dir = "./data/fullres/test"
+env = "pytorch"
+max_epoch = 1000
+batch_size = 2
+val_every = 2
+num_gpus = 1
+device = "cuda:2"
+patch_size = [128, 128, 128]
+
+class BraTSTrainer(Trainer):
+ def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
+ super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
+
+ self.patch_size = patch_size
+
+ def get_input(self, batch):
+ image = batch["data"]
+ label = batch["seg"]
+ properties = batch["properties"]
+ # label = self.convert_labels(label)
+ del batch
+ return image, label, properties
+
+ def define_model_diffunet(self):
+ from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet
+ model = DiffUNet(1, 10, 3, 1, bta=True)
+
+ model_path = "/home/xingzhaohu/zongweizhou/logs_gpu4/diffunet/model/final_model_0.8384.pt"
+ new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
+ model.load_state_dict(new_sd, strict=False)
+ model.eval()
+ window_infer = SlidingWindowInferer(roi_size=patch_size,
+ sw_batch_size=2,
+ overlap=0.3,
+ progress=True,
+ mode="gaussian")
+
+ predictor = Predictor(window_infer=window_infer,
+ mirror_axes=[0,1,2])
+ save_path = "./prediction_results/diffunet_ep1000_test"
+
+ os.makedirs(save_path, exist_ok=True)
+
+ return model, predictor, save_path
+
+ def validation_step(self, batch):
+ image, label, properties = self.get_input(batch)
+ print(properties['spacing'])
+
+ ddim = True
+ model, predictor, save_path = self.define_model_diffunet()
+
+ if ddim:
+ model_output = predictor.maybe_mirror_and_predict(image, model, device=device, ddim=True)
+ else :
+ model_output = predictor.maybe_mirror_and_predict(image, model, device=device)
+
+ model_output = predictor.predict_raw_probability(model_output,
+ properties=properties).cpu()
+
+
+ model_output = model_output.argmax(dim=0)
+
+ model_output = predictor.predict_noncrop_probability(model_output, properties)
+ print(f"save shape is {model_output.shape}")
+
+
+ seg_list = ["aorta", "gall_bladder", "kidney_left",
+ "kidney_right", "liver", "pancreas",
+ "postcava", "spleen", "stomach"]
+
+ save_path = os.path.join(save_path, properties['name'][0], "predictions")
+ # print(f"save_path is {save_path}")
+ os.makedirs(save_path, exist_ok=True)
+ for i in range(1, len(seg_list) + 1):
+ model_output_c = model_output == i
+ predictor.save_to_nii(model_output_c,
+ raw_spacing=properties['spacing'],
+ case_name=seg_list[i-1],
+ save_dir=save_path)
+
+ return 0
+
+
+ def filte_state_dict(self, sd):
+ if "module" in sd :
+ sd = sd["module"]
+ new_sd = {}
+ for k, v in sd.items():
+ k = str(k)
+ new_k = k[7:] if k.startswith("module") else k
+ new_sd[new_k] = v
+ del sd
+ return new_sd
+
+if __name__ == "__main__":
+
+ trainer = BraTSTrainer(env_type=env,
+ max_epochs=max_epoch,
+ batch_size=batch_size,
+ device=device,
+ logdir="",
+ val_every=val_every,
+ num_gpus=num_gpus,
+ master_port=17751,
+ training_script=__file__)
+
+ test_ds = get_test_loader_from_test(data_dir=data_dir)
+
+ trainer.validation_single_gpu(test_ds)
+
+
diff --git a/PRISM/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py b/PRISM/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcb78601a53ae6141d80fff7ba791e4cab922d5d
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py
@@ -0,0 +1,123 @@
+
+from light_training.preprocessing.preprocessors.default_preprocessor_liver_2017 import DefaultPreprocessor
+import numpy as np
+import pickle
+import json
+
+
+def process_train():
+ # fullres spacing is [0.5 0.70410156 0.70410156]
+ # median_shape is [602.5 516.5 516.5]
+ base_dir = "/home/xingzhaohu/data/Liver_2017"
+
+ preprocessor = DefaultPreprocessor(base_dir=base_dir)
+
+ out_spacing = [1.0, 0.76757812, 0.76757812]
+ output_dir = "./data/fullres/train/"
+
+ with open("./data_analysis_result.txt", "r") as f:
+ content = f.read().strip("\n")
+ print(content)
+ content = eval(content)
+ foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+ preprocessor.run(output_spacing=out_spacing,
+ output_dir=output_dir,
+ all_labels=[1, 2],
+ num_processes=16,
+ foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)
+
+def process_val():
+ # fullres spacing is [0.5 0.70410156 0.70410156]
+ # median_shape is [602.5 516.5 516.5]
+ base_dir = "./data/raw_data/Val"
+ image_dir = "img"
+ preprocessor = DefaultPreprocessor(base_dir=base_dir,
+ image_dir=image_dir,
+ label_dir=None,
+ )
+
+ out_spacing = [0.5, 0.70410156, 0.70410156]
+
+ with open("./data_analysis_result.txt", "r") as f:
+ content = f.read().strip("\n")
+ print(content)
+ content = eval(content)
+ foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+ output_dir = "./data/fullres/val_test/"
+ preprocessor.run(output_spacing=out_spacing,
+ output_dir=output_dir,
+ all_labels=[1, ],
+ foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel,
+ num_processes=16)
+
+def process_val_semi():
+ # fullres spacing is [0.5 0.70410156 0.70410156]
+ # median_shape is [602.5 516.5 516.5]
+ base_dir = "./data/raw_data/Val_semi_postprocess"
+ image_dir = "img"
+ preprocessor = DefaultPreprocessor(base_dir=base_dir,
+ image_dir=image_dir,
+ label_dir="gt",
+ )
+
+ out_spacing = [0.5, 0.70410156, 0.70410156]
+
+ with open("./data_analysis_result.txt", "r") as f:
+ content = f.read().strip("\n")
+ print(content)
+ content = eval(content)
+ foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
+
+ output_dir = "./data/fullres/val_semi_postprocess/"
+ preprocessor.run(output_spacing=out_spacing,
+ output_dir=output_dir,
+ all_labels=[1, ],
+ foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)
+
+
+def plan():
+ base_dir = "/home/xingzhaohu/data/Liver_2017"
+
+ preprocessor = DefaultPreprocessor(base_dir=base_dir,
+
+ )
+
+ preprocessor.run_plan()
+
+if __name__ == "__main__":
+
+ # plan()
+
+ process_train()
+ # import time
+ # s = time.time()
+ # process_val()
+ # e = time.time()
+
+ # print(f"preprocessing time is {e - s}")
+
+ # process_val_semi()
+
+
+#
+ # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir)
+
+ # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz")
+
+ # image = data["data"]
+ # label = data["seg"]
+ # print(image.shape)
+ # print(label.shape)
+
+ # import matplotlib.pyplot as plt
+
+ # for i in range(20):
+ # plt.imshow(image[0, i], cmap="gray")
+ # plt.show()
+
+ # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb")
+
+ # info = pickle.load(df)
+ # print(info)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/examples/read_pickle.py b/PRISM/SegMamba/light_training/examples/read_pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..3833df033e6aff5fcc2b505140f7f6d15ecd584e
--- /dev/null
+++ b/PRISM/SegMamba/light_training/examples/read_pickle.py
@@ -0,0 +1,8 @@
+import pickle
+
+f = "/home/xingzhaohu/jiuding_code/SegRap2023/data/fullres/train/segrap_0000.pkl"
+
+with open(f, "rb") as ff:
+ s = pickle.load(ff)
+
+ print(s)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/launch.py b/PRISM/SegMamba/light_training/launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..35bdc0017c419750c68fc99f9319f093a85eb6f1
--- /dev/null
+++ b/PRISM/SegMamba/light_training/launch.py
@@ -0,0 +1,117 @@
+# Copyright 2020 The Microsoft DeepSpeed Team
+"""
+sailing runner is the main front-end to launching multi-worker
+training jobs with DeepSpeed. By default this uses pdsh to parallel
+ssh into multiple worker nodes and launch all the necessary processes
+per rank for training.
+"""
+
+import os
+import sys
+import json
+import subprocess
+import collections
+import socket
+import signal
+import logging
+
+import torch.distributed as dist
+
+
+def fetch_hostfile(hostfile_path):
+ if not os.path.isfile(hostfile_path):
+ print("Unable to find hostfile, will proceed with training "
+ "with local resources only.")
+ return None
+ # e.g., worker-0 slots=16
+ with open(hostfile_path, 'r') as fd:
+ resource_pool = collections.OrderedDict()
+ for line in fd.readlines():
+ line = line.strip()
+ if line == '':
+ # skip empty lines
+ continue
+ try:
+ hostname, slots = line.split()
+ _, slot_count = slots.split("=")
+ slot_count = int(slot_count)
+ except ValueError as err:
+ raise err
+ if hostname in resource_pool:
+ raise ValueError(f"host {hostname} is already defined")
+ resource_pool[hostname] = slot_count
+
+ return resource_pool
+
+
+def cmd_load_hyperparam(config_path=None, format="json", encoding="utf-8"):
+ """
+ shell load arguments form argparse and config file
+ """
+ # config_path='config/config_block_large_chinese.json'
+ format = config_path.rsplit('.')[-1]
+ with open(config_path, 'r', encoding=encoding) as f:
+ if format == "json":
+ config_dict = json.load(f)
+ else:
+ raise NameError("current format%s for hyperparam file is invalid" %
+ format)
+ config_cmd = []
+ for key in config_dict:
+ if len(str(config_dict[key])) == 0:
+ config_cmd.append('--' + key)
+ else:
+ config_cmd.append('--' + key)
+ config_cmd.append(str(config_dict[key]))
+ return config_cmd
+
+
+def launch_dist(
+ env_type="DDP",
+ num_nodes=1,
+ gpus_per_node=1,
+ master_addr='localhost',
+ master_port=17500,
+ training_script='train.py',
+ ):
+
+ if num_nodes != 1:
+ print("多机多卡待测试。暂不支持。")
+ os._exit(0)
+ if env_type == "DDP":
+ cmd_launch = []
+ cmd_launch.extend([
+ # 'export NUM_NODES=' + str(num_nodes) + ';',
+ # 'export GPUS_PER_NODE=' + str(gpus_per_node) + ';',
+ # sys.executable,
+ # "python",
+ # '-m',
+ "torchrun"
+ # 'torch.distributed.launch'
+ ])
+ torch_distributed_args = [
+ '--nproc_per_node',
+ str(gpus_per_node),
+ '--nnodes',
+ str(num_nodes),
+ '--node_rank',
+ str(0),
+ '--master_addr',
+ master_addr,
+ '--master_port',
+ str(master_port),
+ ]
+ cmd_launch.extend(torch_distributed_args)
+ cmd_launch.append(training_script)
+ cmd_launch.append('--not_call_launch')
+ run_cmd = ' '.join(cmd_launch)
+ p = subprocess.Popen(run_cmd, shell=True, preexec_fn=os.setsid)
+ def signal_handler(signal, frame):
+ os.killpg(os.getpgid(p.pid), 9)
+ signal.signal(signal.SIGINT, signal_handler)
+ p.wait()
+ print ('finish')
+
+ else :
+ print("不支持的env_type")
+ os._exit(0)
diff --git a/PRISM/SegMamba/light_training/loss/__init__.py b/PRISM/SegMamba/light_training/loss/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/loss/compound_losses.py b/PRISM/SegMamba/light_training/loss/compound_losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7b1912a83e70cd8083fc6f2aafc915fae20e9e6
--- /dev/null
+++ b/PRISM/SegMamba/light_training/loss/compound_losses.py
@@ -0,0 +1,151 @@
+import torch
+from .dice import SoftDiceLoss, MemoryEfficientSoftDiceLoss
+from .robust_ce_loss import RobustCrossEntropyLoss, TopKLoss
+from .helpers import softmax_helper_dim1
+from torch import nn
+
+
class DC_and_CE_loss(nn.Module):
    """Weighted sum of soft Dice loss and cross-entropy loss.

    The two weights need not sum to one. An optional ignore_label excludes
    voxels from both terms (label-map targets only, i.e. target channel = 1).
    """

    def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None,
                 dice_class=SoftDiceLoss):
        """
        :param soft_dice_kwargs: kwargs forwarded to `dice_class`
        :param ce_kwargs: kwargs forwarded to RobustCrossEntropyLoss
        :param weight_ce: weight of the CE term
        :param weight_dice: weight of the Dice term
        :param ignore_label: label value to exclude from the loss
        :param dice_class: Dice implementation to instantiate
        """
        super().__init__()
        if ignore_label is not None:
            ce_kwargs['ignore_index'] = ignore_label

        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.ignore_label = ignore_label

        self.ce = RobustCrossEntropyLoss(**ce_kwargs)
        self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)

    def forward(self, net_output: torch.Tensor, target: torch.Tensor):
        """
        target must be b, c, x, y(, z) with c=1
        """
        num_fg = None
        if self.ignore_label is None:
            mask = None
            target_dice = target
        else:
            assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \
                                         '(DC_and_CE_loss)'
            mask = (target != self.ignore_label).bool()
            # replace the ignore label with a valid one; those voxels are
            # masked out of the Dice term anyway so the value is irrelevant
            target_dice = torch.clone(target)
            target_dice[target == self.ignore_label] = 0
            num_fg = mask.sum()

        dc_loss = 0 if self.weight_dice == 0 else self.dc(net_output, target_dice, loss_mask=mask)

        # skip the CE term entirely if it is unweighted or nothing is valid
        skip_ce = self.weight_ce == 0 or (self.ignore_label is not None and not num_fg > 0)
        ce_loss = 0 if skip_ce else self.ce(net_output, target[:, 0].long())

        return self.weight_ce * ce_loss + self.weight_dice * dc_loss
+
+
class DC_and_BCE_loss(nn.Module):
    """Weighted sum of soft Dice loss and BCE-with-logits loss for region-based
    (one-hot) targets.

    DO NOT APPLY NONLINEARITY IN YOUR NETWORK! If use_ignore_label is set, the
    ignore mask is expected in the LAST target channel (target[:, -1]).
    """

    def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,
                 dice_class=MemoryEfficientSoftDiceLoss):
        """
        :param bce_kwargs: kwargs forwarded to nn.BCEWithLogitsLoss
        :param soft_dice_kwargs: kwargs forwarded to `dice_class`
        :param weight_ce: weight of the BCE term
        :param weight_dice: weight of the Dice term
        :param use_ignore_label: whether target[:, -1] encodes ignored voxels
        :param dice_class: Dice implementation to instantiate
        """
        super().__init__()
        if use_ignore_label:
            # per-voxel losses are needed so the ignore mask can be applied
            bce_kwargs['reduction'] = 'none'

        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.use_ignore_label = use_ignore_label

        self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)
        self.dc = dice_class(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)

    def forward(self, net_output: torch.Tensor, target: torch.Tensor):
        if not self.use_ignore_label:
            mask = None
            target_regions = target
        else:
            # last channel is the ignore map; invert so True == compute loss
            mask = (1 - target[:, -1:]).bool()
            target_regions = torch.clone(target[:, :-1])

        dc_loss = self.dc(net_output, target_regions, loss_mask=mask)
        if mask is None:
            ce_loss = self.ce(net_output, target_regions)
        else:
            # mean over valid voxels only (clip avoids division by zero)
            ce_loss = (self.ce(net_output, target_regions) * mask).sum() / torch.clip(mask.sum(), min=1e-8)

        return self.weight_ce * ce_loss + self.weight_dice * dc_loss
+
+
class DC_and_topk_loss(nn.Module):
    """Weighted sum of soft Dice loss and top-k cross-entropy loss.

    Weights for CE and Dice do not need to sum to one.
    """

    def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None):
        """
        :param soft_dice_kwargs: kwargs forwarded to SoftDiceLoss
        :param ce_kwargs: kwargs forwarded to TopKLoss
        :param weight_ce: weight of the top-k CE term
        :param weight_dice: weight of the Dice term
        :param ignore_label: label value to exclude from the loss
        """
        super().__init__()
        if ignore_label is not None:
            ce_kwargs['ignore_index'] = ignore_label

        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.ignore_label = ignore_label

        self.ce = TopKLoss(**ce_kwargs)
        self.dc = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)

    def forward(self, net_output: torch.Tensor, target: torch.Tensor):
        """
        target must be b, c, x, y(, z) with c=1
        """
        if self.ignore_label is not None:
            # fixed: the message used to name DC_and_CE_loss (copy-paste error)
            assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \
                                         '(DC_and_topk_loss)'
            mask = (target != self.ignore_label).bool()
            # remove ignore label from target, replace with one of the known labels.
            # The value does not matter because those voxels are masked out anyway.
            target_dice = torch.clone(target)
            target_dice[target == self.ignore_label] = 0
            num_fg = mask.sum()
        else:
            target_dice = target
            mask = None

        dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \
            if self.weight_dice != 0 else 0
        ce_loss = self.ce(net_output, target) \
            if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0

        result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
        return result
diff --git a/PRISM/SegMamba/light_training/loss/ddp_allgather.py b/PRISM/SegMamba/light_training/loss/ddp_allgather.py
new file mode 100644
index 0000000000000000000000000000000000000000..c42b3ef654f361904d5fe1868621b3f6f5cd29a6
--- /dev/null
+++ b/PRISM/SegMamba/light_training/loss/ddp_allgather.py
@@ -0,0 +1,49 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional, Tuple
+
+import torch
+from torch import distributed
+
+
def print_if_rank0(*args):
    """Print only on rank 0 of the default process group (requires an
    initialized torch.distributed environment)."""
    rank = distributed.get_rank()
    if rank == 0:
        print(*args)
+
+
class AllGatherGrad(torch.autograd.Function):
    """Differentiable all_gather (adapted from pytorch lightning).

    Forward gathers `tensor` from every rank and stacks the results into shape
    (world_size, *tensor.shape). Backward all-reduces the incoming gradient
    across ranks and hands each rank the slice belonging to its own input.
    """

    @staticmethod
    def forward(
        ctx: Any,
        tensor: torch.Tensor,
        group: Optional["torch.distributed.ProcessGroup"] = None,
    ) -> torch.Tensor:
        ctx.group = group

        world_size = torch.distributed.get_world_size()
        buckets = [torch.zeros_like(tensor) for _ in range(world_size)]
        torch.distributed.all_gather(buckets, tensor, group=group)

        return torch.stack(buckets, dim=0)

    @staticmethod
    def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
        grads = torch.cat(grad_output)
        torch.distributed.all_reduce(grads, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group)
        # second return is the (non-existent) gradient w.r.t. `group`
        return grads[torch.distributed.get_rank()], None
+
diff --git a/PRISM/SegMamba/light_training/loss/deepsupervision.py b/PRISM/SegMamba/light_training/loss/deepsupervision.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8a3cf639c7b961317859aadf55c93c9744de657
--- /dev/null
+++ b/PRISM/SegMamba/light_training/loss/deepsupervision.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+import numpy as np
+
class DeepSupervisionWrapper(nn.Module):
    """Apply a loss to several resolution levels and sum the weighted results.

    forward(*args) expects each positional argument to be a tuple/list of the
    same length; level i contributes weight[i] * loss(args[0][i], args[1][i], ...).
    With weight_factors=None every level gets weight 1.
    """

    def __init__(self, loss, weight_factors=None):
        super(DeepSupervisionWrapper, self).__init__()
        self.weight_factors = weight_factors
        self.loss = loss

    def forward(self, *args):
        for arg in args:
            assert isinstance(arg, (tuple, list)), "all args must be either tuple or list, got %s" % type(arg)
        # equal lengths are assumed, not checked -- this runs on every step

        if self.weight_factors is None:
            weights = [1] * len(args[0])
        else:
            weights = self.weight_factors

        # seed the accumulator with level 0 so it lands on the right device
        total = weights[0] * self.loss(*[per_arg[0] for per_arg in args])
        for level, level_inputs in enumerate(zip(*args)):
            if level == 0:
                continue
            total = total + weights[level] * self.loss(*level_inputs)
        return total
+
+
+
class AutoDeepSupervision(nn.Module):
    """Deep supervision with automatically derived level weights.

    Weights halve per level (1, 1/2, 1/4, ...), the LAST level's weight is set
    to zero, and the weights are renormalized to sum to 1. Labels for each
    level are produced by nearest-neighbour rescaling of the full-size label.
    """

    def __init__(self, loss, label_scale) -> None:
        super().__init__()

        weights = np.array([1 / (2 ** level) for level in range(len(label_scale))])
        weights[-1] = 0  # the coarsest output does not contribute
        weights = weights / weights.sum()
        print(f"loss weights is {weights}")

        self.warpper = DeepSupervisionWrapper(loss, weights)
        self.label_scale = label_scale

    def forward(self, preds, label):
        # one prediction per supervision level is required
        assert len(preds) == len(self.label_scale)
        labels = [
            torch.nn.functional.interpolate(label, scale_factor=scale, mode="nearest")
            for scale in self.label_scale
        ]
        return self.warpper(preds, labels)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/loss/dice.py b/PRISM/SegMamba/light_training/loss/dice.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ae7d0dd75c9d8582279ea5dd28a9c13f9f533a8
--- /dev/null
+++ b/PRISM/SegMamba/light_training/loss/dice.py
@@ -0,0 +1,192 @@
+from typing import Callable
+
+import torch
+from .ddp_allgather import AllGatherGrad
+from .tensor_utilities import sum_tensor
+from torch import nn
+
+
class SoftDiceLoss(nn.Module):
    """Soft Dice loss; returns the negative mean Dice coefficient.

    batch_dice additionally reduces over the batch axis; with ddp=True the
    tp/fp/fn statistics are gathered across ranks first. do_bg=False drops the
    background channel before averaging.
    """

    def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1.,
                 ddp: bool = True, clip_tp: float = None):
        super(SoftDiceLoss, self).__init__()

        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth
        self.clip_tp = clip_tp
        self.ddp = ddp

    def forward(self, x, y, loss_mask=None):
        # reduce over spatial axes; add the batch axis for batch dice
        reduce_axes = list(range(2, x.ndim))
        if self.batch_dice:
            reduce_axes = [0] + reduce_axes

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, reduce_axes, loss_mask, False)

        if self.ddp and self.batch_dice:
            tp = AllGatherGrad.apply(tp).sum(0)
            fp = AllGatherGrad.apply(fp).sum(0)
            fn = AllGatherGrad.apply(fn).sum(0)

        if self.clip_tp is not None:
            tp = torch.clip(tp, min=self.clip_tp, max=None)

        numerator = 2 * tp + self.smooth
        # clip guards against a zero denominator when smooth == 0
        denominator = torch.clip(2 * tp + fp + fn + self.smooth, 1e-8)
        dc = numerator / denominator

        if not self.do_bg:
            dc = dc[1:] if self.batch_dice else dc[:, 1:]

        return -dc.mean()
+
class MemoryEfficientSoftDiceLoss(nn.Module):
    def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1.,
                 ddp: bool = True):
        """
        Soft Dice loss that computes intersect / sum_pred / sum_gt directly
        instead of materializing full tp/fp/fn tensors.
        saves 1.6 GB on Dataset017 3d_lowres

        :param apply_nonlin: optional nonlinearity applied to the prediction
            (e.g. softmax over channels or sigmoid)
        :param batch_dice: also reduce over the batch axis before computing Dice
        :param do_bg: if False, channel 0 (background) is excluded
        :param smooth: additive smoothing term in numerator and denominator
        :param ddp: gather statistics across ranks when batch_dice is used
        """
        super(MemoryEfficientSoftDiceLoss, self).__init__()

        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth
        self.ddp = ddp

    def forward(self, x, y, loss_mask=None):
        # x: (b, c, *spatial) logits/probabilities; y: label map (b, 1, *spatial)
        # or one-hot (b, c, *spatial); loss_mask: (b, 1, *spatial) with 1 = valid
        # -- assumed from the reshaping/scatter logic below, TODO confirm callers
        shp_x, shp_y = x.shape, y.shape

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        if not self.do_bg:
            x = x[:, 1:]

        # make everything shape (b, c)
        axes = list(range(2, len(shp_x)))

        # one-hot construction carries no gradient, so keep it out of autograd
        with torch.no_grad():
            if len(shp_x) != len(shp_y):
                # label map without channel axis: add one
                y = y.view((shp_y[0], 1, *shp_y[1:]))

            if all([i == j for i, j in zip(shp_x, shp_y)]):
                # if this is the case then gt is probably already a one hot encoding
                y_onehot = y
            else:
                gt = y.long()
                # bool one-hot keeps memory low; shp_x is the pre-slicing shape,
                # so the do_bg slice below stays consistent with x
                y_onehot = torch.zeros(shp_x, device=x.device, dtype=torch.bool)
                y_onehot.scatter_(1, gt, 1)

            if not self.do_bg:
                y_onehot = y_onehot[:, 1:]
            sum_gt = y_onehot.sum(axes) if loss_mask is None else (y_onehot * loss_mask).sum(axes)

        # these two need gradients, so they live outside no_grad
        intersect = (x * y_onehot).sum(axes) if loss_mask is None else (x * y_onehot * loss_mask).sum(axes)
        sum_pred = x.sum(axes) if loss_mask is None else (x * loss_mask).sum(axes)

        if self.ddp and self.batch_dice:
            # gather per-sample statistics from all ranks, then reduce
            intersect = AllGatherGrad.apply(intersect).sum(0)
            sum_pred = AllGatherGrad.apply(sum_pred).sum(0)
            sum_gt = AllGatherGrad.apply(sum_gt).sum(0)

        if self.batch_dice:
            intersect = intersect.sum(0)
            sum_pred = sum_pred.sum(0)
            sum_gt = sum_gt.sum(0)

        # clip protects against division by zero when smooth == 0
        dc = (2 * intersect + self.smooth) / (torch.clip(sum_gt + sum_pred + self.smooth, 1e-8))

        dc = dc.mean()
        return -dc
+
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
    """
    Compute (soft) true/false positives/negatives, optionally masked and summed.

    net_output must be (b, c, x, y(, z)).
    gt is either a label map (shape (b, 1, x, y(, z)) or (b, x, y(, z))) or a
    one hot encoding (b, c, x, y(, z)).
    If mask is provided it must have shape (b, 1, x, y(, z)); 1 marks valid voxels.
    :param axes: axes to sum over; can be () for no summation
    :param square: if True, square tp/fp/fn/tn before summation
    :return: tp, fp, fn, tn
    """
    if axes is None:
        axes = tuple(range(2, len(net_output.size())))

    pred_shape = net_output.shape

    # building the one-hot target needs no gradient tracking
    with torch.no_grad():
        if len(pred_shape) != len(gt.shape):
            # label map without channel axis: insert one
            gt = gt.view((gt.shape[0], 1, *gt.shape[1:]))

        if all(i == j for i, j in zip(pred_shape, gt.shape)):
            # matching shapes: gt is most likely already one-hot encoded
            y_onehot = gt
        else:
            gt = gt.long()
            y_onehot = torch.zeros(pred_shape, device=net_output.device)
            y_onehot.scatter_(1, gt, 1)

    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    tn = (1 - net_output) * (1 - y_onehot)

    if mask is not None:
        with torch.no_grad():
            # broadcast the single-channel mask over all prediction channels
            tiled_mask = torch.tile(mask, (1, tp.shape[1], *[1 for _ in range(2, len(tp.shape))]))
        tp *= tiled_mask
        fp *= tiled_mask
        fn *= tiled_mask
        tn *= tiled_mask

    if square:
        tp = tp ** 2
        fp = fp ** 2
        fn = fn ** 2
        tn = tn ** 2

    if len(axes) > 0:
        tp = sum_tensor(tp, axes, keepdim=False)
        fp = sum_tensor(fp, axes, keepdim=False)
        fn = sum_tensor(fn, axes, keepdim=False)
        tn = sum_tensor(tn, axes, keepdim=False)

    return tp, fp, fn, tn
+
+
if __name__ == '__main__':
    # Quick numerical equivalence check between the two Dice implementations.
    # fixed: import the helper from this package instead of requiring nnUNetv2
    # to be installed (the file already ships its own helpers module).
    from light_training.loss.helpers import softmax_helper_dim1
    pred = torch.rand((2, 3, 32, 32, 32))
    ref = torch.randint(0, 3, (2, 32, 32, 32))

    dl_old = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False)
    dl_new = MemoryEfficientSoftDiceLoss(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False)
    res_old = dl_old(pred, ref)
    res_new = dl_new(pred, ref)
    print(res_old, res_new)
diff --git a/PRISM/SegMamba/light_training/loss/helpers.py b/PRISM/SegMamba/light_training/loss/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..42448e3f9c3de88ba13568ff7585797ee29607ab
--- /dev/null
+++ b/PRISM/SegMamba/light_training/loss/helpers.py
@@ -0,0 +1,27 @@
+import torch
+
+
def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor:
    """Softmax over dimension 0."""
    return x.softmax(dim=0)
+
+
def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor:
    """Softmax over dimension 1 (the channel axis of NCHW-style tensors)."""
    return x.softmax(dim=1)
+
+
def empty_cache(device: torch.device):
    """Release cached allocator memory for the given device; no-op for CPU and
    other device types."""
    if device.type == 'cuda':
        torch.cuda.empty_cache()
    elif device.type == 'mps':
        from torch import mps
        mps.empty_cache()
+
+
class dummy_context(object):
    """No-op context manager (stand-in for e.g. torch.autocast when disabled)."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # returning None lets exceptions propagate
        return None
diff --git a/PRISM/SegMamba/light_training/loss/robust_ce_loss.py b/PRISM/SegMamba/light_training/loss/robust_ce_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad46659193ce1dbbff8ee6829bbf5e4223b6ed8f
--- /dev/null
+++ b/PRISM/SegMamba/light_training/loss/robust_ce_loss.py
@@ -0,0 +1,33 @@
+import torch
+from torch import nn, Tensor
+import numpy as np
+
+
class RobustCrossEntropyLoss(nn.CrossEntropyLoss):
    """
    Compatibility layer: accepts float targets that carry an extra channel
    dimension of size 1 and squeezes/casts them before delegating to
    nn.CrossEntropyLoss.

    input must be logits, not probabilities!
    """
    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        if len(target.shape) == len(input.shape):
            assert target.shape[1] == 1
            target = target[:, 0]
        return super().forward(input, target.long())


class TopKLoss(RobustCrossEntropyLoss):
    """
    Cross-entropy averaged over only the hardest k percent of voxels.

    input must be logits, not probabilities!
    :param k: percentage (0-100] of voxels to keep, ranked by per-voxel loss
    """
    def __init__(self, weight=None, ignore_index: int = -100, k: float = 10, label_smoothing: float = 0):
        self.k = k
        # fixed: the original passed the deprecated positional size_average=False
        # together with reduce=False; reduction='none' is the modern equivalent
        # (legacy reduction rules map (False, False) -> 'none') and emits no warning
        super(TopKLoss, self).__init__(weight=weight, ignore_index=ignore_index, reduction='none',
                                       label_smoothing=label_smoothing)

    def forward(self, inp, target):
        target = target[:, 0].long()
        res = super(TopKLoss, self).forward(inp, target)
        num_voxels = np.prod(res.shape, dtype=np.int64)
        # keep the k% largest per-voxel losses and average them
        res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False)
        return res.mean()
+
diff --git a/PRISM/SegMamba/light_training/loss/tensor_utilities.py b/PRISM/SegMamba/light_training/loss/tensor_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..b16ffcac2e46d93c19522937098f0af5b208aca7
--- /dev/null
+++ b/PRISM/SegMamba/light_training/loss/tensor_utilities.py
@@ -0,0 +1,15 @@
+from typing import Union, List, Tuple
+
+import numpy as np
+import torch
+
+
def sum_tensor(inp: torch.Tensor, axes: Union[np.ndarray, Tuple, List], keepdim: bool = False) -> torch.Tensor:
    """Sum a tensor over several axes (duplicates ignored).

    Without keepdim, axes are reduced highest-first so the remaining indices
    stay valid while collapsing dimensions one by one.
    """
    unique_axes = np.unique(axes).astype(int)
    if keepdim:
        for axis in unique_axes:
            inp = inp.sum(int(axis), keepdim=True)
        return inp
    for axis in sorted(unique_axes, reverse=True):
        inp = inp.sum(int(axis))
    return inp
diff --git a/PRISM/SegMamba/light_training/prediction.py b/PRISM/SegMamba/light_training/prediction.py
new file mode 100644
index 0000000000000000000000000000000000000000..a90f5c943cab192b083a8d2d0b414593baa56430
--- /dev/null
+++ b/PRISM/SegMamba/light_training/prediction.py
@@ -0,0 +1,227 @@
+
+import torch
+import numpy as np
+import SimpleITK as sitk
+import os
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape
+from scipy import ndimage
+import skimage.measure as measure
+
class dummy_context(object):
    """No-op context manager used in place of torch.autocast on non-CUDA devices."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # falsy return: exceptions propagate normally
        return None
+
def large_connected_domain(label):
    """Keep only the largest connected component of a binary mask, fill its
    holes, and return it as uint8.

    :param label: binary (0/1) array
    :return: uint8 array of the same shape
    """
    cd, num = measure.label(label, return_num=True, connectivity=1)
    # fixed: with no foreground at all, num == 0 and volume_sort[-1] below
    # would raise IndexError; return an empty mask instead
    if num == 0:
        return np.zeros_like(label, dtype=np.uint8)
    volume = np.zeros([num])
    for k in range(num):
        volume[k] = ((cd == (k + 1)).astype(np.uint8)).sum()
    volume_sort = np.argsort(volume)
    # component with the largest voxel count
    label = (cd == (volume_sort[-1] + 1)).astype(np.uint8)
    label = ndimage.binary_fill_holes(label)
    label = label.astype(np.uint8)
    return label
+
class Predictor:
    """Sliding-window predictor with optional test-time mirroring plus helpers
    to map predictions back to the original image geometry and save them.

    :param window_infer: callable (x, model, **kwargs) -> logits, e.g. a MONAI
        SlidingWindowInferer -- assumed from usage, TODO confirm
    :param mirror_axes: subset of (0, 1, 2) spatial axes to flip for test-time
        augmentation, or None to disable mirroring
    """

    def __init__(self, window_infer, mirror_axes=None) -> None:
        self.window_infer = window_infer
        self.mirror_axes = mirror_axes

    @staticmethod
    def predict_raw_probability(model_output, properties):
        """Resample (C, d, w, h) logits back to the crop shape before resampling.

        Tries trilinear interpolation on the model's device first; on a
        RuntimeError (typically CUDA OOM) it retries channel-by-channel on the
        CPU. Returns half precision on success, float32 on the CPU fallback.
        """
        if len(model_output.shape) == 5:
            # drop the batch dimension
            model_output = model_output[0]

        device = model_output.device
        shape_after_cropping_before_resample = properties["shape_after_cropping_before_resample"]
        d, w, h = shape_after_cropping_before_resample[0], shape_after_cropping_before_resample[1], shape_after_cropping_before_resample[2]
        print(f"resample....")
        channel = model_output.shape[0]

        try:
            with torch.no_grad():
                resample_output = torch.zeros((channel, d, w, h), dtype=torch.half, device=device)
                for c in range(channel):
                    resample_output[c] = torch.nn.functional.interpolate(model_output[c][None, None], mode="trilinear", size=(d, w, h))[0, 0]

            del model_output

        except RuntimeError:
            # most likely GPU OOM: redo the interpolation on the CPU
            with torch.no_grad():
                model_output = model_output.to("cpu")
                resample_output = torch.zeros((channel, d, w, h))
                for c in range(channel):
                    resample_output[c] = torch.nn.functional.interpolate(model_output[c][None, None], mode="trilinear", size=(d, w, h))[0, 0]
                del model_output

        torch.cuda.empty_cache()

        return resample_output

    @staticmethod
    def predict_noncrop_probability(model_output, properties):
        """Paste a cropped prediction (3d label map or 4d channel map) back into
        an array of the original, pre-cropping shape (zeros elsewhere)."""
        print(f"restoring noncrop region......")
        if isinstance(model_output, torch.Tensor):
            model_output = model_output.cpu().numpy()

        torch.cuda.empty_cache()

        if len(model_output.shape) == 3:
            shape_before_cropping = properties["shape_before_cropping"]
            if isinstance(shape_before_cropping[0], torch.Tensor):
                shape_before_cropping = [shape_before_cropping[0].item(), shape_before_cropping[1].item(), shape_before_cropping[2].item()]

            none_crop_pred = np.zeros([shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]], dtype=np.uint8)
            bbox_used_for_cropping = properties["bbox_used_for_cropping"]

            none_crop_pred[
                bbox_used_for_cropping[0][0]: bbox_used_for_cropping[0][1],
                bbox_used_for_cropping[1][0]: bbox_used_for_cropping[1][1],
                bbox_used_for_cropping[2][0]: bbox_used_for_cropping[2][1]] = model_output
            del model_output
            return none_crop_pred

        elif len(model_output.shape) == 4:
            shape_before_cropping = properties["shape_before_cropping"]
            if isinstance(shape_before_cropping[0], torch.Tensor):
                shape_before_cropping = [shape_before_cropping[0].item(), shape_before_cropping[1].item(), shape_before_cropping[2].item()]

            # channel dimension is kept as-is; only spatial dims are restored
            none_crop_pred = np.zeros([model_output.shape[0], shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]], dtype=np.uint8)
            bbox_used_for_cropping = properties["bbox_used_for_cropping"]

            none_crop_pred[
                :,
                bbox_used_for_cropping[0][0]: bbox_used_for_cropping[0][1],
                bbox_used_for_cropping[1][0]: bbox_used_for_cropping[1][1],
                bbox_used_for_cropping[2][0]: bbox_used_for_cropping[2][1]] = model_output
            del model_output

            return none_crop_pred

        else:
            print(f"restore crop error")
            exit(0)

    def maybe_mirror_and_predict(self, x, model, device=torch.device("cpu"), **kwargs) -> torch.Tensor:
        """Sliding-window inference with optional flip-based TTA; the averaged
        prediction is accumulated on the CPU to keep GPU memory low."""
        window_infer = self.window_infer
        if type(device) is str:
            device = torch.device(device)

        model.to(device)
        x = x.to(device)
        with torch.no_grad():
            print(f"predicting....")
            with torch.autocast("cuda", enabled=True) if device.type == "cuda" else dummy_context():
                prediction = window_infer(x, model, **kwargs).cpu()
                mirror_axes = self.mirror_axes

                if mirror_axes is not None:
                    # x is 5d for 3d images (4d for 2d), so the largest valid
                    # mirror axis is len(x.shape) - 3
                    assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!'

                    num_predictions = 2 ** len(mirror_axes)
                    if 0 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (2,)), model, **kwargs), (2,)).cpu()
                        torch.cuda.empty_cache()
                    if 1 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (3,)), model, **kwargs), (3,)).cpu()
                        torch.cuda.empty_cache()
                    if 2 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (4,)), model, **kwargs), (4,)).cpu()
                        torch.cuda.empty_cache()
                    if 0 in mirror_axes and 1 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (2, 3)), model, **kwargs), (2, 3)).cpu()
                        torch.cuda.empty_cache()
                    if 0 in mirror_axes and 2 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (2, 4)), model, **kwargs), (2, 4)).cpu()
                        torch.cuda.empty_cache()
                    if 1 in mirror_axes and 2 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (3, 4)), model, **kwargs), (3, 4)).cpu()
                        torch.cuda.empty_cache()
                    if 0 in mirror_axes and 1 in mirror_axes and 2 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (2, 3, 4)), model, **kwargs), (2, 3, 4)).cpu()
                        torch.cuda.empty_cache()
                    prediction /= num_predictions

        torch.cuda.empty_cache()
        del x
        return prediction

    def maybe_mirror_and_predict_cuda(self, x, model, device=torch.device("cpu"), **kwargs) -> torch.Tensor:
        """Like maybe_mirror_and_predict, but the accumulated prediction stays
        on the inference device instead of being moved to the CPU."""
        window_infer = self.window_infer
        if type(device) is str:
            device = torch.device(device)

        model.to(device)
        x = x.to(device)
        with torch.no_grad():
            print(f"predicting....")
            with torch.autocast("cuda", enabled=True) if device.type == "cuda" else dummy_context():
                prediction = window_infer(x, model, **kwargs)
                mirror_axes = self.mirror_axes

                if mirror_axes is not None:
                    # x is 5d for 3d images (4d for 2d), so the largest valid
                    # mirror axis is len(x.shape) - 3
                    assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!'

                    num_predictions = 2 ** len(mirror_axes)
                    if 0 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (2,)), model, **kwargs), (2,))
                        torch.cuda.empty_cache()
                    if 1 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (3,)), model, **kwargs), (3,))
                        torch.cuda.empty_cache()
                    if 2 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (4,)), model, **kwargs), (4,))
                        torch.cuda.empty_cache()
                    if 0 in mirror_axes and 1 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (2, 3)), model, **kwargs), (2, 3))
                        torch.cuda.empty_cache()
                    if 0 in mirror_axes and 2 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (2, 4)), model, **kwargs), (2, 4))
                        torch.cuda.empty_cache()
                    if 1 in mirror_axes and 2 in mirror_axes:
                        prediction += torch.flip(window_infer(torch.flip(x, (3, 4)), model, **kwargs), (3, 4))
                        torch.cuda.empty_cache()
                    if 0 in mirror_axes and 1 in mirror_axes and 2 in mirror_axes:
                        # fixed: this branch used to call .cpu() on the flipped
                        # prediction (copied from the CPU variant), which raised a
                        # device-mismatch error when prediction lives on CUDA
                        prediction += torch.flip(window_infer(torch.flip(x, (2, 3, 4)), model, **kwargs), (2, 3, 4))
                        torch.cuda.empty_cache()
                    prediction /= num_predictions

        torch.cuda.empty_cache()
        del x
        return prediction

    def save_to_nii(self, return_output,
                    raw_spacing,
                    save_dir,
                    case_name,
                    postprocess=False):
        """Write a label map to <save_dir>/<case_name>.nii.gz with the raw
        spacing; optionally keep only the largest connected component first."""
        return_output = return_output.astype(np.uint8)

        if postprocess:
            return_output = large_connected_domain(return_output)

        return_output = sitk.GetImageFromArray(return_output)
        if isinstance(raw_spacing[0], torch.Tensor):
            raw_spacing = [raw_spacing[0].item(), raw_spacing[1].item(), raw_spacing[2].item()]

        return_output.SetSpacing((raw_spacing[0], raw_spacing[1], raw_spacing[2]))

        sitk.WriteImage(return_output, os.path.join(save_dir, f"{case_name}.nii.gz"))

        print(f"{os.path.join(save_dir, f'{case_name}.nii.gz')} is saved successfully")
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/prediction_fp32.py b/PRISM/SegMamba/light_training/prediction_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..be3776bbade56b98a1fb55143dd26b355607d491
--- /dev/null
+++ b/PRISM/SegMamba/light_training/prediction_fp32.py
@@ -0,0 +1,142 @@
+
+import torch
+import numpy as np
+import SimpleITK as sitk
+import os
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape
+from scipy import ndimage
+import skimage.measure as measure
+
class dummy_context(object):
    """No-op context manager (placeholder for torch.autocast when disabled)."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # falsy return value lets exceptions propagate
        return None
+
def large_connected_domain(label):
    """Keep only the largest connected component of a binary mask, fill its
    holes, and return it as uint8.

    :param label: binary (0/1) array
    :return: uint8 array of the same shape
    """
    cd, num = measure.label(label, return_num=True, connectivity=1)
    # fixed: with no foreground at all, num == 0 and volume_sort[-1] below
    # would raise IndexError; return an empty mask instead
    if num == 0:
        return np.zeros_like(label, dtype=np.uint8)
    volume = np.zeros([num])
    for k in range(num):
        volume[k] = ((cd == (k + 1)).astype(np.uint8)).sum()
    volume_sort = np.argsort(volume)
    # component with the largest voxel count
    label = (cd == (volume_sort[-1] + 1)).astype(np.uint8)
    label = ndimage.binary_fill_holes(label)
    label = label.astype(np.uint8)
    return label
+
class Predictor:
    """Full-precision sliding-window predictor with optional flip-based TTA and
    helpers to restore predictions to the original image geometry.

    :param window_infer: callable (x, model, **kwargs) -> logits, e.g. a MONAI
        SlidingWindowInferer -- assumed from usage, TODO confirm
    :param mirror_axes: subset of (0, 1, 2) spatial axes to flip for test-time
        augmentation, or None to disable mirroring
    """

    def __init__(self, window_infer, mirror_axes=None) -> None:
        self.window_infer = window_infer
        self.mirror_axes = mirror_axes

    @staticmethod
    def predict_raw_probability(model_output, properties):
        """Resample (C, d, w, h) logits back to the crop shape before
        resampling, using spacing-aware resampling on the CPU (numpy)."""
        if len(model_output.shape) == 5:
            # drop the batch dimension
            model_output = model_output[0]

        shape_before_resample = model_output.shape
        if isinstance(model_output, torch.Tensor):
            model_output = model_output.cpu().numpy()

        spacing = properties["spacing"]
        new_spacing = [spacing[0].item(), spacing[1].item(), spacing[2].item()]
        # spacing is stored (x, y, z); resampling expects (z, y, x)
        new_spacing_trans = new_spacing[::-1]

        print(f"current spacing is {[0.5, 0.70410156, 0.70410156]}, new_spacing is {new_spacing_trans}")
        shape_after_cropping_before_resample = properties["shape_after_cropping_before_resample"]
        d, w, h = shape_after_cropping_before_resample[0].item(), shape_after_cropping_before_resample[1].item(), shape_after_cropping_before_resample[2].item()
        # NOTE(review): the current spacing is hard-coded for one dataset --
        # confirm it matches the preprocessing configuration
        model_output = resample_data_or_seg_to_shape(model_output,
                                                     new_shape=(d, w, h),
                                                     current_spacing=[0.5, 0.70410156, 0.70410156],
                                                     new_spacing=new_spacing_trans,
                                                     is_seg=False,
                                                     order=1,
                                                     order_z=0)
        shape_after_resample = model_output.shape
        print(f"before resample shape: {shape_before_resample}, after resample shape: {shape_after_resample}")

        return model_output

    @staticmethod
    def apply_nonlinear(model_output, nonlinear_type="softmax"):
        """Turn (C, d, w, h) logits into either an argmax label map (softmax)
        or per-channel probabilities (sigmoid); returns a numpy array."""
        if isinstance(model_output, np.ndarray):
            model_output = torch.from_numpy(model_output)
        assert len(model_output.shape) == 4

        assert nonlinear_type in ["softmax", "sigmoid"]

        if nonlinear_type == "softmax":
            model_output = torch.softmax(model_output, dim=0)
            model_output = model_output.argmax(dim=0)
        else:
            model_output = torch.sigmoid(model_output)

        return model_output.numpy()

    @staticmethod
    def predict_noncrop_probability(model_output, properties):
        """Paste a cropped 3d prediction back into an array of the original
        (pre-cropping) shape; voxels outside the crop bbox stay zero."""
        assert len(model_output.shape) == 3

        shape_before_cropping = properties["shape_before_cropping"]
        none_crop_pred = np.zeros([shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]], dtype=np.uint8)
        bbox_used_for_cropping = properties["bbox_used_for_cropping"]

        none_crop_pred[
            bbox_used_for_cropping[0][0]: bbox_used_for_cropping[0][1],
            bbox_used_for_cropping[1][0]: bbox_used_for_cropping[1][1],
            bbox_used_for_cropping[2][0]: bbox_used_for_cropping[2][1]] = model_output

        # fixed: this used to return model_output (the cropped array), silently
        # discarding the restored full-size prediction built above
        return none_crop_pred

    def maybe_mirror_and_predict(self, x, model, **kwargs) -> torch.Tensor:
        """Sliding-window inference with optional flip-based TTA; runs on
        whatever device x/model already live on."""
        window_infer = self.window_infer

        with torch.no_grad():
            prediction = window_infer(x, model, **kwargs)
            mirror_axes = self.mirror_axes

            if mirror_axes is not None:
                # x is 5d for 3d images (4d for 2d), so the largest valid
                # mirror axis is len(x.shape) - 3
                assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!'

                num_predictions = 2 ** len(mirror_axes)
                if 0 in mirror_axes:
                    prediction += torch.flip(window_infer(torch.flip(x, (2,)), model, **kwargs), (2,))
                if 1 in mirror_axes:
                    prediction += torch.flip(window_infer(torch.flip(x, (3,)), model, **kwargs), (3,))
                if 2 in mirror_axes:
                    prediction += torch.flip(window_infer(torch.flip(x, (4,)), model, **kwargs), (4,))
                if 0 in mirror_axes and 1 in mirror_axes:
                    prediction += torch.flip(window_infer(torch.flip(x, (2, 3)), model, **kwargs), (2, 3))
                if 0 in mirror_axes and 2 in mirror_axes:
                    prediction += torch.flip(window_infer(torch.flip(x, (2, 4)), model, **kwargs), (2, 4))
                if 1 in mirror_axes and 2 in mirror_axes:
                    prediction += torch.flip(window_infer(torch.flip(x, (3, 4)), model, **kwargs), (3, 4))
                if 0 in mirror_axes and 1 in mirror_axes and 2 in mirror_axes:
                    prediction += torch.flip(window_infer(torch.flip(x, (2, 3, 4)), model, **kwargs), (2, 3, 4))
                prediction /= num_predictions

        return prediction

    def save_to_nii(self, return_output,
                    raw_spacing,
                    save_dir,
                    case_name,
                    postprocess=False):
        """Write a label map to <save_dir>/<case_name>.nii.gz with the raw
        spacing; optionally keep only the largest connected component first."""
        return_output = return_output.astype(np.uint8)

        if postprocess:
            return_output = large_connected_domain(return_output)

        return_output = sitk.GetImageFromArray(return_output)
        return_output.SetSpacing((raw_spacing[0].item(), raw_spacing[1].item(), raw_spacing[2].item()))

        sitk.WriteImage(return_output, os.path.join(save_dir, f"{case_name}.nii.gz"))
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/__init__.py b/PRISM/SegMamba/light_training/preprocessing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/preprocessing/cropping/__init__.py b/PRISM/SegMamba/light_training/preprocessing/cropping/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/preprocessing/cropping/cropping.py b/PRISM/SegMamba/light_training/preprocessing/cropping/cropping.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb6052c7adaf8322e94c1003e1a86a5396e4afe4
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/cropping/cropping.py
@@ -0,0 +1,51 @@
+import numpy as np
+
+
+# Hello! crop_to_nonzero is the function you are looking for. Ignore the rest.
+from acvl_utils.cropping_and_padding.bounding_boxes import get_bbox_from_mask, crop_to_bbox, bounding_box_to_slice
+
+
+def create_nonzero_mask(data):
+ """
+
+ :param data:
+ :return: the mask is True where the data is nonzero
+ """
+ from scipy.ndimage import binary_fill_holes
+ assert len(data.shape) == 4 or len(data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
+ nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
+ for c in range(data.shape[0]):
+ this_mask = data[c] != 0
+ nonzero_mask = nonzero_mask | this_mask
+ nonzero_mask = binary_fill_holes(nonzero_mask)
+ return nonzero_mask
+
+
+def crop_to_nonzero(data, seg=None, nonzero_label=-1):
+    """Crop ``data`` (and ``seg`` if given) to the bounding box of the nonzero region.
+
+    :param data: (C, spatial...) image array
+    :param seg: optional segmentation with matching spatial shape, cropped alongside
+    :param nonzero_label: this will be written into the segmentation map for
+        voxels outside the nonzero mask
+    :return: (cropped data, segmentation encoding the outside region, bbox used)
+    """
+    nonzero_mask = create_nonzero_mask(data)
+    bbox = get_bbox_from_mask(nonzero_mask)
+
+    slicer = bounding_box_to_slice(bbox)
+    # keep the channel axis, crop only the spatial axes
+    data = data[tuple([slice(None), *slicer])]
+
+    if seg is not None:
+        seg = seg[tuple([slice(None), *slicer])]
+
+    nonzero_mask = nonzero_mask[slicer][None]
+    if seg is not None:
+        # re-label background voxels that lie outside the nonzero mask so that
+        # downstream code can tell "outside" (nonzero_label) from true background (0)
+        seg[(seg == 0) & (~nonzero_mask)] = nonzero_label
+    else:
+        # no segmentation supplied: synthesize one that only encodes
+        # outside (nonzero_label) vs inside (0)
+        nonzero_mask = nonzero_mask.astype(np.int8)
+        nonzero_mask[nonzero_mask == 0] = nonzero_label
+        nonzero_mask[nonzero_mask > 0] = 0
+        seg = nonzero_mask
+    return data, seg, bbox
+
+
diff --git a/PRISM/SegMamba/light_training/preprocessing/normalization/__init__.py b/PRISM/SegMamba/light_training/preprocessing/normalization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/preprocessing/normalization/default_normalization_schemes.py b/PRISM/SegMamba/light_training/preprocessing/normalization/default_normalization_schemes.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef96408773b6f53247173b83999313aa8ad4ff8d
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/normalization/default_normalization_schemes.py
@@ -0,0 +1,126 @@
+from abc import ABC, abstractmethod
+from typing import Type
+
+import numpy as np
+from numpy import number
+from monai.transforms.utils_pytorch_numpy_unification import clip
+
+
+class ImageNormalization(ABC):
+ leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = None
+
+ def __init__(self, use_mask_for_norm: bool = None, intensityproperties: dict = None,
+ target_dtype: Type[number] = np.float32):
+ assert use_mask_for_norm is None or isinstance(use_mask_for_norm, bool)
+ self.use_mask_for_norm = use_mask_for_norm
+ assert isinstance(intensityproperties, dict)
+ self.intensityproperties = intensityproperties
+ self.target_dtype = target_dtype
+
+ @abstractmethod
+ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+ """
+ Image and seg must have the same shape. Seg is not always used
+ """
+ pass
+
+
+class ZScoreNormalization(ImageNormalization):
+ leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = True
+
+ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+ """
+ here seg is used to store the zero valued region. The value for that region in the segmentation is -1 by
+ default.
+ """
+ image = image.astype(self.target_dtype)
+ if self.use_mask_for_norm is not None and self.use_mask_for_norm:
+ # negative values in the segmentation encode the 'outside' region (think zero values around the brain as
+ # in BraTS). We want to run the normalization only in the brain region, so we need to mask the image.
+ # The default nnU-net sets use_mask_for_norm to True if cropping to the nonzero region substantially
+ # reduced the image size.
+ mask = seg >= 0
+ mean = image[mask].mean()
+ std = image[mask].std()
+ image[mask] = (image[mask] - mean) / (max(std, 1e-8))
+ else:
+ mean = image.mean()
+ std = image.std()
+ image = (image - mean) / (max(std, 1e-8))
+ return image
+
+
+class CTNormStandard:
+ def __init__(
+ self,
+ a_min: float,
+ a_max: float,
+ b_min,
+ b_max,
+ clip=False,
+ dtype=np.float32,
+ ):
+ self.a_min = a_min
+ self.a_max = a_max
+ self.b_min = b_min
+ self.b_max = b_max
+ self.clip = clip
+ self.dtype = dtype
+
+ def __call__(self, img):
+ """
+ Apply the transform to `img`.
+ """
+
+ img = (img - self.a_min) / (self.a_max - self.a_min)
+ if (self.b_min is not None) and (self.b_max is not None):
+ img = img * (self.b_max - self.b_min) + self.b_min
+ if self.clip:
+ img = clip(img, self.b_min, self.b_max)
+
+ return img
+
+class CTNormalization(ImageNormalization):
+    """CT normalization: clip to the dataset's 0.5/99.5 intensity percentiles,
+    then z-score with the dataset-wide foreground mean/std (seg is unused)."""
+    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False
+
+    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+        assert self.intensityproperties is not None, "CTNormalization requires intensity properties"
+        image = image.astype(self.target_dtype)
+        # dataset fingerprint statistics collected over foreground voxels
+        mean_intensity = self.intensityproperties['mean']
+        std_intensity = self.intensityproperties['std']
+        lower_bound = self.intensityproperties['percentile_00_5']
+        upper_bound = self.intensityproperties['percentile_99_5']
+        image = np.clip(image, lower_bound, upper_bound)
+        # epsilon guards against a zero std
+        image = (image - mean_intensity) / max(std_intensity, 1e-8)
+        return image
+
+
+class NoNormalization(ImageNormalization):
+ leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False
+
+ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+ return image.astype(self.target_dtype)
+
+
+class RescaleTo01Normalization(ImageNormalization):
+ leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False
+
+ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+ image = image.astype(self.target_dtype)
+ image = image - image.min()
+ image = image / np.clip(image.max(), a_min=1e-8, a_max=None)
+ return image
+
+
+class RGBTo01Normalization(ImageNormalization):
+    """Maps uint8 RGB intensities [0, 255] to [0, 1]; validates the input range."""
+    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False
+
+    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+        assert image.min() >= 0, "RGB images are uint 8, for whatever reason I found pixel values smaller than 0. " \
+                                 "Your images do not seem to be RGB images"
+        assert image.max() <= 255, "RGB images are uint 8, for whatever reason I found pixel values greater than 255" \
+                                   ". Your images do not seem to be RGB images"
+        image = image.astype(self.target_dtype)
+        image = image / 255.
+        return image
+
diff --git a/PRISM/SegMamba/light_training/preprocessing/normalization/map_channel_name_to_normalization.py b/PRISM/SegMamba/light_training/preprocessing/normalization/map_channel_name_to_normalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..e82165069a078b1290e1ba96e2061e4d450cb12d
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/normalization/map_channel_name_to_normalization.py
@@ -0,0 +1,24 @@
+from typing import Type
+
+from nnunetv2.preprocessing.normalization.default_normalization_schemes import CTNormalization, NoNormalization, \
+ ZScoreNormalization, RescaleTo01Normalization, RGBTo01Normalization, ImageNormalization
+
+channel_name_to_normalization_mapping = {
+ 'CT': CTNormalization,
+ 'noNorm': NoNormalization,
+ 'zscore': ZScoreNormalization,
+ 'rescale_0_1': RescaleTo01Normalization,
+ 'rgb_to_0_1': RGBTo01Normalization
+}
+
+
+def get_normalization_scheme(channel_name: str) -> Type[ImageNormalization]:
+ """
+ If we find the channel_name in channel_name_to_normalization_mapping return the corresponding normalization. If it is
+ not found, use the default (ZScoreNormalization)
+ """
+ norm_scheme = channel_name_to_normalization_mapping.get(channel_name)
+ if norm_scheme is None:
+ norm_scheme = ZScoreNormalization
+ # print('Using %s for image normalization' % norm_scheme.__name__)
+ return norm_scheme
diff --git a/PRISM/SegMamba/light_training/preprocessing/normalization/readme.md b/PRISM/SegMamba/light_training/preprocessing/normalization/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b5439612571240eba0926370bb1fed5044eecce
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/normalization/readme.md
@@ -0,0 +1,5 @@
+The channel_names entry in dataset.json only determines the normalization scheme. So if you want to use something different
+then you can just
+- create a new subclass of ImageNormalization
+- map your custom channel identifier to that subclass in channel_name_to_normalization_mapping
+- run plan and preprocess again with your custom normalization scheme
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/__init__.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..15b7f599733069c5542930588f8b5e070fea3c5e
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py
@@ -0,0 +1,528 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+import glob
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
+from tqdm import tqdm
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
+import SimpleITK as sitk
+from tqdm import tqdm
+from copy import deepcopy
+import json
+
+def create_image(image_arr, spacing):
+ image = sitk.GetImageFromArray(image_arr)
+ image.SetSpacing(spacing)
+ return image
+
+def get_shape_must_be_divisible_by(net_numpool_per_axis):
+    # Each pooling step halves an axis, so the input shape must be divisible
+    # by 2**(number of poolings) along that axis.
+    return 2 ** np.array(net_numpool_per_axis)
+
+def pad_shape(shape, must_be_divisible_by):
+ """
+ pads shape so that it is divisible by must_be_divisible_by
+ :param shape:
+ :param must_be_divisible_by:
+ :return:
+ """
+ if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
+ must_be_divisible_by = [must_be_divisible_by] * len(shape)
+ else:
+ assert len(must_be_divisible_by) == len(shape)
+
+ new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))]
+
+ for i in range(len(shape)):
+ if shape[i] % must_be_divisible_by[i] == 0:
+ new_shp[i] -= must_be_divisible_by[i]
+ new_shp = np.array(new_shp).astype(int)
+ return new_shp
+
+def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
+    """
+    this is the same as get_pool_and_conv_props_v2 from old nnunet
+
+    Iteratively decides, per axis, how often to pool (stride-2) and which conv
+    kernel sizes to use, so that high-resolution (small spacing) axes are pooled
+    first and no axis shrinks below min_feature_map_size.
+
+    :param spacing:
+    :param patch_size:
+    :param min_feature_map_size: min edge length of feature maps in bottleneck
+    :param max_numpool:
+    :return: (num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes,
+              padded patch_size, shape_must_be_divisible_by)
+    """
+    # todo review this code
+    dim = len(spacing)
+
+    current_spacing = deepcopy(list(spacing))
+    current_size = deepcopy(list(patch_size))
+
+    pool_op_kernel_sizes = [[1] * len(spacing)]
+    conv_kernel_sizes = []
+
+    num_pool_per_axis = [0] * dim
+    kernel_size = [1] * dim
+
+    while True:
+        # exclude axes that we cannot pool further because of min_feature_map_size constraint
+        valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
+        if len(valid_axes_for_pool) < 1:
+            break
+
+        spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]
+
+        # find axis that are within factor of 2 within smallest spacing
+        min_spacing_of_valid = min(spacings_of_axes)
+        valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]
+
+        # max_numpool constraint
+        valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]
+
+        # with only one poolable axis left, require extra headroom before pooling it
+        if len(valid_axes_for_pool) == 1:
+            if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
+                pass
+            else:
+                break
+        if len(valid_axes_for_pool) < 1:
+            break
+
+        # now we need to find kernel sizes
+        # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
+        # factor 2 of min_spacing. Once they are 3 they remain 3
+        for d in range(dim):
+            if kernel_size[d] == 3:
+                continue
+            else:
+                # NOTE(review): spacings_of_axes was built over the *valid* axes
+                # only, yet d indexes all dims; if fewer axes remain valid than
+                # dim and kernel_size[d] is still 1 this can misread spacings or
+                # raise IndexError -- confirm against the upstream nnU-Net code.
+                if spacings_of_axes[d] / min(current_spacing) < 2:
+                    kernel_size[d] = 3
+
+        other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]
+
+        # pool the selected axes by 2; doubling their spacing and halving their size
+        pool_kernel_sizes = [0] * dim
+        for v in valid_axes_for_pool:
+            pool_kernel_sizes[v] = 2
+            num_pool_per_axis[v] += 1
+            current_spacing[v] *= 2
+            current_size[v] = np.ceil(current_size[v] / 2)
+        for nv in other_axes:
+            pool_kernel_sizes[nv] = 1
+
+        pool_op_kernel_sizes.append(pool_kernel_sizes)
+        conv_kernel_sizes.append(deepcopy(kernel_size))
+
+    must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
+    patch_size = pad_shape(patch_size, must_be_divisible_by)
+
+    # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
+    conv_kernel_sizes.append([3]*dim)
+    return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
+
+
+class DefaultPreprocessor(object):
+    """nnU-Net-style preprocessing pipeline: crop to the nonzero region,
+    normalize intensities, resample to a target spacing, and dump the result
+    as .npz/.pkl pairs for training. Target spacing/labels/output dir are
+    supplied later via run()."""
+
+    def __init__(self,
+                 base_dir,
+                 image_dir,
+                 # output_dir,
+                 # out_spacing,
+                 label_dir=None,
+                 data_type="CT"):
+        """
+        Everything we need is in the plans. Those are given when run() is called
+        """
+        self.base_dir = base_dir    # dataset root directory
+        self.image_dir = image_dir  # subfolder (under base_dir) containing the image volumes
+        self.label_dir = label_dir  # subfolder containing segmentations; None for test data
+
+        self.data_type = data_type  # modality tag; currently informational only
+
+    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
+        """Preprocess one loaded case: crop to nonzero, normalize, resample.
+
+        :param data: (C, z, y, x) float image array
+        :param seg: matching (1, z, y, x) segmentation or None (test cases)
+        :param properties: case metadata dict; enriched in place with crop/resample info
+        :return: (preprocessed data, preprocessed seg or None)
+        """
+        # let's not mess up the inputs!
+        data = np.copy(data)
+        old_shape = data.shape
+        original_spacing = list(properties['spacing'])
+        # SimpleITK reports spacing in (x, y, z) while the array is (z, y, x),
+        # so the spacing has to be reversed to match the array axes
+
+        original_spacing_trans = original_spacing[::-1]
+        properties["original_spacing_trans"] = original_spacing_trans
+        properties["target_spacing_trans"] = self.out_spacing
+
+        shape_before_cropping = data.shape[1:]
+        # crop
+        properties['shape_before_cropping'] = shape_before_cropping
+        # this command will generate a segmentation. This is important because of the nonzero mask which we may need
+        data, seg, bbox = crop_to_nonzero(data, seg)
+        properties['bbox_used_for_cropping'] = bbox
+
+        # crop, remember to store size before cropping!
+        shape_before_resample = data.shape[1:]
+        properties['shape_after_cropping_before_resample'] = shape_before_resample
+
+        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)
+
+        # _normalize needs a seg argument; for test cases substitute zeros
+        # (unused when use_mask_for_norm=False, as configured in _normalize)
+        if seg is None :
+            seg_norm = np.zeros_like(data)
+        else :
+            seg_norm = seg
+        data = self._normalize(data, seg_norm,
+                               self.foreground_intensity_properties_per_channel)
+
+        assert len(data.shape) == 4
+
+        # NOTE(review): the resampler receives the *untransposed* (x, y, z)
+        # spacing although new_shape was computed from the transposed one --
+        # verify resample_data_or_seg_to_shape's expected axis order.
+        data = resample_data_or_seg_to_shape(data, new_shape,
+                                             original_spacing,
+                                             self.out_spacing,
+                                             order=3,
+                                             order_z=0)
+        properties['shape_after_resample'] = new_shape
+
+        if seg is not None :
+            assert len(seg.shape) == 4
+            # nearest/low-order resampling for label maps
+            seg = resample_data_or_seg_to_shape(seg, new_shape,
+                                                original_spacing,
+                                                self.out_spacing,
+                                                is_seg=True,
+                                                order=1,
+                                                order_z=0)
+
+            # record foreground voxel coordinates for oversampling during training
+            properties['class_locations'] = self._sample_foreground_locations(seg,
+                                                                              self.all_labels,
+                                                                              )
+
+            # use the smallest integer dtype that can hold all labels
+            if np.max(seg) > 127:
+                seg = seg.astype(np.int16)
+            else:
+                seg = seg.astype(np.int8)
+
+        # (an earlier SimpleITK-based resampling path was removed here)
+
+        print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, '
+              f'new_spacing: {self.out_spacing}, boxes is {bbox}')
+
+        return data, seg
+
+ # need to modify
+ def get_iterable_list(self):
+ all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
+
+ return all_cases
+
+    def _normalize(self, data: np.ndarray, seg: np.ndarray,
+                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
+        """Normalize every channel of ``data`` in place using the per-channel
+        CT intensity statistics (keys are stringified channel indices).
+
+        NOTE(review): CTNormalization is hard-coded regardless of
+        self.data_type -- confirm this is intended for non-CT datasets.
+        """
+        for c in range(data.shape[0]):
+            normalizer_class = CTNormalization
+            normalizer = normalizer_class(use_mask_for_norm=False,
+                                          intensityproperties=foreground_intensity_properties_per_channel[str(c)])
+            data[c] = normalizer.run(data[c], seg[0])
+        return data
+
+    # need to modify
+    def read_data(self, case_name):
+        """Load one case (image and, if available, its segmentation) from disk.
+
+        :param case_name: file name under base_dir/image_dir (same name is
+            expected under label_dir when labels exist)
+        :return: (data_arr (1, z, y, x) float32, seg_arr or None, properties dict)
+        """
+        # only for CT dataset
+        data = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name))
+        seg_arr = None
+        # must be float32 -- downstream normalization writes floats in place
+        data_arr = sitk.GetArrayFromImage(data).astype(np.float32)
+        data_arr = data_arr[None]
+
+        if self.label_dir is not None:
+            seg = sitk.ReadImage(os.path.join(self.base_dir, self.label_dir, case_name))
+            # cast to float32 right after reading as well
+            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
+            seg_arr = seg_arr[None]
+            # foreground intensity samples/stats are only available with labels
+            intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data_arr)
+
+        else :
+            intensities_per_channel = []
+            intensity_statistics_per_channel = []
+
+        properties = {"spacing": data.GetSpacing(),
+                      "raw_size": data_arr.shape[1:],
+                      "name": case_name.split(".")[0],
+                      "intensities_per_channel": intensities_per_channel,
+                      "intensity_statistics_per_channel": intensity_statistics_per_channel}
+
+        return data_arr, seg_arr, properties
+
+    def run_case(self, case_name):
+        """
+        Load and fully preprocess one case.
+
+        seg file can be none (test cases)
+
+        order of operations is: transpose -> crop -> resample
+        so when we export we need to run the following order: resample -> crop -> transpose (we could also run
+        transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner)
+        """
+        data, seg, properties = self.read_data(case_name)
+
+        data, seg = self.run_case_npy(data, seg, properties)
+        return data, seg, properties
+
+    def run_case_save(self, case_name):
+        """Preprocess one case and persist it as <output_dir>/<case>.npz plus
+        a .pkl with the case properties."""
+        print(case_name + "~~~~~~~~" * 10)
+        data, seg, properties = self.run_case(case_name)
+        # strip the file extension to get the case identifier
+        case_name = case_name.split(".")[0]
+        np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg)
+        write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl')
+        print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}")
+
+    def experiment_plan(self, case_name):
+        """Read one case and return the fingerprint info used by run_plan():
+        spacing, raw array size, and sampled foreground intensities."""
+        data, seg, properties = self.read_data(case_name)
+        print(f"labels is {np.unique(seg)}")
+        spacing = properties["spacing"]
+        raw_size = properties["raw_size"]
+        intensities_per_channel = properties["intensities_per_channel"]
+
+        return spacing, raw_size, intensities_per_channel
+
+    def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray:
+        """Median spacing over the dataset, with a special rule for strongly
+        anisotropic data (nnU-Net heuristic).
+
+        :param spacings: per-case spacing tuples
+        :param sizes: per-case array shapes
+        :return: target spacing as an np.ndarray
+        """
+        target = np.percentile(np.vstack(spacings), 50, 0)
+        target_size = np.percentile(np.vstack(sizes), 50, 0)
+        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
+        # the following properties:
+        # - one axis which much lower resolution than the others
+        # - the lowres axis has much less voxels than the others
+        # - (the size in mm of the lowres axis is also reduced)
+        worst_spacing_axis = np.argmax(target)
+        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
+        other_spacings = [target[i] for i in other_axes]
+        other_sizes = [target_size[i] for i in other_axes]
+
+        has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings))
+        has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes)
+
+        if has_aniso_spacing and has_aniso_voxels:
+            # use the 10th percentile of that axis's spacings instead of the median
+            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
+            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
+            # don't let the spacing of that axis get higher than the other axes
+            if target_spacing_of_that_axis < max(other_spacings):
+                target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5
+            target[worst_spacing_axis] = target_spacing_of_that_axis
+        return target
+
+ def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray],
+ old_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
+ new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray:
+ ## spacing need to be transposed
+ old_spacing = list(old_spacing)[::-1]
+ new_spacing = list(new_spacing)[::-1]
+
+ assert len(old_spacing) == len(old_shape)
+ assert len(old_shape) == len(new_spacing)
+ new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)])
+ return new_shape
+
+    def run_plan(self):
+        """Dataset fingerprinting / experiment planning: scan every case,
+        aggregate spacings, shapes and foreground intensity statistics, derive
+        a target spacing and patch size, and write the summary to
+        ./data_analysis_result.txt."""
+        all_iter = self.get_iterable_list()
+        spacings = []
+        sizes = []
+        intensities_per_channels = []
+        print(f"analysing data......")
+        for case in tqdm(all_iter, total=len(all_iter)):
+            spacing, size, intensities_per_channel = self.experiment_plan(case)
+            spacings.append(spacing)
+            sizes.append(size)
+            intensities_per_channels.append(intensities_per_channel)
+
+        print(f"all spacing is {spacings}")
+        print(f"all sizes is {sizes}")
+        # pool the sampled foreground intensities over all cases, per channel
+        foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in
+                                              range(len(intensities_per_channels[0]))]
+
+        num_channels = len(intensities_per_channels[0])
+
+        intensity_statistics_per_channel = {}
+        for i in range(num_channels):
+            intensity_statistics_per_channel[i] = {
+                'mean': float(np.mean(foreground_intensities_per_channel[i])),
+                'median': float(np.median(foreground_intensities_per_channel[i])),
+                'std': float(np.std(foreground_intensities_per_channel[i])),
+                'min': float(np.min(foreground_intensities_per_channel[i])),
+                'max': float(np.max(foreground_intensities_per_channel[i])),
+                'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)),
+                'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)),
+            }
+
+        print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}")
+
+        fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes)
+        print(f"fullres spacing is {fullres_spacing[::-1]}")
+
+        # get transposed new median shape (what we would have after resampling)
+        new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in
+                      zip(spacings, sizes)]
+        new_median_shape = np.median(new_shapes, 0)
+        print(f"median_shape is {new_median_shape}")
+
+        # initial patch size: ~256^3 voxels distributed inversely to spacing
+        tmp = 1 / np.array(fullres_spacing)
+        initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)]
+
+        print(f"initial_patch_size is {initial_patch_size[::-1]}")
+
+        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \
+        shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size,
+                                                             4,
+                                                             999999)
+        print(f"target medium patch size is {patch_size[::-1]}")
+
+        analysis_path = "./data_analysis_result.txt"
+        with open(analysis_path, "w") as f:
+
+            f.write(json.dumps({
+                "intensity_statistics_per_channel": intensity_statistics_per_channel,
+                "fullres spacing": fullres_spacing.tolist(),
+                "median_shape": new_median_shape.tolist(),
+                "initial_patch_size": initial_patch_size,
+                "target medium patch size": patch_size[::-1].tolist()
+            }))
+        print(f"Analysis done, save to {analysis_path}")
+
+    def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234,
+                                       num_samples: int = 10000):
+        """
+        Sample up to num_samples foreground (seg > 0) intensity values per channel
+        and compute per-channel statistics over all foreground voxels.
+
+        images=image with multiple channels = shape (c, x, y(, z))
+        """
+        assert len(images.shape) == 4
+        assert len(segmentation.shape) == 4
+
+        assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-("
+        assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-("
+
+        # fixed seed so repeated fingerprint runs are reproducible
+        rs = np.random.RandomState(seed)
+
+        intensities_per_channel = []
+        # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have
+        intensity_statistics_per_channel = []
+
+        # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work
+        foreground_mask = segmentation[0] > 0
+
+        for i in range(len(images)):
+            foreground_pixels = images[i][foreground_mask]
+            num_fg = len(foreground_pixels)
+            # sample with replacement so that we don't get issues with cases that have less than num_samples
+            # foreground_pixels. We could also just sample less in those cases but that would than cause these
+            # training cases to be underrepresented
+            intensities_per_channel.append(
+                rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else [])
+            intensity_statistics_per_channel.append({
+                'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan,
+                'median': np.median(foreground_pixels) if num_fg > 0 else np.nan,
+                'min': np.min(foreground_pixels) if num_fg > 0 else np.nan,
+                'max': np.max(foreground_pixels) if num_fg > 0 else np.nan,
+                'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan,
+                'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan,
+
+            })
+
+        return intensities_per_channel, intensity_statistics_per_channel
+
+    @staticmethod
+    def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]],
+                                     seed: int = 1234, verbose: bool = False):
+        """Sample voxel coordinates per class (or per region = tuple of labels)
+        for foreground-oversampled patch extraction during training.
+
+        :return: dict mapping class/region key -> array of sampled coordinates
+        """
+        num_samples = 10000
+        min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too
+        # sparse
+        rndst = np.random.RandomState(seed)
+        class_locs = {}
+        for c in classes_or_regions:
+            # lists are unhashable; use a tuple as the dict key
+            k = c if not isinstance(c, list) else tuple(c)
+            if isinstance(c, (tuple, list)):
+                # region: union of several labels
+                mask = seg == c[0]
+                for cc in c[1:]:
+                    mask = mask | (seg == cc)
+                all_locs = np.argwhere(mask)
+            else:
+                all_locs = np.argwhere(seg == c)
+            if len(all_locs) == 0:
+                class_locs[k] = []
+                continue
+            # cap at num_samples, but never below 1% coverage of the class
+            target_num_samples = min(num_samples, len(all_locs))
+            target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage)))
+
+            selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)]
+            class_locs[k] = selected
+            if verbose:
+                print(c, target_num_samples)
+
+        return class_locs
+
+    def run(self, output_spacing,
+            output_dir,
+            all_labels,
+            foreground_intensity_properties_per_channel=None,
+            num_processes=8):
+        """Preprocess the whole dataset and save each case to output_dir.
+
+        :param output_spacing: target spacing for resampling
+        :param output_dir: destination for the .npz/.pkl files (created if missing)
+        :param all_labels: label values/regions to sample foreground locations for
+        :param foreground_intensity_properties_per_channel: per-channel CT stats
+            (keys are stringified channel indices) consumed by _normalize
+        :param num_processes: size of the multiprocessing pool
+        """
+        self.out_spacing = output_spacing
+        self.all_labels = all_labels
+        self.output_dir = output_dir
+        self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel
+
+        all_iter = self.get_iterable_list()
+
+        maybe_mkdir_p(self.output_dir)
+
+        # test_run: process the first case in the main process to fail fast.
+        # NOTE(review): the pool below reprocesses ALL cases, so this first
+        # case is preprocessed and saved twice -- confirm this is intended.
+        for case_name in all_iter:
+            self.run_case_save(case_name)
+            break
+
+        # multiprocessing magic.
+        r = []
+        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+            for case_name in all_iter:
+                r.append(p.starmap_async(self.run_case_save,
+                                         ((case_name, ),)))
+            remaining = list(range(len(all_iter)))
+            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
+            # So we need to store the original pool of workers.
+            # NOTE(review): p._pool is a private multiprocessing attribute and
+            # may break across Python versions.
+            workers = [j for j in p._pool]
+            with tqdm(desc=None, total=len(all_iter)) as pbar:
+                while len(remaining) > 0:
+                    all_alive = all([j.is_alive() for j in workers])
+                    if not all_alive:
+                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
+                                           'OK jokes aside.\n'
+                                           'One of your background processes is missing. This could be because of '
+                                           'an error (look for an error message) or because it was killed '
+                                           'by your OS due to running out of RAM. If you don\'t see '
+                                           'an error message, out of RAM is likely the problem. In that case '
+                                           'reducing the number of workers might help')
+                    done = [i for i in remaining if r[i].ready()]
+                    for _ in done:
+                        pbar.update()
+                    remaining = [i for i in remaining if i not in done]
+                    sleep(0.1)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_AbdomenAtlas1_0Mini.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_AbdomenAtlas1_0Mini.py
new file mode 100644
index 0000000000000000000000000000000000000000..05672b29a3e7546e9cfdbaa8354d184f1af72182
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_AbdomenAtlas1_0Mini.py
@@ -0,0 +1,540 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+import glob
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
+from tqdm import tqdm
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
+import SimpleITK as sitk
+from tqdm import tqdm
+from copy import deepcopy
+import json
+
+def create_image(image_arr, spacing):
+ image = sitk.GetImageFromArray(image_arr)
+ image.SetSpacing(spacing)
+ return image
+
+def get_shape_must_be_divisible_by(net_numpool_per_axis):
+ return 2 ** np.array(net_numpool_per_axis)
+
+def pad_shape(shape, must_be_divisible_by):
+ """
+ pads shape so that it is divisible by must_be_divisible_by
+ :param shape:
+ :param must_be_divisible_by:
+ :return:
+ """
+ if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
+ must_be_divisible_by = [must_be_divisible_by] * len(shape)
+ else:
+ assert len(must_be_divisible_by) == len(shape)
+
+ new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))]
+
+ for i in range(len(shape)):
+ if shape[i] % must_be_divisible_by[i] == 0:
+ new_shp[i] -= must_be_divisible_by[i]
+ new_shp = np.array(new_shp).astype(int)
+ return new_shp
+
+def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
+ """
+ this is the same as get_pool_and_conv_props_v2 from old nnunet
+
+ :param spacing:
+ :param patch_size:
+ :param min_feature_map_size: min edge length of feature maps in bottleneck
+ :param max_numpool:
+ :return:
+ """
+ # todo review this code
+ dim = len(spacing)
+
+ current_spacing = deepcopy(list(spacing))
+ current_size = deepcopy(list(patch_size))
+
+ pool_op_kernel_sizes = [[1] * len(spacing)]
+ conv_kernel_sizes = []
+
+ num_pool_per_axis = [0] * dim
+ kernel_size = [1] * dim
+
+ while True:
+ # exclude axes that we cannot pool further because of min_feature_map_size constraint
+ valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
+ if len(valid_axes_for_pool) < 1:
+ break
+
+ spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]
+
+ # find axis that are within factor of 2 within smallest spacing
+ min_spacing_of_valid = min(spacings_of_axes)
+ valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]
+
+ # max_numpool constraint
+ valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]
+
+ if len(valid_axes_for_pool) == 1:
+ if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
+ pass
+ else:
+ break
+ if len(valid_axes_for_pool) < 1:
+ break
+
+ # now we need to find kernel sizes
+ # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
+ # factor 2 of min_spacing. Once they are 3 they remain 3
+ for d in range(dim):
+ if kernel_size[d] == 3:
+ continue
+ else:
+ if spacings_of_axes[d] / min(current_spacing) < 2:
+ kernel_size[d] = 3
+
+ other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]
+
+ pool_kernel_sizes = [0] * dim
+ for v in valid_axes_for_pool:
+ pool_kernel_sizes[v] = 2
+ num_pool_per_axis[v] += 1
+ current_spacing[v] *= 2
+ current_size[v] = np.ceil(current_size[v] / 2)
+ for nv in other_axes:
+ pool_kernel_sizes[nv] = 1
+
+ pool_op_kernel_sizes.append(pool_kernel_sizes)
+ conv_kernel_sizes.append(deepcopy(kernel_size))
+ #print(conv_kernel_sizes)
+
+ must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
+ patch_size = pad_shape(patch_size, must_be_divisible_by)
+
+ # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
+ conv_kernel_sizes.append([3]*dim)
+ return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
+
+
+class DefaultPreprocessor(object):
+    def __init__(self,
+                 base_dir,
+                 ):
+        """
+        Preprocessor for the AbdomenAtlas 1.0 Mini layout: one directory per
+        case containing a CT volume plus one binary mask per organ.
+        Target spacing, labels and output directory are supplied via run().
+
+        :param base_dir: root directory whose sub-directories are the cases
+        """
+        self.base_dir = base_dir
+        # file name of the CT volume inside each case directory
+        self.image_name = "ct.nii.gz"
+        # sub-directory holding the per-organ binary masks
+        self.seg_dir = "segmentations"
+        # masks are merged into a single label map; the label value of an organ
+        # is its 1-based position in this list (aorta=1, gall_bladder=2, ...)
+        self.seg_list = ["aorta.nii.gz", "gall_bladder.nii.gz", "kidney_left.nii.gz",
+                         "kidney_right.nii.gz", "liver.nii.gz", "pancreas.nii.gz",
+                         "postcava.nii.gz", "spleen.nii.gz", "stomach.nii.gz"]
+
+    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
+        """
+        Crop -> normalize -> resample one case (channel-first arrays).
+
+        :param data: image array, 4d (c, z, y, x) — asserted below
+        :param seg: label array (1, z, y, x) or None (test cases)
+        :param properties: per-case metadata dict; updated in place with the
+            bookkeeping needed to undo the preprocessing at export time
+        :return: (data, seg) after cropping, normalization and resampling
+        """
+        # let's not mess up the inputs!
+        data = np.copy(data)
+        old_shape = data.shape
+        original_spacing = list(properties['spacing'])
+        # SimpleITK reports spacing as (x, y, z) while the array axes are
+        # (z, y, x), so the spacing is reversed here to match the array
+        original_spacing_trans = original_spacing[::-1]
+        properties["original_spacing_trans"] = original_spacing_trans
+        properties["target_spacing_trans"] = self.out_spacing
+
+        shape_before_cropping = data.shape[1:]
+        ## crop
+        properties['shape_before_cropping'] = shape_before_cropping
+        # this command will generate a segmentation. This is important because of the nonzero mask which we may need
+        data, seg, bbox = crop_to_nonzero(data, seg)
+        properties['bbox_used_for_cropping'] = bbox
+
+        # crop, remember to store size before cropping!
+        shape_before_resample = data.shape[1:]
+        properties['shape_after_cropping_before_resample'] = shape_before_resample
+
+        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)
+
+        # normalization needs a (possibly dummy, all-zero) seg as mask argument
+        if seg is None :
+            seg_norm = np.zeros_like(data)
+        else :
+            seg_norm = seg
+        data = self._normalize(data, seg_norm,
+                               self.foreground_intensity_properties_per_channel)
+
+        assert len(data.shape) == 4
+
+        # NOTE(review): the untransposed (x, y, z) spacing is passed here while
+        # new_shape was computed from the transposed one — confirm that
+        # resample_data_or_seg_to_shape uses the spacings only for the
+        # anisotropy decision, otherwise the axis order is inconsistent.
+        data = resample_data_or_seg_to_shape(data, new_shape,
+                                             original_spacing,
+                                             self.out_spacing,
+                                             order=3,
+                                             order_z=0)
+        properties['shape_after_resample'] = new_shape
+
+        if seg is not None :
+            assert len(seg.shape) == 4
+            # labels are resampled with is_seg=True / order=1
+            seg = resample_data_or_seg_to_shape(seg, new_shape,
+                                                original_spacing,
+                                                self.out_spacing,
+                                                is_seg=True,
+                                                order=1,
+                                                order_z=0)
+
+            # pre-sample foreground voxel coordinates for patch-based training
+            properties['class_locations'] = self._sample_foreground_locations(seg,
+                                                                              self.all_labels,
+                                                                              )
+
+            # store labels in the smallest integer type that can hold them
+            if np.max(seg) > 127:
+                seg = seg.astype(np.int16)
+            else:
+                seg = seg.astype(np.int8)
+
+        print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, '
+              f'new_spacing: {self.out_spacing}, boxes is {bbox}')
+
+        return data, seg
+
+ # need to modify
+ def get_iterable_list(self):
+ all_cases = os.listdir(os.path.join(self.base_dir))
+
+ all_cases_2 = []
+ for c in all_cases:
+ if os.path.isdir(os.path.join(self.base_dir, c)):
+ all_cases_2.append(c)
+
+ return all_cases_2
+
+    def _normalize(self, data: np.ndarray, seg: np.ndarray,
+                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
+        """
+        Normalize each channel in place via CTNormalization, parameterized with
+        the dataset-wide intensity statistics for that channel.
+
+        NOTE(review): this indexes foreground_intensity_properties_per_channel
+        with str(c); run() defaults that argument to None, which would raise
+        here — callers must always provide the statistics dict.
+        """
+        for c in range(data.shape[0]):
+            normalizer_class = CTNormalization
+            normalizer = normalizer_class(use_mask_for_norm=False,
+                                          intensityproperties=foreground_intensity_properties_per_channel[str(c)])
+            data[c] = normalizer.run(data[c], seg[0])
+        return data
+
+ # need to modify
+ def read_data(self, case_name):
+ ## only for CT dataset
+ try:
+ data = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.image_name))
+ except:
+ print(f"data read error: {self.base_dir, case_name}")
+ return None, None, None
+ seg_arr = None
+ ## 一定要是float32!!!!
+ data_arr = sitk.GetArrayFromImage(data).astype(np.float32)
+ data_arr = data_arr[None]
+
+ if os.path.exists(os.path.join(self.base_dir, case_name, self.seg_dir)):
+ segs = None
+ index = 0
+ for target in self.seg_list:
+ index += 1
+ seg = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.seg_dir, target))
+ ## 读出来以后一定转float32!!!
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
+ if segs is None:
+ segs = seg_arr
+ else :
+ segs[seg_arr == 1] = index
+
+ segs = segs[None]
+ intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(segs, data_arr)
+ else :
+ intensities_per_channel = []
+ intensity_statistics_per_channel = []
+
+ properties = {"spacing": data.GetSpacing(),
+ "raw_size": data_arr.shape[1:],
+ "name": case_name.split(".")[0],
+ "intensities_per_channel": intensities_per_channel,
+ "intensity_statistics_per_channel": intensity_statistics_per_channel}
+
+ return data_arr, segs, properties
+
+ def run_case(self, case_name):
+ """
+ seg file can be none (test cases)
+
+ order of operations is: transpose -> crop -> resample
+ so when we export we need to run the following order: resample -> crop -> transpose (we could also run
+ transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner)
+ """
+ data, seg, properties = self.read_data(case_name)
+ if data is not None:
+ data, seg = self.run_case_npy(data, seg, properties)
+ return data, seg, properties
+ else :
+ return None, None, None
+
+ def run_case_save(self, case_name):
+ print(case_name + "~~~~~~~~" * 10)
+ data, seg, properties = self.run_case(case_name)
+ if data is not None:
+ # print('dtypes', data.dtype, seg.dtype)
+ case_name = case_name.split(".")[0]
+ np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg)
+ write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl')
+ print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}")
+
+ def experiment_plan(self, case_name):
+
+ data, seg, properties = self.read_data(case_name)
+ if data is None:
+ return None, None, None
+
+ print(f"labels is {np.unique(seg)}")
+ spacing = properties["spacing"]
+ raw_size = properties["raw_size"]
+ intensities_per_channel = properties["intensities_per_channel"]
+
+ return spacing, raw_size, intensities_per_channel
+
+    def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray:
+        """
+        Dataset-wide target spacing: the per-axis median, with the nnU-Net
+        anisotropy heuristic that gives a strongly low-resolution axis the
+        10th-percentile spacing instead of the median.
+
+        :param spacings: per-case spacing vectors (consistent axis order)
+        :param sizes: per-case shape vectors (same axis order)
+        :return: target spacing as np.ndarray
+        """
+        # if self.overwrite_target_spacing is not None:
+        #     return np.array(self.overwrite_target_spacing)
+
+        # spacings = self.dataset_fingerprint['spacings']
+        # sizes = self.dataset_fingerprint['shapes_after_crop']
+
+        # per-axis median spacing and median shape over all cases
+        target = np.percentile(np.vstack(spacings), 50, 0)
+        target_size = np.percentile(np.vstack(sizes), 50, 0)
+        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
+        # the following properties:
+        # - one axis which much lower resolution than the others
+        # - the lowres axis has much less voxels than the others
+        # - (the size in mm of the lowres axis is also reduced)
+        worst_spacing_axis = np.argmax(target)
+        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
+        other_spacings = [target[i] for i in other_axes]
+        other_sizes = [target_size[i] for i in other_axes]
+
+        has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings))
+        has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes)
+
+        if has_aniso_spacing and has_aniso_voxels:
+            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
+            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
+            # don't let the spacing of that axis get higher than the other axes
+            if target_spacing_of_that_axis < max(other_spacings):
+                target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5
+            target[worst_spacing_axis] = target_spacing_of_that_axis
+        return target
+
+ def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray],
+ old_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
+ new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray:
+ ## spacing need to be transposed
+ old_spacing = list(old_spacing)[::-1]
+ new_spacing = list(new_spacing)[::-1]
+
+ assert len(old_spacing) == len(old_shape)
+ assert len(old_shape) == len(new_spacing)
+ new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)])
+ return new_shape
+
+    def run_plan(self):
+        """
+        Dataset fingerprinting ("experiment planning"): scan all cases,
+        aggregate spacings / shapes / foreground intensities, derive the
+        target spacing and a patch size, and dump the result as JSON to
+        ./data_analysis_result.txt.
+        """
+        all_iter = self.get_iterable_list()
+        spacings = []
+        sizes = []
+        intensities_per_channels = []
+        print(f"analysing data......")
+        for case in tqdm(all_iter, total=len(all_iter)):
+            if os.path.isdir(os.path.join(self.base_dir, case)):
+                spacing, size, intensities_per_channel = self.experiment_plan(case)
+                if spacing is None:
+                    continue
+
+                spacings.append(spacing)
+                sizes.append(size)
+                intensities_per_channels.append(intensities_per_channel)
+
+        print(f"all spacing is {spacings}")
+        print(f"all sizes is {sizes}")
+        # NOTE(review): cases without a segmentation contribute an empty list
+        # here, which would break the per-channel indexing below — this assumes
+        # every planned case is labelled.
+        foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in
+                                              range(len(intensities_per_channels[0]))]
+
+        num_channels = len(intensities_per_channels[0])
+
+        intensity_statistics_per_channel = {}
+        for i in range(num_channels):
+            intensity_statistics_per_channel[i] = {
+                'mean': float(np.mean(foreground_intensities_per_channel[i])),
+                'median': float(np.median(foreground_intensities_per_channel[i])),
+                'std': float(np.std(foreground_intensities_per_channel[i])),
+                'min': float(np.min(foreground_intensities_per_channel[i])),
+                'max': float(np.max(foreground_intensities_per_channel[i])),
+                'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)),
+                'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)),
+            }
+
+        print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}")
+
+        # spacings are in SimpleITK (x, y, z) order; printed reversed to show
+        # the (z, y, x) array convention
+        fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes)
+        print(f"fullres spacing is {fullres_spacing[::-1]}")
+
+        # get transposed new median shape (what we would have after resampling)
+        new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in
+                      zip(spacings, sizes)]
+        new_median_shape = np.median(new_shapes, 0)
+        print(f"median_shape is {new_median_shape}")
+
+        # initial patch size: proportional to 1/spacing, rescaled to ~256^3 voxels
+        tmp = 1 / np.array(fullres_spacing)
+        initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)]
+
+        print(f"initial_patch_size is {initial_patch_size[::-1]}")
+
+        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \
+        shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size,
+                                                             4,
+                                                             999999)
+        print(f"target medium patch size is {patch_size[::-1]}")
+
+        analysis_path = "./data_analysis_result.txt"
+        with open(analysis_path, "w") as f:
+
+            f.write(json.dumps({
+                "intensity_statistics_per_channel": intensity_statistics_per_channel,
+                "fullres spacing": fullres_spacing.tolist(),
+                "median_shape": new_median_shape.tolist(),
+                "initial_patch_size": initial_patch_size,
+                "target medium patch size": patch_size[::-1].tolist()
+            }))
+        print(f"Analysis done, save to {analysis_path}")
+
+
+    def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234,
+                                       num_samples: int = 10000):
+        """
+        images=image with multiple channels = shape (c, x, y(, z))
+
+        Sample up to num_samples foreground intensities per channel (voxels
+        where segmentation > 0) and compute summary statistics for them.
+
+        :param segmentation: label map, 4d (1, x, y, z)
+        :param images: image, 4d (c, x, y, z)
+        :param seed: RNG seed, fixed for reproducibility
+        :param num_samples: number of intensity samples drawn per channel
+        :return: (intensities_per_channel, intensity_statistics_per_channel)
+        """
+        assert len(images.shape) == 4
+        assert len(segmentation.shape) == 4
+
+        assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-("
+        assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-("
+
+        rs = np.random.RandomState(seed)
+
+        intensities_per_channel = []
+        # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have
+        intensity_statistics_per_channel = []
+
+        # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work
+        foreground_mask = segmentation[0] > 0
+
+        for i in range(len(images)):
+            foreground_pixels = images[i][foreground_mask]
+            num_fg = len(foreground_pixels)
+            # sample with replacement so that we don't get issues with cases that have less than num_samples
+            # foreground_pixels. We could also just sample less in those cases but that would than cause these
+            # training cases to be underrepresented
+            intensities_per_channel.append(
+                rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else [])
+            intensity_statistics_per_channel.append({
+                'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan,
+                'median': np.median(foreground_pixels) if num_fg > 0 else np.nan,
+                'min': np.min(foreground_pixels) if num_fg > 0 else np.nan,
+                'max': np.max(foreground_pixels) if num_fg > 0 else np.nan,
+                'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan,
+                'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan,
+
+            })
+
+        return intensities_per_channel, intensity_statistics_per_channel
+
+    @staticmethod
+    def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]],
+                                     seed: int = 1234, verbose: bool = False):
+        """
+        For every class (int) or region (tuple/list of labels treated as one)
+        sample voxel coordinates where it occurs in seg. Used downstream for
+        foreground-biased patch sampling.
+
+        :param seg: label map; coordinates are np.argwhere indices into it
+        :param classes_or_regions: labels, or groups of labels forming regions
+        :param seed: RNG seed, fixed for reproducibility
+        :param verbose: print the number of samples drawn per class
+        :return: dict mapping class/region key -> sampled coordinates ([] if absent)
+        """
+        num_samples = 10000
+        min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too
+        # sparse
+        rndst = np.random.RandomState(seed)
+        class_locs = {}
+        for c in classes_or_regions:
+            # lists are unhashable -> use a tuple as the dict key
+            k = c if not isinstance(c, list) else tuple(c)
+            if isinstance(c, (tuple, list)):
+                ## region: union of all member labels
+                mask = seg == c[0]
+                for cc in c[1:]:
+                    mask = mask | (seg == cc)
+                all_locs = np.argwhere(mask)
+            else:
+                all_locs = np.argwhere(seg == c)
+            if len(all_locs) == 0:
+                class_locs[k] = []
+                continue
+            target_num_samples = min(num_samples, len(all_locs))
+            target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage)))
+
+            selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)]
+            class_locs[k] = selected
+            if verbose:
+                print(c, target_num_samples)
+
+        return class_locs
+
+    def run(self, output_spacing,
+            output_dir,
+            all_labels,
+            foreground_intensity_properties_per_channel=None,
+            num_processes=8):
+        """
+        Preprocess every case and write the results to output_dir.
+
+        :param output_spacing: target spacing (array axis order)
+        :param output_dir: where the .npz/.pkl files are written
+        :param all_labels: labels/regions to pre-sample foreground locations for
+        :param foreground_intensity_properties_per_channel: per-channel CT
+            statistics used by _normalize; a None value would crash there,
+            so callers must provide it
+        :param num_processes: size of the worker pool
+        """
+        self.out_spacing = output_spacing
+        self.all_labels = all_labels
+        self.output_dir = output_dir
+        self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel
+
+        all_iter = self.get_iterable_list()
+
+        maybe_mkdir_p(self.output_dir)
+
+        # test_run: serial smoke test on the first case (note: this case is
+        # processed again by the pool below, so its output is written twice)
+        for case_name in all_iter:
+            self.run_case_save(case_name)
+            break
+
+        # multiprocessing magic.
+        r = []
+        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+            for case_name in all_iter:
+                r.append(p.starmap_async(self.run_case_save,
+                                         ((case_name, ),)))
+            remaining = list(range(len(all_iter)))
+            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
+            # So we need to store the original pool of workers.
+            # NOTE(review): p._pool is a private multiprocessing API, used here
+            # to detect workers that died (e.g. OOM-killed) — verify it still
+            # exists when upgrading Python.
+            workers = [j for j in p._pool]
+            with tqdm(desc=None, total=len(all_iter)) as pbar:
+                while len(remaining) > 0:
+                    all_alive = all([j.is_alive() for j in workers])
+                    if not all_alive:
+                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
+                                           'OK jokes aside.\n'
+                                           'One of your background processes is missing. This could be because of '
+                                           'an error (look for an error message) or because it was killed '
+                                           'by your OS due to running out of RAM. If you don\'t see '
+                                           'an error message, out of RAM is likely the problem. In that case '
+                                           'reducing the number of workers might help')
+                    # tick the progress bar once per newly finished task, then poll again
+                    done = [i for i in remaining if r[i].ready()]
+                    for _ in done:
+                        pbar.update()
+                    remaining = [i for i in remaining if i not in done]
+                    sleep(0.1)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fb40d91dd6644d76fad9e1a0efe4b610dc5659b
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py
@@ -0,0 +1,526 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+import glob
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
+from tqdm import tqdm
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
+import SimpleITK as sitk
+from tqdm import tqdm
+from copy import deepcopy
+import json
+
+def create_image(image_arr, spacing):
+ image = sitk.GetImageFromArray(image_arr)
+ image.SetSpacing(spacing)
+ return image
+
+def get_shape_must_be_divisible_by(net_numpool_per_axis):
+ return 2 ** np.array(net_numpool_per_axis)
+
+def pad_shape(shape, must_be_divisible_by):
+ """
+ pads shape so that it is divisible by must_be_divisible_by
+ :param shape:
+ :param must_be_divisible_by:
+ :return:
+ """
+ if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
+ must_be_divisible_by = [must_be_divisible_by] * len(shape)
+ else:
+ assert len(must_be_divisible_by) == len(shape)
+
+ new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))]
+
+ for i in range(len(shape)):
+ if shape[i] % must_be_divisible_by[i] == 0:
+ new_shp[i] -= must_be_divisible_by[i]
+ new_shp = np.array(new_shp).astype(int)
+ return new_shp
+
+def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
+ """
+ this is the same as get_pool_and_conv_props_v2 from old nnunet
+
+ :param spacing:
+ :param patch_size:
+ :param min_feature_map_size: min edge length of feature maps in bottleneck
+ :param max_numpool:
+ :return:
+ """
+ # todo review this code
+ dim = len(spacing)
+
+ current_spacing = deepcopy(list(spacing))
+ current_size = deepcopy(list(patch_size))
+
+ pool_op_kernel_sizes = [[1] * len(spacing)]
+ conv_kernel_sizes = []
+
+ num_pool_per_axis = [0] * dim
+ kernel_size = [1] * dim
+
+ while True:
+ # exclude axes that we cannot pool further because of min_feature_map_size constraint
+ valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
+ if len(valid_axes_for_pool) < 1:
+ break
+
+ spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]
+
+ # find axis that are within factor of 2 within smallest spacing
+ min_spacing_of_valid = min(spacings_of_axes)
+ valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]
+
+ # max_numpool constraint
+ valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]
+
+ if len(valid_axes_for_pool) == 1:
+ if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
+ pass
+ else:
+ break
+ if len(valid_axes_for_pool) < 1:
+ break
+
+ # now we need to find kernel sizes
+ # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
+ # factor 2 of min_spacing. Once they are 3 they remain 3
+ for d in range(dim):
+ if kernel_size[d] == 3:
+ continue
+ else:
+ if spacings_of_axes[d] / min(current_spacing) < 2:
+ kernel_size[d] = 3
+
+ other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]
+
+ pool_kernel_sizes = [0] * dim
+ for v in valid_axes_for_pool:
+ pool_kernel_sizes[v] = 2
+ num_pool_per_axis[v] += 1
+ current_spacing[v] *= 2
+ current_size[v] = np.ceil(current_size[v] / 2)
+ for nv in other_axes:
+ pool_kernel_sizes[nv] = 1
+
+ pool_op_kernel_sizes.append(pool_kernel_sizes)
+ conv_kernel_sizes.append(deepcopy(kernel_size))
+ #print(conv_kernel_sizes)
+
+ must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
+ patch_size = pad_shape(patch_size, must_be_divisible_by)
+
+ # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
+ conv_kernel_sizes.append([3]*dim)
+ return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
+
+
+class DefaultPreprocessor(object):
+    def __init__(self,
+                 base_dir,
+                 ):
+        """
+        Preprocessor for the LiTS 2017 layout: a flat directory with
+        volume-<id>.nii / segmentation-<id>.nii file pairs. Target spacing,
+        labels and output directory are supplied via run().
+
+        :param base_dir: directory holding the volume/segmentation files
+        """
+        self.base_dir = base_dir
+
+    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
+        """
+        Crop -> normalize -> resample one case (channel-first arrays).
+
+        :param data: image array, 4d (c, z, y, x) — asserted below
+        :param seg: label array (1, z, y, x) or None (test cases)
+        :param properties: per-case metadata dict; updated in place with the
+            bookkeeping needed to undo the preprocessing at export time
+        :return: (data, seg) after cropping, normalization and resampling
+        """
+        # let's not mess up the inputs!
+        data = np.copy(data)
+        old_shape = data.shape
+        original_spacing = list(properties['spacing'])
+        # SimpleITK reports spacing as (x, y, z) while the array axes are
+        # (z, y, x), so the spacing is reversed here to match the array
+        original_spacing_trans = original_spacing[::-1]
+        properties["original_spacing_trans"] = original_spacing_trans
+        properties["target_spacing_trans"] = self.out_spacing
+
+        shape_before_cropping = data.shape[1:]
+        ## crop
+        properties['shape_before_cropping'] = shape_before_cropping
+        # this command will generate a segmentation. This is important because of the nonzero mask which we may need
+        data, seg, bbox = crop_to_nonzero(data, seg)
+        properties['bbox_used_for_cropping'] = bbox
+
+        # crop, remember to store size before cropping!
+        shape_before_resample = data.shape[1:]
+        properties['shape_after_cropping_before_resample'] = shape_before_resample
+
+        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)
+
+        # normalization needs a (possibly dummy, all-zero) seg as mask argument
+        if seg is None :
+            seg_norm = np.zeros_like(data)
+        else :
+            seg_norm = seg
+        data = self._normalize(data, seg_norm,
+                               self.foreground_intensity_properties_per_channel)
+
+        assert len(data.shape) == 4
+
+        # NOTE(review): the untransposed (x, y, z) spacing is passed here while
+        # new_shape was computed from the transposed one — confirm that
+        # resample_data_or_seg_to_shape uses the spacings only for the
+        # anisotropy decision, otherwise the axis order is inconsistent.
+        data = resample_data_or_seg_to_shape(data, new_shape,
+                                             original_spacing,
+                                             self.out_spacing,
+                                             order=3,
+                                             order_z=0)
+        properties['shape_after_resample'] = new_shape
+
+        if seg is not None :
+            assert len(seg.shape) == 4
+            # labels are resampled with is_seg=True / order=1
+            seg = resample_data_or_seg_to_shape(seg, new_shape,
+                                                original_spacing,
+                                                self.out_spacing,
+                                                is_seg=True,
+                                                order=1,
+                                                order_z=0)
+
+            # pre-sample foreground voxel coordinates for patch-based training
+            properties['class_locations'] = self._sample_foreground_locations(seg,
+                                                                              self.all_labels,
+                                                                              )
+
+            # store labels in the smallest integer type that can hold them
+            if np.max(seg) > 127:
+                seg = seg.astype(np.int16)
+            else:
+                seg = seg.astype(np.int8)
+
+        print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, '
+              f'new_spacing: {self.out_spacing}, boxes is {bbox}')
+
+        return data, seg
+
+ # need to modify
+ def get_iterable_list(self):
+ all_cases = os.listdir(self.base_dir)
+
+ all_cases_2 = []
+ for c in all_cases:
+ if "volume" in c and ".nii" in c:
+ ## get data id
+ all_cases_2.append(c.split("-")[-1].split(".")[0])
+
+ return all_cases_2
+
+    def _normalize(self, data: np.ndarray, seg: np.ndarray,
+                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
+        """
+        Normalize each channel in place via CTNormalization, parameterized with
+        the dataset-wide intensity statistics for that channel.
+
+        NOTE(review): this indexes foreground_intensity_properties_per_channel
+        with str(c); run() defaults that argument to None, which would raise
+        here — callers must always provide the statistics dict.
+        """
+        for c in range(data.shape[0]):
+            normalizer_class = CTNormalization
+            normalizer = normalizer_class(use_mask_for_norm=False,
+                                          intensityproperties=foreground_intensity_properties_per_channel[str(c)])
+            data[c] = normalizer.run(data[c], seg[0])
+        return data
+
+ # need to modify
+ def read_data(self, case_name):
+ ## only for CT dataset
+ try:
+ data = sitk.ReadImage(os.path.join(self.base_dir, f"volume-{case_name}.nii"))
+ except:
+ print(f"data read error: {self.base_dir, case_name}")
+ return None, None, None
+ seg_arr = None
+ ## 一定要是float32!!!!
+ data_arr = sitk.GetArrayFromImage(data).astype(np.float32)
+ data_arr = data_arr[None]
+
+ if os.path.exists(os.path.join(self.base_dir, f"segmentation-{case_name}.nii")):
+ seg = sitk.ReadImage(os.path.join(self.base_dir, f"segmentation-{case_name}.nii"))
+ ## 读出来以后一定转float32!!!
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)[None,]
+
+ intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data_arr)
+ else :
+ intensities_per_channel = []
+ intensity_statistics_per_channel = []
+
+ properties = {"spacing": data.GetSpacing(),
+ "raw_size": data_arr.shape[1:],
+ "name": case_name.split(".")[0],
+ "intensities_per_channel": intensities_per_channel,
+ "intensity_statistics_per_channel": intensity_statistics_per_channel}
+
+ return data_arr, seg_arr, properties
+
+ def run_case(self, case_name):
+ """
+ seg file can be none (test cases)
+
+ order of operations is: transpose -> crop -> resample
+ so when we export we need to run the following order: resample -> crop -> transpose (we could also run
+ transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner)
+ """
+ data, seg, properties = self.read_data(case_name)
+ if data is not None:
+ data, seg = self.run_case_npy(data, seg, properties)
+ return data, seg, properties
+ else :
+ return None, None, None
+
+ def run_case_save(self, case_name):
+ print(case_name + "~~~~~~~~" * 10)
+ data, seg, properties = self.run_case(case_name)
+ if data is not None:
+ # print('dtypes', data.dtype, seg.dtype)
+ case_name = case_name.split(".")[0]
+ np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg)
+ write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl')
+ print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}")
+
+ def experiment_plan(self, case_name):
+
+ data, seg, properties = self.read_data(case_name)
+ if data is None:
+ return None, None, None
+
+ print(f"labels is {np.unique(seg)}")
+ spacing = properties["spacing"]
+ raw_size = properties["raw_size"]
+ intensities_per_channel = properties["intensities_per_channel"]
+
+ return spacing, raw_size, intensities_per_channel
+
+    def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray:
+        """
+        Dataset-wide target spacing: the per-axis median, with the nnU-Net
+        anisotropy heuristic that gives a strongly low-resolution axis the
+        10th-percentile spacing instead of the median.
+
+        :param spacings: per-case spacing vectors (consistent axis order)
+        :param sizes: per-case shape vectors (same axis order)
+        :return: target spacing as np.ndarray
+        """
+        # if self.overwrite_target_spacing is not None:
+        #     return np.array(self.overwrite_target_spacing)
+
+        # spacings = self.dataset_fingerprint['spacings']
+        # sizes = self.dataset_fingerprint['shapes_after_crop']
+
+        # per-axis median spacing and median shape over all cases
+        target = np.percentile(np.vstack(spacings), 50, 0)
+        target_size = np.percentile(np.vstack(sizes), 50, 0)
+        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
+        # the following properties:
+        # - one axis which much lower resolution than the others
+        # - the lowres axis has much less voxels than the others
+        # - (the size in mm of the lowres axis is also reduced)
+        worst_spacing_axis = np.argmax(target)
+        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
+        other_spacings = [target[i] for i in other_axes]
+        other_sizes = [target_size[i] for i in other_axes]
+
+        has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings))
+        has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes)
+
+        if has_aniso_spacing and has_aniso_voxels:
+            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
+            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
+            # don't let the spacing of that axis get higher than the other axes
+            if target_spacing_of_that_axis < max(other_spacings):
+                target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5
+            target[worst_spacing_axis] = target_spacing_of_that_axis
+        return target
+
+    def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray],
+                          old_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
+                          new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray:
+        """Compute the voxel shape after resampling old_shape from old_spacing to new_spacing.
+
+        Spacings are given in SimpleITK (x, y, z) order and are reversed here to
+        match the (z, y, x) array axis order of old_shape.
+        """
+        ## spacing need to be transposed
+        old_spacing = list(old_spacing)[::-1]
+        new_spacing = list(new_spacing)[::-1]
+
+        assert len(old_spacing) == len(old_shape)
+        assert len(old_shape) == len(new_spacing)
+        # physical extent is preserved: new = old_spacing / new_spacing * old_shape
+        new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)])
+        return new_shape
+
+    def run_plan(self):
+        """Scan the whole dataset and derive a preprocessing plan.
+
+        Collects per-case spacing/size/foreground-intensity fingerprints,
+        prints per-channel intensity statistics, determines the full-resolution
+        target spacing and median resampled shape, estimates an initial patch
+        size, and writes the summary as JSON to ./data_analysis_result.txt.
+        """
+        all_iter = self.get_iterable_list()
+        spacings = []
+        sizes = []
+        intensities_per_channels = []
+        print(f"analysing data......")
+        for case in tqdm(all_iter, total=len(all_iter)):
+            spacing, size, intensities_per_channel = self.experiment_plan(case)
+            # experiment_plan reports unreadable cases as None -> skip them
+            if spacing is None:
+                continue
+
+            spacings.append(spacing)
+            sizes.append(size)
+            intensities_per_channels.append(intensities_per_channel)
+
+        print(f"all spacing is {spacings}")
+        print(f"all sizes is {sizes}")
+        # concatenate every case's sampled foreground intensities, per channel
+        foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in
+                                              range(len(intensities_per_channels[0]))]
+
+        num_channels = len(intensities_per_channels[0])
+
+        intensity_statistics_per_channel = {}
+        for i in range(num_channels):
+            intensity_statistics_per_channel[i] = {
+                'mean': float(np.mean(foreground_intensities_per_channel[i])),
+                'median': float(np.median(foreground_intensities_per_channel[i])),
+                'std': float(np.std(foreground_intensities_per_channel[i])),
+                'min': float(np.min(foreground_intensities_per_channel[i])),
+                'max': float(np.max(foreground_intensities_per_channel[i])),
+                'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)),
+                'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)),
+            }
+
+        print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}")
+
+        fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes)
+        print(f"fullres spacing is {fullres_spacing[::-1]}")
+
+        # get transposed new median shape (what we would have after resampling)
+        new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in
+                      zip(spacings, sizes)]
+        new_median_shape = np.median(new_shapes, 0)
+        print(f"median_shape is {new_median_shape}")
+
+        # initial patch size: axes weighted by 1/spacing, scaled to ~256^3 voxels total
+        tmp = 1 / np.array(fullres_spacing)
+        initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)]
+
+        print(f"initial_patch_size is {initial_patch_size[::-1]}")
+
+        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \
+            shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size,
+                                                                 4,
+                                                                 999999)
+        print(f"target medium patch size is {patch_size[::-1]}")
+
+        analysis_path = "./data_analysis_result.txt"
+        with open(analysis_path, "w") as f:
+
+            f.write(json.dumps({
+                "intensity_statistics_per_channel": intensity_statistics_per_channel,
+                "fullres spacing": fullres_spacing.tolist(),
+                "median_shape": new_median_shape.tolist(),
+                "initial_patch_size": initial_patch_size,
+                "target medium patch size": patch_size[::-1].tolist()
+            }))
+        print(f"Analysis done, save to {analysis_path}")
+
+
+    def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234,
+                                       num_samples: int = 10000):
+        """Sample foreground voxel intensities and summary statistics per channel.
+
+        images=image with multiple channels = shape (c, x, y(, z))
+        Foreground is every voxel with segmentation label > 0. Returns
+        (intensities_per_channel, intensity_statistics_per_channel); channels
+        with no foreground yield an empty sample list and NaN statistics.
+        """
+        assert len(images.shape) == 4
+        assert len(segmentation.shape) == 4
+
+        assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-("
+        assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-("
+
+        # fixed seed keeps the fingerprint deterministic across runs
+        rs = np.random.RandomState(seed)
+
+        intensities_per_channel = []
+        # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have
+        intensity_statistics_per_channel = []
+
+        # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work
+        foreground_mask = segmentation[0] > 0
+
+        for i in range(len(images)):
+            foreground_pixels = images[i][foreground_mask]
+            num_fg = len(foreground_pixels)
+            # sample with replacement so that we don't get issues with cases that have less than num_samples
+            # foreground_pixels. We could also just sample less in those cases but that would than cause these
+            # training cases to be underrepresented
+            intensities_per_channel.append(
+                rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else [])
+            intensity_statistics_per_channel.append({
+                'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan,
+                'median': np.median(foreground_pixels) if num_fg > 0 else np.nan,
+                'min': np.min(foreground_pixels) if num_fg > 0 else np.nan,
+                'max': np.max(foreground_pixels) if num_fg > 0 else np.nan,
+                'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan,
+                'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan,
+
+            })
+
+        return intensities_per_channel, intensity_statistics_per_channel
+
+    @staticmethod
+    def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]],
+                                     seed: int = 1234, verbose: bool = False):
+        """Sample voxel coordinates per class (or per label region) for patch sampling.
+
+        Returns a dict mapping each class (or region tuple) to an array of
+        sampled coordinates; empty classes map to an empty list.
+        """
+        num_samples = 10000
+        min_percent_coverage = 0.01  # at least 1% of the class voxels need to be selected, otherwise it may be too
+        # sparse
+        rndst = np.random.RandomState(seed)
+        class_locs = {}
+        for c in classes_or_regions:
+            # lists are not hashable -> use a tuple as dict key for regions
+            k = c if not isinstance(c, list) else tuple(c)
+            if isinstance(c, (tuple, list)):
+                ## region: union of all labels in the tuple/list
+                mask = seg == c[0]
+                for cc in c[1:]:
+                    mask = mask | (seg == cc)
+                all_locs = np.argwhere(mask)
+            else:
+                all_locs = np.argwhere(seg == c)
+            if len(all_locs) == 0:
+                class_locs[k] = []
+                continue
+            # cap at num_samples, but keep at least min_percent_coverage of the voxels
+            target_num_samples = min(num_samples, len(all_locs))
+            target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage)))
+
+            selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)]
+            class_locs[k] = selected
+            if verbose:
+                print(c, target_num_samples)
+
+        return class_locs
+
+    def run(self, output_spacing,
+            output_dir,
+            all_labels,
+            foreground_intensity_properties_per_channel=None,
+            num_processes=8):
+        """Preprocess every case and save .npz/.pkl outputs into output_dir.
+
+        Runs one case synchronously as a smoke test, then submits all cases
+        (including the first one again) to a spawn-based process pool and
+        polls for completion with a progress bar.
+        """
+        self.out_spacing = output_spacing
+        self.all_labels = all_labels
+        self.output_dir = output_dir
+        self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel
+
+        all_iter = self.get_iterable_list()
+
+        maybe_mkdir_p(self.output_dir)
+
+        # test_run
+        for case_name in all_iter:
+            self.run_case_save(case_name)
+            break
+
+        # multiprocessing magic.
+        # NOTE(review): AsyncResult.get() is never called, so exceptions raised
+        # inside workers are silently discarded — confirm whether failures
+        # should abort the run instead.
+        r = []
+        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+            for case_name in all_iter:
+                r.append(p.starmap_async(self.run_case_save,
+                                         ((case_name, ),)))
+            remaining = list(range(len(all_iter)))
+            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
+            # So we need to store the original pool of workers.
+            workers = [j for j in p._pool]
+            with tqdm(desc=None, total=len(all_iter)) as pbar:
+                while len(remaining) > 0:
+                    all_alive = all([j.is_alive() for j in workers])
+                    if not all_alive:
+                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
+                                           'OK jokes aside.\n'
+                                           'One of your background processes is missing. This could be because of '
+                                           'an error (look for an error message) or because it was killed '
+                                           'by your OS due to running out of RAM. If you don\'t see '
+                                           'an error message, out of RAM is likely the problem. In that case '
+                                           'reducing the number of workers might help')
+                    done = [i for i in remaining if r[i].ready()]
+                    for _ in done:
+                        pbar.update()
+                    remaining = [i for i in remaining if i not in done]
+                    sleep(0.1)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py
new file mode 100644
index 0000000000000000000000000000000000000000..a328ff0eaddda3f179ce99b5d3a5c9b8337c101c
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py
@@ -0,0 +1,542 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+import glob
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
+from tqdm import tqdm
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
+import SimpleITK as sitk
+from tqdm import tqdm
+from copy import deepcopy
+import json
+# from .default_preprocessor import DefaultPreprocessor
+
+def create_image(image_arr, spacing):
+    """Wrap a numpy array into a SimpleITK image with the given spacing."""
+    image = sitk.GetImageFromArray(image_arr)
+    image.SetSpacing(spacing)
+    return image
+
+def get_shape_must_be_divisible_by(net_numpool_per_axis):
+    """Per-axis divisibility constraint: 2 ** (number of poolings on that axis)."""
+    return 2 ** np.array(net_numpool_per_axis)
+
+def pad_shape(shape, must_be_divisible_by):
+    """
+    pads shape so that it is divisible by must_be_divisible_by
+    (rounds each axis up to the next multiple; axes already divisible are kept)
+    :param shape:
+    :param must_be_divisible_by: scalar or per-axis sequence
+    :return: np.ndarray of ints
+    """
+    if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
+        must_be_divisible_by = [must_be_divisible_by] * len(shape)
+    else:
+        assert len(must_be_divisible_by) == len(shape)
+
+    new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))]
+
+    # undo the overshoot for axes that were already divisible
+    for i in range(len(shape)):
+        if shape[i] % must_be_divisible_by[i] == 0:
+            new_shp[i] -= must_be_divisible_by[i]
+    new_shp = np.array(new_shp).astype(int)
+    return new_shp
+
+def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
+ """
+ this is the same as get_pool_and_conv_props_v2 from old nnunet
+
+ :param spacing:
+ :param patch_size:
+ :param min_feature_map_size: min edge length of feature maps in bottleneck
+ :param max_numpool:
+ :return:
+ """
+ # todo review this code
+ dim = len(spacing)
+
+ current_spacing = deepcopy(list(spacing))
+ current_size = deepcopy(list(patch_size))
+
+ pool_op_kernel_sizes = [[1] * len(spacing)]
+ conv_kernel_sizes = []
+
+ num_pool_per_axis = [0] * dim
+ kernel_size = [1] * dim
+
+ while True:
+ # exclude axes that we cannot pool further because of min_feature_map_size constraint
+ valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
+ if len(valid_axes_for_pool) < 1:
+ break
+
+ spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]
+
+ # find axis that are within factor of 2 within smallest spacing
+ min_spacing_of_valid = min(spacings_of_axes)
+ valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]
+
+ # max_numpool constraint
+ valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]
+
+ if len(valid_axes_for_pool) == 1:
+ if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
+ pass
+ else:
+ break
+ if len(valid_axes_for_pool) < 1:
+ break
+
+ # now we need to find kernel sizes
+ # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
+ # factor 2 of min_spacing. Once they are 3 they remain 3
+ for d in range(dim):
+ if kernel_size[d] == 3:
+ continue
+ else:
+ if spacings_of_axes[d] / min(current_spacing) < 2:
+ kernel_size[d] = 3
+
+ other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]
+
+ pool_kernel_sizes = [0] * dim
+ for v in valid_axes_for_pool:
+ pool_kernel_sizes[v] = 2
+ num_pool_per_axis[v] += 1
+ current_spacing[v] *= 2
+ current_size[v] = np.ceil(current_size[v] / 2)
+ for nv in other_axes:
+ pool_kernel_sizes[nv] = 1
+
+ pool_op_kernel_sizes.append(pool_kernel_sizes)
+ conv_kernel_sizes.append(deepcopy(kernel_size))
+ #print(conv_kernel_sizes)
+
+ must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
+ patch_size = pad_shape(patch_size, must_be_divisible_by)
+
+ # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
+ conv_kernel_sizes.append([3]*dim)
+ return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
+
+
+
+class MultiModalityPreprocessor(object):
+    """BraTS2023-style multi-modality MRI preprocessor that also emits a fixed-size global view."""
+
+    def __init__(self,
+                 base_dir,
+                 global_size=[128, 128, 128],
+                 ):
+        # NOTE(review): mutable default argument for global_size; it is never
+        # mutated here, but a tuple would be more defensive.
+        self.global_size = global_size
+        self.base_dir = base_dir
+        # self.image_dir = image_dir
+        # self.data_filenames = data_filenames
+        # self.seg_filename = seg_filename
+        # base_dir = "./data/raw_data/BraTS2023/"
+        # NOTE(review): base_dir is assigned twice (redundant second assignment).
+        self.base_dir = base_dir
+        # the four BraTS modality files expected inside each case folder
+        self.data_filenames = ["t2w.nii.gz",
+                               "t2f.nii.gz",
+                               "t1n.nii.gz",
+                               "t1c.nii.gz"]
+        self.seg_filename = "seg.nii.gz"
+
+    def get_iterable_list(self):
+        """Return the list of case folder names directly under base_dir."""
+        all_cases = os.listdir(os.path.join(self.base_dir))
+        return all_cases
+
+    def _normalize(self, data: np.ndarray, seg: np.ndarray,
+                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
+        """Z-score normalize each channel in place (no foreground masking) and return data."""
+        for c in range(data.shape[0]):
+            normalizer_class = ZScoreNormalization
+            normalizer = normalizer_class(use_mask_for_norm=False,
+                                          intensityproperties=foreground_intensity_properties_per_channel)
+            data[c] = normalizer.run(data[c], seg[0])
+        return data
+
+ def run_case_npy(self, data: np.ndarray, seg, properties: dict):
+ # let's not mess up the inputs!
+ data = np.copy(data)
+ old_shape = data.shape
+ original_spacing = list(properties['spacing'])
+ ## 由于old spacing读出来是反的,因此这里需要转置一下
+
+ original_spacing_trans = original_spacing[::-1]
+ properties["original_spacing_trans"] = original_spacing_trans
+ properties["target_spacing_trans"] = self.out_spacing
+
+ shape_before_cropping = data.shape[1:]
+ ## crop
+ properties['shape_before_cropping'] = shape_before_cropping
+ # this command will generate a segmentation. This is important because of the nonzero mask which we may need
+ data, seg, bbox = crop_to_nonzero(data, seg)
+ properties['bbox_used_for_cropping'] = bbox
+
+ # crop, remember to store size before cropping!
+ shape_before_resample = data.shape[1:]
+ properties['shape_after_cropping_before_resample'] = shape_before_resample
+
+ new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)
+
+ if seg is None :
+ seg_norm = np.zeros_like(data)
+ else :
+ seg_norm = seg
+ data = self._normalize(data, seg_norm,
+ self.foreground_intensity_properties_per_channel)
+
+ assert len(data.shape) == 4
+
+ data = resample_data_or_seg_to_shape(data, new_shape,
+ original_spacing,
+ self.out_spacing,
+ order=3,
+ order_z=0)
+
+ ## global view
+ data_global = resample_data_or_seg_to_shape(data, self.global_size,
+ original_spacing,
+ self.out_spacing,
+ order=3,
+ order_z=0)
+
+ # print(data.shape, data_global.shape)
+ # data = np.concatenate([data, data_global], axis=0)
+
+ properties['shape_after_resample'] = new_shape
+
+ if seg is not None :
+ assert len(seg.shape) == 4
+ seg = resample_data_or_seg_to_shape(seg, new_shape,
+ original_spacing,
+ self.out_spacing,
+ is_seg=True,
+ order=1,
+ order_z=0)
+
+ properties['class_locations'] = self._sample_foreground_locations(seg,
+ self.all_labels,
+ )
+
+ ## global view
+ seg_global = resample_data_or_seg_to_shape(seg, self.global_size,
+ original_spacing,
+ self.out_spacing,
+ order=1,
+ order_z=0)
+
+ if np.max(seg) > 127:
+ seg = seg.astype(np.int16)
+ else:
+ seg = seg.astype(np.int8)
+
+ print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, '
+ f'new_spacing: {self.out_spacing}, boxes is {bbox}')
+
+ return data, data_global, seg, seg_global
+
+    # need to modify
+    def read_data(self, case_name):
+        """Load all modalities (and optionally the segmentation) of one case.
+
+        Returns (data, seg_arr, properties) where data is (c, z, y, x) float32,
+        seg_arr is (1, z, y, x) float32 or None when seg_filename is empty, and
+        properties holds spacing, raw size and foreground intensity info.
+        """
+        ## only for CT dataset
+        assert len(self.data_filenames) != 0
+        data = []
+        for dfname in self.data_filenames:
+            d = sitk.ReadImage(os.path.join(self.base_dir, case_name, dfname))
+            # NOTE(review): spacing is taken from whichever modality is read
+            # last — assumes all modalities share the same spacing; confirm.
+            spacing = d.GetSpacing()
+            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])
+
+        data = np.concatenate(data, axis=0)
+
+        seg_arr = None
+        ## must be float32!
+
+        if self.seg_filename != "":
+            seg = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.seg_filename))
+            ## convert to float32 immediately after reading!
+            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
+            seg_arr = seg_arr[None]
+            intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data)
+
+        else :
+            intensities_per_channel = []
+            intensity_statistics_per_channel = []
+
+        properties = {"spacing": spacing,
+                      "raw_size": data.shape[1:],
+                      "name": case_name.split(".")[0],
+                      "intensities_per_channel": intensities_per_channel,
+                      "intensity_statistics_per_channel": intensity_statistics_per_channel}
+
+        return data, seg_arr, properties
+
+    def run_case(self, case_name):
+        """
+        Read one case from disk and run the full preprocessing pipeline on it.
+
+        seg file can be none (test cases)
+
+        order of operations is: transpose -> crop -> resample
+        so when we export we need to run the following order: resample -> crop -> transpose (we could also run
+        transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner)
+
+        Returns (data, data_global, seg, seg_global, properties).
+        """
+        data, seg, properties = self.read_data(case_name)
+
+        data, data_global, seg, seg_global = self.run_case_npy(data, seg, properties)
+
+        return data, data_global, seg, seg_global, properties
+
+    def run_case_save(self, case_name):
+        """Preprocess one case and write <output_dir>/<case>.npz (arrays) and .pkl (properties)."""
+        print(case_name + "~~~~~~~~" * 10)
+        data, data_global, seg, seg_global, properties = self.run_case_save_helper(case_name) if False else self.run_case(case_name)
+        # print('dtypes', data.dtype, seg.dtype)
+        case_name = case_name.split(".")[0]
+        np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, data_global=data_global, seg=seg, seg_global=seg_global)
+        write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl')
+        print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}, data shape is {data.shape}, data_global shape is {data_global.shape}")
+
+    def experiment_plan(self, case_name):
+        """Read one case and return (spacing, raw_size, intensities_per_channel).
+
+        NOTE(review): unlike the sibling preprocessor, there is no `data is None`
+        guard here; read_data in this class never returns None, so this appears
+        safe — confirm if read_data ever grows failure handling.
+        """
+        data, seg, properties = self.read_data(case_name)
+        print(f"labels is {np.unique(seg)}")
+        spacing = properties["spacing"]
+        raw_size = properties["raw_size"]
+        intensities_per_channel = properties["intensities_per_channel"]
+
+        return spacing, raw_size, intensities_per_channel
+
+    def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray:
+        """Median spacing over the dataset, with the nnU-Net anisotropy correction
+        (lower the worst axis to its 10th-percentile spacing when strongly anisotropic)."""
+        # if self.overwrite_target_spacing is not None:
+        #     return np.array(self.overwrite_target_spacing)
+
+        # spacings = self.dataset_fingerprint['spacings']
+        # sizes = self.dataset_fingerprint['shapes_after_crop']
+
+        target = np.percentile(np.vstack(spacings), 50, 0)
+        target_size = np.percentile(np.vstack(sizes), 50, 0)
+        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
+        # the following properties:
+        # - one axis which much lower resolution than the others
+        # - the lowres axis has much less voxels than the others
+        # - (the size in mm of the lowres axis is also reduced)
+        worst_spacing_axis = np.argmax(target)
+        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
+        other_spacings = [target[i] for i in other_axes]
+        other_sizes = [target_size[i] for i in other_axes]
+
+        has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings))
+        has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes)
+
+        if has_aniso_spacing and has_aniso_voxels:
+            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
+            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
+            # don't let the spacing of that axis get higher than the other axes
+            if target_spacing_of_that_axis < max(other_spacings):
+                target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5
+            target[worst_spacing_axis] = target_spacing_of_that_axis
+        return target
+
+    def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray],
+                          old_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
+                          new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray:
+        """Shape after resampling; spacings are (x, y, z) and reversed to match (z, y, x) arrays."""
+        ## spacing need to be transposed
+        old_spacing = list(old_spacing)[::-1]
+        new_spacing = list(new_spacing)[::-1]
+
+        assert len(old_spacing) == len(old_shape)
+        assert len(old_shape) == len(new_spacing)
+        new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)])
+        return new_shape
+
+    def run_plan(self):
+        """Fingerprint the dataset and write the preprocessing plan to ./data_analysis_result.txt.
+
+        NOTE(review): unlike the sibling preprocessor, failed cases are not
+        skipped here (experiment_plan never returns None in this class).
+        """
+        all_iter = self.get_iterable_list()
+        spacings = []
+        sizes = []
+        intensities_per_channels = []
+        print(f"analysing data......")
+        for case in tqdm(all_iter, total=len(all_iter)):
+            spacing, size, intensities_per_channel = self.experiment_plan(case)
+            spacings.append(spacing)
+            sizes.append(size)
+            intensities_per_channels.append(intensities_per_channel)
+
+        print(f"all spacing is {spacings}")
+        print(f"all sizes is {sizes}")
+        # concatenate every case's sampled foreground intensities, per channel
+        foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in
+                                              range(len(intensities_per_channels[0]))]
+
+        num_channels = len(intensities_per_channels[0])
+
+        intensity_statistics_per_channel = {}
+        for i in range(num_channels):
+            intensity_statistics_per_channel[i] = {
+                'mean': float(np.mean(foreground_intensities_per_channel[i])),
+                'median': float(np.median(foreground_intensities_per_channel[i])),
+                'std': float(np.std(foreground_intensities_per_channel[i])),
+                'min': float(np.min(foreground_intensities_per_channel[i])),
+                'max': float(np.max(foreground_intensities_per_channel[i])),
+                'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)),
+                'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)),
+            }
+
+        print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}")
+
+        fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes)
+        print(f"fullres spacing is {fullres_spacing[::-1]}")
+
+        # get transposed new median shape (what we would have after resampling)
+        new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in
+                      zip(spacings, sizes)]
+        new_median_shape = np.median(new_shapes, 0)
+        print(f"median_shape is {new_median_shape}")
+
+        # initial patch size: axes weighted by 1/spacing, scaled to ~256^3 voxels total
+        tmp = 1 / np.array(fullres_spacing)
+        initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)]
+
+        print(f"initial_patch_size is {initial_patch_size[::-1]}")
+
+        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \
+            shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size,
+                                                                 4,
+                                                                 999999)
+        print(f"target medium patch size is {patch_size[::-1]}")
+
+        analysis_path = "./data_analysis_result.txt"
+        with open(analysis_path, "w") as f:
+
+            f.write(json.dumps({
+                "intensity_statistics_per_channel": intensity_statistics_per_channel,
+                "fullres spacing": fullres_spacing.tolist(),
+                "median_shape": new_median_shape.tolist(),
+                "initial_patch_size": initial_patch_size,
+                "target medium patch size": patch_size[::-1].tolist()
+            }))
+        print(f"Analysis done, save to {analysis_path}")
+
+
+    def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234,
+                                       num_samples: int = 10000):
+        """
+        Sample foreground voxel intensities and summary statistics per channel.
+        images=image with multiple channels = shape (c, x, y(, z))
+        Foreground = voxels with segmentation label > 0; channels without
+        foreground yield an empty sample list and NaN statistics.
+        """
+        assert len(images.shape) == 4
+        assert len(segmentation.shape) == 4
+
+        assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-("
+        assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-("
+
+        rs = np.random.RandomState(seed)
+
+        intensities_per_channel = []
+        # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have
+        intensity_statistics_per_channel = []
+
+        # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work
+        foreground_mask = segmentation[0] > 0
+
+        for i in range(len(images)):
+            foreground_pixels = images[i][foreground_mask]
+            num_fg = len(foreground_pixels)
+            # sample with replacement so that we don't get issues with cases that have less than num_samples
+            # foreground_pixels. We could also just sample less in those cases but that would than cause these
+            # training cases to be underrepresented
+            intensities_per_channel.append(
+                rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else [])
+            intensity_statistics_per_channel.append({
+                'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan,
+                'median': np.median(foreground_pixels) if num_fg > 0 else np.nan,
+                'min': np.min(foreground_pixels) if num_fg > 0 else np.nan,
+                'max': np.max(foreground_pixels) if num_fg > 0 else np.nan,
+                'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan,
+                'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan,
+
+            })
+
+        return intensities_per_channel, intensity_statistics_per_channel
+
+    @staticmethod
+    def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]],
+                                     seed: int = 1234, verbose: bool = False):
+        """Sample voxel coordinates per class (or label region) for later patch sampling."""
+        num_samples = 10000
+        min_percent_coverage = 0.01  # at least 1% of the class voxels need to be selected, otherwise it may be too
+        # sparse
+        rndst = np.random.RandomState(seed)
+        class_locs = {}
+        for c in classes_or_regions:
+            # lists are unhashable -> use a tuple as dict key for regions
+            k = c if not isinstance(c, list) else tuple(c)
+            if isinstance(c, (tuple, list)):
+                ## region: union of all labels in the tuple/list
+                mask = seg == c[0]
+                for cc in c[1:]:
+                    mask = mask | (seg == cc)
+                all_locs = np.argwhere(mask)
+            else:
+                all_locs = np.argwhere(seg == c)
+            if len(all_locs) == 0:
+                class_locs[k] = []
+                continue
+            # cap at num_samples but keep at least min_percent_coverage of the voxels
+            target_num_samples = min(num_samples, len(all_locs))
+            target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage)))
+
+            selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)]
+            class_locs[k] = selected
+            if verbose:
+                print(c, target_num_samples)
+
+        return class_locs
+
+    def run(self,
+            output_spacing,
+            output_dir,
+            all_labels,
+            num_processes=8):
+        """Preprocess all cases into output_dir: one synchronous smoke-test case,
+        then every case in a spawn-based process pool, polled with a progress bar."""
+        self.out_spacing = output_spacing
+        self.all_labels = all_labels
+        self.output_dir = output_dir
+        self.foreground_intensity_properties_per_channel = {}
+
+        all_iter = self.get_iterable_list()
+
+        maybe_mkdir_p(self.output_dir)
+
+        # test_run
+        for case_name in all_iter:
+            self.run_case_save(case_name)
+            break
+
+        # NOTE(review): AsyncResult.get() is never called, so worker exceptions
+        # are silently discarded — confirm whether failures should abort the run.
+        r = []
+        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+            for case_name in all_iter:
+                r.append(p.starmap_async(self.run_case_save,
+                                         ((case_name, ),)))
+            remaining = list(range(len(all_iter)))
+            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
+            # So we need to store the original pool of workers.
+            workers = [j for j in p._pool]
+            with tqdm(desc=None, total=len(all_iter)) as pbar:
+                while len(remaining) > 0:
+                    all_alive = all([j.is_alive() for j in workers])
+                    if not all_alive:
+                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
+                                           'OK jokes aside.\n'
+                                           'One of your background processes is missing. This could be because of '
+                                           'an error (look for an error message) or because it was killed '
+                                           'by your OS due to running out of RAM. If you don\'t see '
+                                           'an error message, out of RAM is likely the problem. In that case '
+                                           'reducing the number of workers might help')
+                    done = [i for i in remaining if r[i].ready()]
+                    for _ in done:
+                        pbar.update()
+                    remaining = [i for i in remaining if i not in done]
+                    sleep(0.1)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dd6283d331372c574e545f371d7755eb3552d56
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py
@@ -0,0 +1,134 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+import glob
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
+from tqdm import tqdm
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
+import SimpleITK as sitk
+from tqdm import tqdm
+from copy import deepcopy
+import json
+from .default_preprocessor import DefaultPreprocessor
+
+class MultiModalityPreprocessor(DefaultPreprocessor):
+    """Multi-modality MRI preprocessor configured by explicit modality/seg file names."""
+
+    def __init__(self,
+                 base_dir,
+                 image_dir,
+                 data_filenames=[],
+                 seg_filename="",
+                 ):
+        # NOTE(review): DefaultPreprocessor.__init__ is not called — confirm the
+        # base class holds no state that must be initialized.
+        # NOTE(review): mutable default for data_filenames; never mutated here.
+        self.base_dir = base_dir
+        self.image_dir = image_dir
+        self.data_filenames = data_filenames
+        self.seg_filename = seg_filename
+
+    def get_iterable_list(self):
+        """Return the case folder names under base_dir/image_dir."""
+        all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
+        return all_cases
+
+    def _normalize(self, data: np.ndarray, seg: np.ndarray,
+                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
+        """Z-score normalize each channel in place (no foreground masking) and return data."""
+        for c in range(data.shape[0]):
+            normalizer_class = ZScoreNormalization
+            normalizer = normalizer_class(use_mask_for_norm=False,
+                                          intensityproperties=foreground_intensity_properties_per_channel)
+            data[c] = normalizer.run(data[c], seg[0])
+        return data
+
+    # need to modify
+    def read_data(self, case_name):
+        """Load all configured modality files (and optional segmentation) for one case.
+
+        Returns (data, seg_arr, properties): data is (c, z, y, x) float32,
+        seg_arr is (1, z, y, x) float32 or None when seg_filename is empty.
+        """
+        ## only for CT dataset
+        assert len(self.data_filenames) != 0
+        data = []
+        for dfname in self.data_filenames:
+            d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname))
+            # NOTE(review): spacing comes from the last modality read — assumes
+            # all modalities share the same spacing; confirm.
+            spacing = d.GetSpacing()
+            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])
+
+        data = np.concatenate(data, axis=0)
+
+        seg_arr = None
+        ## must be float32!
+
+        if self.seg_filename != "":
+            seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename))
+            ## convert to float32 immediately after reading!
+            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
+            seg_arr = seg_arr[None]
+            intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data)
+
+        else :
+            intensities_per_channel = []
+            intensity_statistics_per_channel = []
+
+        properties = {"spacing": spacing,
+                      "raw_size": data.shape[1:],
+                      "name": case_name.split(".")[0],
+                      "intensities_per_channel": intensities_per_channel,
+                      "intensity_statistics_per_channel": intensity_statistics_per_channel}
+
+        return data, seg_arr, properties
+
+    def run(self,
+            output_spacing,
+            output_dir,
+            all_labels,
+            num_processes=8):
+        """Preprocess all cases into output_dir: one synchronous smoke-test case,
+        then every case in a spawn-based process pool, polled with a progress bar."""
+        self.out_spacing = output_spacing
+        self.all_labels = all_labels
+        self.output_dir = output_dir
+        self.foreground_intensity_properties_per_channel = {}
+
+        all_iter = self.get_iterable_list()
+
+        maybe_mkdir_p(self.output_dir)
+
+        # test_run
+        for case_name in all_iter:
+            self.run_case_save(case_name)
+            break
+
+        # NOTE(review): AsyncResult.get() is never called, so worker exceptions
+        # are silently discarded — confirm whether failures should abort the run.
+        r = []
+        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+            for case_name in all_iter:
+                r.append(p.starmap_async(self.run_case_save,
+                                         ((case_name, ),)))
+            remaining = list(range(len(all_iter)))
+            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
+            # So we need to store the original pool of workers.
+            workers = [j for j in p._pool]
+            with tqdm(desc=None, total=len(all_iter)) as pbar:
+                while len(remaining) > 0:
+                    all_alive = all([j.is_alive() for j in workers])
+                    if not all_alive:
+                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
+                                           'OK jokes aside.\n'
+                                           'One of your background processes is missing. This could be because of '
+                                           'an error (look for an error message) or because it was killed '
+                                           'by your OS due to running out of RAM. If you don\'t see '
+                                           'an error message, out of RAM is likely the problem. In that case '
+                                           'reducing the number of workers might help')
+                    done = [i for i in remaining if r[i].ready()]
+                    for _ in done:
+                        pbar.update()
+                    remaining = [i for i in remaining if i not in done]
+                    sleep(0.1)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region.py
new file mode 100644
index 0000000000000000000000000000000000000000..7eaf2a0e22b906758bd5203e95b866e6f0bf5327
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region.py
@@ -0,0 +1,209 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+import glob
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
+from tqdm import tqdm
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization, CTNormStandard
+import SimpleITK as sitk
+from tqdm import tqdm
+from copy import deepcopy
+import json
+from .default_preprocessor import DefaultPreprocessor
+
class MultiInputAndRegionPreprocessor(DefaultPreprocessor):
    """Preprocessor for cases stored as one folder per case containing several
    image files (channels) plus an optional segmentation, with region-style
    labels.

    Pipeline per case: read -> crop to nonzero -> per-channel CT
    normalization -> resample to target spacing -> sample foreground
    locations -> save.
    """

    def __init__(self,
                 base_dir,
                 image_dir,
                 data_filenames=None,
                 seg_filename="",
                 ):
        """
        :param base_dir: dataset root directory.
        :param image_dir: sub-directory of base_dir containing one folder per case.
        :param data_filenames: list of image filenames (one per channel) inside
            each case folder. BUGFIX(review): the default was a shared mutable
            list ([]); a None sentinel avoids cross-instance state.
        :param seg_filename: segmentation filename inside each case folder;
            "" means no segmentation is available.
        """
        self.base_dir = base_dir
        self.image_dir = image_dir
        self.data_filenames = data_filenames if data_filenames is not None else []
        self.seg_filename = seg_filename

    def get_iterable_list(self):
        """One entry per case: the directory names under base_dir/image_dir."""
        all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
        return all_cases

    def _normalize(self, data: np.ndarray, seg: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        """CT-normalize each channel in place using per-channel intensity stats.

        NOTE(review): requires an entry for every channel index (string key)
        in foreground_intensity_properties_per_channel — run() defaults this
        to {}, which would raise a KeyError here; confirm callers always pass
        the intensity properties.
        """
        for c in range(data.shape[0]):
            normalizer_class = CTNormalization
            normalizer = normalizer_class(use_mask_for_norm=False,
                                          intensityproperties=foreground_intensity_properties_per_channel[str(c)])
            data[c] = normalizer.run(data[c], seg[0])
        return data

    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
        """Crop, normalize and resample a single case.

        :param data: 4D array (c, z, y, x) of the stacked channels.
        :param seg: 4D array (1, z, y, x) or None.
        :param properties: per-case metadata dict; enriched in place.
        :return: (data, seg) after cropping, normalization and resampling.
        """
        # let's not mess up the inputs!
        data = np.copy(data)
        old_shape = data.shape
        original_spacing = list(properties['spacing'])
        # SimpleITK spacing is (x, y, z) while the arrays are (z, y, x), so the
        # spacing must be reversed before pairing it with array shapes.
        original_spacing_trans = original_spacing[::-1]
        properties["original_spacing_trans"] = original_spacing_trans
        properties["target_spacing_trans"] = self.out_spacing

        shape_before_cropping = data.shape[1:]
        properties['shape_before_cropping'] = shape_before_cropping
        # this command will generate a segmentation. This is important because of the nonzero mask which we may need
        data, seg, bbox = crop_to_nonzero(data, seg)

        properties['bbox_used_for_cropping'] = bbox

        if seg is None:
            # dummy mask so _normalize always has something to index into
            seg_norm = np.zeros_like(data)
        else:
            seg_norm = seg
        data = self._normalize(data, seg_norm,
                               self.foreground_intensity_properties_per_channel)

        # crop, remember to store size before cropping!
        shape_before_resample = data.shape[1:]
        properties['shape_after_cropping_before_resample'] = shape_before_resample

        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)

        assert len(data.shape) == 4

        # NOTE(review): original_spacing ((x, y, z) order) is passed here while
        # new_shape was computed from the reversed spacing. This is harmless as
        # long as force_separate_z stays at its default (False), because the
        # spacing args are then unused — confirm before enabling separate-z.
        data = resample_data_or_seg_to_shape(data, new_shape,
                                             original_spacing,
                                             self.out_spacing,
                                             order=3,
                                             order_z=0)
        properties['shape_after_resample'] = new_shape

        if seg is not None:
            assert len(seg.shape) == 4
            seg = resample_data_or_seg_to_shape(seg, new_shape,
                                                original_spacing,
                                                self.out_spacing,
                                                is_seg=True,
                                                order=1,
                                                order_z=0)

            properties['class_locations'] = self._sample_foreground_locations(seg,
                                                                              self.all_labels,
                                                                              True)

        # keep the smallest integer dtype that can hold every label value
        # NOTE(review): this raises if seg is None (np.max(None)) — confirm a
        # segmentation is always present, or move this under the guard above.
        if np.max(seg) > 127:
            seg = seg.astype(np.int16)
        else:
            seg = seg.astype(np.int8)

        print(f'old shape: {old_shape}, shape_after_cropping_before_resample is {shape_before_resample}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, '
              f'new_spacing: {self.out_spacing}, boxes is {bbox}')

        return data, seg

    # need to modify
    def read_data(self, case_name):
        """Read all channels (and the optional segmentation) for one case.

        Only intended for CT datasets. All arrays are cast to float32 right
        after reading.

        :return: (data (c, z, y, x), seg or None, properties dict)
        """
        assert len(self.data_filenames) != 0
        data = []
        for dfname in self.data_filenames:
            d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname))
            spacing = d.GetSpacing()
            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])

        data = np.concatenate(data, axis=0)

        seg_arr = None

        if self.seg_filename != "":
            seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename))
            # must be float32 right after reading!
            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
            seg_arr = seg_arr[None]
            intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data)

        else:
            intensities_per_channel = []
            intensity_statistics_per_channel = []

        properties = {"spacing": spacing,
                      "raw_size": data.shape[1:],
                      "name": case_name.split(".")[0],
                      "intensities_per_channel": intensities_per_channel,
                      "intensity_statistics_per_channel": intensity_statistics_per_channel}

        return data, seg_arr, properties

    def run(self,
            output_spacing,
            output_dir,
            all_labels_dict,
            num_processes=8,
            foreground_intensity_properties_per_channel=None
            ):
        """Preprocess every case and write the results to ``output_dir``.

        :param output_spacing: target voxel spacing.
        :param output_dir: output directory.
        :param all_labels_dict: labels in region format,
            e.g. {"a": [0, 1, 2, 3], "b": [4, 5], "c": 9}.
        :param num_processes: size of the worker pool.
        :param foreground_intensity_properties_per_channel: per-channel CT
            intensity statistics used by _normalize. BUGFIX(review): default
            was a shared mutable dict ({}).
        :raises RuntimeError: if a worker dies, or re-raises worker exceptions.
        """
        self.out_spacing = output_spacing

        self.all_labels_dict = all_labels_dict
        self.all_labels = []

        for k, v in all_labels_dict.items():
            self.all_labels.append(v)

        self.output_dir = output_dir
        self.foreground_intensity_properties_per_channel = (
            foreground_intensity_properties_per_channel
            if foreground_intensity_properties_per_channel is not None else {})

        all_iter = self.get_iterable_list()

        maybe_mkdir_p(self.output_dir)

        # Smoke-test the first case in the main process so obvious errors
        # surface immediately. NOTE(review): this case is processed a second
        # time by the pool below; kept as-is for backward compatibility.
        for case_name in all_iter:
            self.run_case_save(case_name)
            break

        r = []
        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
            for case_name in all_iter:
                r.append(p.starmap_async(self.run_case_save,
                                         ((case_name, ),)))
            remaining = list(range(len(all_iter)))
            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
            # So we need to store the original pool of workers.
            workers = [j for j in p._pool]
            with tqdm(desc=None, total=len(all_iter)) as pbar:
                while len(remaining) > 0:
                    all_alive = all([j.is_alive() for j in workers])
                    if not all_alive:
                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
                                           'OK jokes aside.\n'
                                           'One of your background processes is missing. This could be because of '
                                           'an error (look for an error message) or because it was killed '
                                           'by your OS due to running out of RAM. If you don\'t see '
                                           'an error message, out of RAM is likely the problem. In that case '
                                           'reducing the number of workers might help')
                    done = [i for i in remaining if r[i].ready()]
                    for i in done:
                        # BUGFIX(review): get() re-raises worker exceptions
                        # that the original silently dropped.
                        r[i].get()
                        pbar.update()
                    remaining = [i for i in remaining if i not in done]
                    sleep(0.1)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region_01norm_first.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region_01norm_first.py
new file mode 100644
index 0000000000000000000000000000000000000000..05697234cfc7c8b6276ab71de26d1670246f2a0a
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region_01norm_first.py
@@ -0,0 +1,239 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+import glob
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
+from tqdm import tqdm
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization, CTNormStandard
+import SimpleITK as sitk
+from tqdm import tqdm
+from copy import deepcopy
+import json
+from .default_preprocessor import DefaultPreprocessor
+
class MultiInputAndRegionPreprocessor(DefaultPreprocessor):
    """Preprocessor for multi-channel cases with region-style labels that
    normalizes BEFORE cropping (windowed 0-1 CT normalization).

    Pipeline per case: read -> clip/rescale normalization -> crop to
    nonzero -> resample to target spacing -> sample foreground locations ->
    save.
    """

    def __init__(self,
                 base_dir,
                 image_dir,
                 data_filenames=None,
                 seg_filename="",
                 norm_clip_min=-175,
                 norm_clip_max=250,
                 ):
        """
        :param base_dir: dataset root directory.
        :param image_dir: sub-directory of base_dir containing one folder per case.
        :param data_filenames: list of image filenames (one per channel) inside
            each case folder. BUGFIX(review): the default was a shared mutable
            list ([]); a None sentinel avoids cross-instance state.
        :param seg_filename: segmentation filename; "" means no segmentation.
        :param norm_clip_min: lower HU clip bound for normalization.
        :param norm_clip_max: upper HU clip bound for normalization.
        """
        self.base_dir = base_dir
        self.image_dir = image_dir
        self.data_filenames = data_filenames if data_filenames is not None else []
        self.seg_filename = seg_filename
        self.norm_clip_min = norm_clip_min
        self.norm_clip_max = norm_clip_max

    def get_iterable_list(self):
        """One entry per case: the directory names under base_dir/image_dir."""
        all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
        return all_cases

    def _normalize(self, data: np.ndarray, seg: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        """Clip to [norm_clip_min, norm_clip_max] and rescale to [0, 1].

        Applied to the whole array at once; ``seg`` and the intensity
        properties are accepted for interface compatibility but unused here.
        """
        normalizer = CTNormStandard(a_min=self.norm_clip_min,
                                    a_max=self.norm_clip_max,
                                    b_min=0.0,
                                    b_max=1.0, clip=True)

        data = normalizer(data)
        return data

    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
        """Normalize first, then crop to nonzero and resample one case.

        :param data: 4D array (c, z, y, x) of the stacked channels.
        :param seg: 4D array (1, z, y, x) or None.
        :param properties: per-case metadata dict; enriched in place.
        :return: (data, seg) after normalization, cropping and resampling.
        """
        # let's not mess up the inputs!
        data = np.copy(data)
        old_shape = data.shape
        original_spacing = list(properties['spacing'])
        # SimpleITK spacing is (x, y, z) while the arrays are (z, y, x), so the
        # spacing must be reversed before pairing it with array shapes.
        original_spacing_trans = original_spacing[::-1]
        properties["original_spacing_trans"] = original_spacing_trans
        properties["target_spacing_trans"] = self.out_spacing

        # normalize BEFORE cropping — this ordering is what distinguishes this
        # preprocessor from the crop-first variant
        need_to_check = False
        if seg is None:
            seg_norm = np.zeros_like(data)
        else:
            seg_norm = seg
            before_crop_seg_sum = np.sum(seg.astype(np.uint8))
            need_to_check = True
        data = self._normalize(data, seg_norm,
                               self.foreground_intensity_properties_per_channel)

        shape_before_cropping = data.shape[1:]
        properties['shape_before_cropping'] = shape_before_cropping
        # this command will generate a segmentation. This is important because of the nonzero mask which we may need
        data, seg, bbox = crop_to_nonzero(data, seg)

        if need_to_check:
            # sanity check: cropping must not remove foreground voxels
            # (crop_to_nonzero marks outside-mask background as -1, reset it)
            seg_temp = np.copy(seg)
            seg_temp[seg_temp == -1] = 0
            after_crop_seg_sum = np.sum(seg_temp.astype(np.uint8))
            print(f"before crop seg sum is {before_crop_seg_sum}, after is {after_crop_seg_sum}")

        properties['bbox_used_for_cropping'] = bbox

        # crop, remember to store size before cropping!
        shape_before_resample = data.shape[1:]
        properties['shape_after_cropping_before_resample'] = shape_before_resample

        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)

        assert len(data.shape) == 4

        # NOTE(review): original_spacing ((x, y, z) order) is passed here while
        # new_shape was computed from the reversed spacing. Harmless while
        # force_separate_z stays False (spacing args unused) — confirm before
        # enabling separate-z resampling.
        data = resample_data_or_seg_to_shape(data, new_shape,
                                             original_spacing,
                                             self.out_spacing,
                                             order=3,
                                             order_z=0)
        properties['shape_after_resample'] = new_shape

        if seg is not None:
            assert len(seg.shape) == 4
            seg = resample_data_or_seg_to_shape(seg, new_shape,
                                                original_spacing,
                                                self.out_spacing,
                                                is_seg=True,
                                                order=1,
                                                order_z=0)

            properties['class_locations'] = self._sample_foreground_locations(seg,
                                                                              self.all_labels,
                                                                              True)

        # keep the smallest integer dtype that can hold every label value
        # NOTE(review): this raises if seg is None (np.max(None)) — confirm a
        # segmentation is always present, or move this under the guard above.
        if np.max(seg) > 127:
            seg = seg.astype(np.int16)
        else:
            seg = seg.astype(np.int8)

        print(f'old shape: {old_shape}, shape_after_cropping_before_resample is {shape_before_resample}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, '
              f'new_spacing: {self.out_spacing}, boxes is {bbox}')

        return data, seg

    # need to modify
    def read_data(self, case_name):
        """Read all channels (and the optional segmentation) for one case.

        Only intended for CT datasets. All arrays are cast to float32 right
        after reading.

        :return: (data (c, z, y, x), seg or None, properties dict)
        """
        assert len(self.data_filenames) != 0
        data = []
        for dfname in self.data_filenames:
            d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname))
            spacing = d.GetSpacing()
            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])

        data = np.concatenate(data, axis=0)

        seg_arr = None

        if self.seg_filename != "":
            seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename))
            # must be float32 right after reading!
            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
            seg_arr = seg_arr[None]
            intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data)

        else:
            intensities_per_channel = []
            intensity_statistics_per_channel = []

        properties = {"spacing": spacing,
                      "raw_size": data.shape[1:],
                      "name": case_name.split(".")[0],
                      "intensities_per_channel": intensities_per_channel,
                      "intensity_statistics_per_channel": intensity_statistics_per_channel}

        return data, seg_arr, properties

    def run(self,
            output_spacing,
            output_dir,
            all_labels_dict,
            num_processes=8):
        """Preprocess every case and write the results to ``output_dir``.

        :param output_spacing: target voxel spacing.
        :param output_dir: output directory.
        :param all_labels_dict: labels in region format,
            e.g. {"a": [0, 1, 2, 3], "b": [4, 5], "c": 9}.
        :param num_processes: size of the worker pool.
        :raises RuntimeError: if a worker dies, or re-raises worker exceptions.
        """
        self.out_spacing = output_spacing

        self.all_labels_dict = all_labels_dict
        self.all_labels = []

        for k, v in all_labels_dict.items():
            self.all_labels.append(v)

        self.output_dir = output_dir
        self.foreground_intensity_properties_per_channel = {}

        all_iter = self.get_iterable_list()

        maybe_mkdir_p(self.output_dir)

        # Smoke-test the first case in the main process so obvious errors
        # surface immediately. NOTE(review): this case is processed a second
        # time by the pool below; kept as-is for backward compatibility.
        for case_name in all_iter:
            self.run_case_save(case_name)
            break

        r = []
        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
            for case_name in all_iter:
                r.append(p.starmap_async(self.run_case_save,
                                         ((case_name, ),)))
            remaining = list(range(len(all_iter)))
            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
            # So we need to store the original pool of workers.
            workers = [j for j in p._pool]
            with tqdm(desc=None, total=len(all_iter)) as pbar:
                while len(remaining) > 0:
                    all_alive = all([j.is_alive() for j in workers])
                    if not all_alive:
                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
                                           'OK jokes aside.\n'
                                           'One of your background processes is missing. This could be because of '
                                           'an error (look for an error message) or because it was killed '
                                           'by your OS due to running out of RAM. If you don\'t see '
                                           'an error message, out of RAM is likely the problem. In that case '
                                           'reducing the number of workers might help')
                    done = [i for i in remaining if r[i].ready()]
                    for i in done:
                        # BUGFIX(review): get() re-raises worker exceptions
                        # that the original silently dropped.
                        r[i].get()
                        pbar.update()
                    remaining = [i for i in remaining if i not in done]
                    sleep(0.1)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_mrinorm_noresample_nocrop.py b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_mrinorm_noresample_nocrop.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc243413f747e40ee14437cba5a2489d97819c4e
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_mrinorm_noresample_nocrop.py
@@ -0,0 +1,167 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+import glob
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
+from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
+from tqdm import tqdm
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
+import SimpleITK as sitk
+from tqdm import tqdm
+from copy import deepcopy
+import json
+from .default_preprocessor import DefaultPreprocessor
+
class Preprocessor(DefaultPreprocessor):
    """Preprocessor that only z-score normalizes: no cropping and no
    resampling (MRI-style normalization, geometry preserved).
    """

    def __init__(self,
                 base_dir,
                 image_dir,
                 data_filenames=None,
                 seg_filename="",
                 ):
        """
        :param base_dir: dataset root directory.
        :param image_dir: sub-directory of base_dir containing one folder per case.
        :param data_filenames: list of image filenames (one per channel) inside
            each case folder. BUGFIX(review): the default was a shared mutable
            list ([]); a None sentinel avoids cross-instance state.
        :param seg_filename: segmentation filename; "" means no segmentation.
        """
        self.base_dir = base_dir
        self.image_dir = image_dir
        self.data_filenames = data_filenames if data_filenames is not None else []
        self.seg_filename = seg_filename

    def get_iterable_list(self):
        """One entry per case: the directory names under base_dir/image_dir."""
        all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
        return all_cases

    def _normalize(self, data: np.ndarray, seg: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        """Z-score normalize every channel in place.

        NOTE(review): the whole properties dict is passed (not indexed per
        channel, unlike the CT variants) — confirm ZScoreNormalization
        ignores intensityproperties here.
        """
        for c in range(data.shape[0]):
            normalizer_class = ZScoreNormalization
            normalizer = normalizer_class(use_mask_for_norm=False,
                                          intensityproperties=foreground_intensity_properties_per_channel)
            data[c] = normalizer.run(data[c], seg[0])
        return data

    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
        """Normalize one case; geometry (shape/spacing) is left untouched.

        :param data: 4D array (c, z, y, x) of the stacked channels.
        :param seg: 4D array (1, z, y, x) or None.
        :param properties: per-case metadata dict; enriched in place.
        :return: (data, seg) after normalization.
        """
        # let's not mess up the inputs!
        data = np.copy(data)
        old_shape = data.shape

        if seg is None:
            # dummy mask so _normalize always has something to index into
            seg_norm = np.zeros_like(data)
        else:
            seg_norm = seg
        data = self._normalize(data, seg_norm,
                               self.foreground_intensity_properties_per_channel)

        assert len(data.shape) == 4

        if seg is not None:
            assert len(seg.shape) == 4
            properties['class_locations'] = self._sample_foreground_locations(seg,
                                                                              self.all_labels,
                                                                              True)
            # keep the smallest integer dtype that can hold every label value
            if np.max(seg) > 127:
                seg = seg.astype(np.int16)
            else:
                seg = seg.astype(np.int8)

        print(f'old shape: {old_shape}')

        return data, seg

    # need to modify
    def read_data(self, case_name):
        """Read all channels (and the optional segmentation) for one case.

        All arrays are cast to float32 right after reading.

        :return: (data (c, z, y, x), seg or None, properties dict)
        """
        assert len(self.data_filenames) != 0
        data = []
        for dfname in self.data_filenames:
            d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname))
            spacing = d.GetSpacing()
            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])

        data = np.concatenate(data, axis=0)

        seg_arr = None

        if self.seg_filename != "":
            seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename))
            # must be float32 right after reading!
            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
            seg_arr = seg_arr[None]
            intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data)

        else:
            intensities_per_channel = []
            intensity_statistics_per_channel = []

        properties = {"spacing": spacing,
                      "raw_size": data.shape[1:],
                      "name": case_name.split(".")[0],
                      "intensities_per_channel": intensities_per_channel,
                      "intensity_statistics_per_channel": intensity_statistics_per_channel}

        return data, seg_arr, properties

    def run(self,
            output_dir,
            all_labels_dict,
            num_processes=8):
        """Preprocess every case and write the results to ``output_dir``.

        :param output_dir: output directory.
        :param all_labels_dict: labels in region format,
            e.g. {"a": [0, 1, 2, 3], "b": [4, 5], "c": 9}.
        :param num_processes: size of the worker pool.
        :raises RuntimeError: if a worker dies, or re-raises worker exceptions.
        """
        self.all_labels_dict = all_labels_dict
        self.all_labels = []

        for k, v in all_labels_dict.items():
            self.all_labels.append(v)

        self.output_dir = output_dir
        self.foreground_intensity_properties_per_channel = {}

        all_iter = self.get_iterable_list()

        maybe_mkdir_p(self.output_dir)

        # Smoke-test the first case in the main process so obvious errors
        # surface immediately. NOTE(review): this case is processed a second
        # time by the pool below; kept as-is for backward compatibility.
        for case_name in all_iter:
            self.run_case_save(case_name)
            break

        r = []
        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
            for case_name in all_iter:
                r.append(p.starmap_async(self.run_case_save,
                                         ((case_name, ),)))
            remaining = list(range(len(all_iter)))
            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
            # So we need to store the original pool of workers.
            workers = [j for j in p._pool]
            with tqdm(desc=None, total=len(all_iter)) as pbar:
                while len(remaining) > 0:
                    all_alive = all([j.is_alive() for j in workers])
                    if not all_alive:
                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
                                           'OK jokes aside.\n'
                                           'One of your background processes is missing. This could be because of '
                                           'an error (look for an error message) or because it was killed '
                                           'by your OS due to running out of RAM. If you don\'t see '
                                           'an error message, out of RAM is likely the problem. In that case '
                                           'reducing the number of workers might help')
                    done = [i for i in remaining if r[i].ready()]
                    for i in done:
                        # BUGFIX(review): get() re-raises worker exceptions
                        # that the original silently dropped.
                        r[i].get()
                        pbar.update()
                    remaining = [i for i in remaining if i not in done]
                    sleep(0.1)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/preprocessing/resampling/__init__.py b/PRISM/SegMamba/light_training/preprocessing/resampling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/preprocessing/resampling/default_resampling.py b/PRISM/SegMamba/light_training/preprocessing/resampling/default_resampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed1ceb0230255d911dae887caed23a63f27ba6e0
--- /dev/null
+++ b/PRISM/SegMamba/light_training/preprocessing/resampling/default_resampling.py
@@ -0,0 +1,217 @@
+from collections import OrderedDict
+from typing import Union, Tuple, List
+
+import numpy as np
+import pandas as pd
+import torch
+from batchgenerators.augmentations.utils import resize_segmentation
+from scipy.ndimage.interpolation import map_coordinates
+from skimage.transform import resize
+
ANISO_THRESHOLD = 3

def get_do_separate_z(spacing: Union[Tuple[float, ...], List[float], np.ndarray], anisotropy_threshold=ANISO_THRESHOLD):
    """True when the spacing is anisotropic enough (max/min ratio above the
    threshold) to warrant slice-wise resampling along the coarse axis."""
    spacing_arr = np.asarray(spacing)
    return (spacing_arr.max() / spacing_arr.min()) > anisotropy_threshold
+
+
def get_lowres_axis(new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]):
    """Indices of the axis/axes whose spacing equals the maximum, i.e. the
    low-resolution (out-of-plane) axes."""
    spacing_arr = np.array(new_spacing)
    return np.where(spacing_arr == max(new_spacing))[0]
+
+
def compute_new_shape(old_shape: Union[Tuple[int, ...], List[int], np.ndarray],
                      old_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
                      new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray:
    """Shape that preserves physical extent when going from old_spacing to
    new_spacing: each axis is scaled by old/new and rounded to the nearest int."""
    assert len(old_spacing) == len(old_shape)
    assert len(old_shape) == len(new_spacing)

    scaled = [int(round(sp_old / sp_new * dim))
              for sp_old, sp_new, dim in zip(old_spacing, new_spacing, old_shape)]
    return np.array(scaled)
+
+
def resample_data_or_seg_to_spacing(data: np.ndarray,
                                    current_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
                                    new_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
                                    is_seg: bool = False,
                                    order: int = 3, order_z: int = 0,
                                    force_separate_z: Union[bool, None] = False,
                                    separate_z_anisotropy_threshold: float = ANISO_THRESHOLD):
    """Resample a 4D (c, x, y, z) array from current_spacing to new_spacing.

    :param data: 4D array (c, x, y, z).
    :param current_spacing: spacing of ``data``, one value per spatial axis.
    :param new_spacing: target spacing, one value per spatial axis.
    :param is_seg: resample label-safely (nearest/one-hot based) if True.
    :param order: spline order for image resampling.
    :param order_z: order along the anisotropic axis when resampling separately.
    :param force_separate_z: True/False forces the separate-z decision;
        None lets the anisotropy threshold decide.
    :param separate_z_anisotropy_threshold: max/min spacing ratio above which
        slice-wise resampling is used (only when force_separate_z is None).
    :return: the resampled array.
    """
    if force_separate_z is not None:
        do_separate_z = force_separate_z
        if force_separate_z:
            axis = get_lowres_axis(current_spacing)
        else:
            axis = None
    else:
        # let the anisotropy of either the source or the target spacing decide
        if get_do_separate_z(current_spacing, separate_z_anisotropy_threshold):
            do_separate_z = True
            axis = get_lowres_axis(current_spacing)
        elif get_do_separate_z(new_spacing, separate_z_anisotropy_threshold):
            do_separate_z = True
            axis = get_lowres_axis(new_spacing)
        else:
            do_separate_z = False
            axis = None

    if axis is not None:
        if len(axis) == 3:
            # every axis has the same spacing, this should never happen, why is this code here?
            do_separate_z = False
        elif len(axis) == 2:
            # this happens for spacings like (0.24, 1.25, 1.25) for example. In that case we do not want to resample
            # separately in the out of plane axis
            do_separate_z = False
        else:
            pass

    if data is not None:
        assert len(data.shape) == 4, "data must be c x y z"

    shape = np.array(data[0].shape)
    # BUGFIX(review): was compute_new_shape(shape[1:], ...). data[0].shape is
    # already the 3D spatial shape, so slicing off another axis made the shape
    # 2D and tripped compute_new_shape's length assertion against the
    # 3-element spacings. The full spatial shape must be used.
    new_shape = compute_new_shape(shape, current_spacing, new_spacing)

    data_reshaped = resample_data_or_seg(data, new_shape, is_seg, axis, order, do_separate_z, order_z=order_z)
    return data_reshaped
+
+
def resample_data_or_seg_to_shape(data: Union[torch.Tensor, np.ndarray],
                                  new_shape: Union[Tuple[int, ...], List[int], np.ndarray],
                                  current_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
                                  new_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
                                  is_seg: bool = False,
                                  order: int = 3, order_z: int = 0,
                                  force_separate_z: Union[bool, None] = False,
                                  separate_z_anisotropy_threshold: float = ANISO_THRESHOLD):
    """
    Resample a 4D (c, x, y, z) array to an explicit target shape.

    needed for segmentation export. Stupid, I know. Maybe we can fix that with Leos new resampling functions
    """
    if isinstance(data, torch.Tensor):
        data = data.cpu().numpy()

    # Decide whether to resample slice-wise along one anisotropic axis.
    if force_separate_z is not None:
        separate_z = force_separate_z
        lowres_axis = get_lowres_axis(current_spacing) if force_separate_z else None
    elif get_do_separate_z(current_spacing, separate_z_anisotropy_threshold):
        separate_z = True
        lowres_axis = get_lowres_axis(current_spacing)
    elif get_do_separate_z(new_spacing, separate_z_anisotropy_threshold):
        separate_z = True
        lowres_axis = get_lowres_axis(new_spacing)
    else:
        separate_z = False
        lowres_axis = None

    # With two or three equally coarse axes there is no single out-of-plane
    # axis (e.g. spacings like (0.24, 1.25, 1.25)), so slice-wise resampling
    # is disabled.
    if lowres_axis is not None and len(lowres_axis) != 1:
        separate_z = False

    if data is not None:
        assert len(data.shape) == 4, "data must be c x y z"

    return resample_data_or_seg(data, new_shape, is_seg, lowres_axis, order, separate_z, order_z=order_z)
+
+
def resample_data_or_seg(data: np.ndarray, new_shape: Union[Tuple[float, ...], List[float], np.ndarray],
                         is_seg: bool = False, axis: Union[None, Tuple[int, ...], List[int], np.ndarray] = None,
                         order: int = 3,
                         do_separate_z: bool = False, order_z: int = 0):
    """
    separate_z=True will resample with order 0 along z
    :param data: 4d array (c, x, y, z)
    :param new_shape: target spatial shape (3 entries)
    :param is_seg: if True, resample label-safely via resize_segmentation
    :param axis: one-element sequence holding the index of the anisotropic
        (out-of-plane) axis; only used when do_separate_z is True
    :param order: spline order for the in-plane resize
    :param do_separate_z: resize slices in-plane first, then resample
        separately along the anisotropic axis
    :param order_z: only applies if do_separate_z is True
    :return: resampled array cast back to the input's original dtype
    """
    assert len(data.shape) == 4, "data must be (c, x, y, z)"
    assert len(new_shape) == len(data.shape) - 1

    if is_seg:
        # resize_segmentation interpolates one-hot channels so label values
        # are never mixed by the spline
        resize_fn = resize_segmentation
        kwargs = OrderedDict()
    else:
        resize_fn = resize
        kwargs = {'mode': 'edge', 'anti_aliasing': False}
    dtype_data = data.dtype
    shape = np.array(data[0].shape)
    new_shape = np.array(new_shape)
    if np.any(shape != new_shape):
        data = data.astype(float)
        if do_separate_z:
            # print("separate z, order in z is", order_z, "order inplane is", order)
            assert len(axis) == 1, "only one anisotropic axis supported"
            axis = axis[0]
            # in-plane target shape: drop the anisotropic axis
            if axis == 0:
                new_shape_2d = new_shape[1:]
            elif axis == 1:
                new_shape_2d = new_shape[[0, 2]]
            else:
                new_shape_2d = new_shape[:-1]

            reshaped_final_data = []
            for c in range(data.shape[0]):
                # step 1: resize every slice along the anisotropic axis in-plane
                reshaped_data = []
                for slice_id in range(shape[axis]):
                    if axis == 0:
                        reshaped_data.append(resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs))
                    elif axis == 1:
                        reshaped_data.append(resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs))
                    else:
                        reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order, **kwargs))
                reshaped_data = np.stack(reshaped_data, axis)
                # step 2: resample along the anisotropic axis itself (order_z)
                if shape[axis] != new_shape[axis]:

                    # The following few lines are blatantly copied and modified from sklearn's resize()
                    rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
                    orig_rows, orig_cols, orig_dim = reshaped_data.shape

                    row_scale = float(orig_rows) / rows
                    col_scale = float(orig_cols) / cols
                    dim_scale = float(orig_dim) / dim

                    # sample at pixel centers (+0.5 / -0.5), matching skimage
                    map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
                    map_rows = row_scale * (map_rows + 0.5) - 0.5
                    map_cols = col_scale * (map_cols + 0.5) - 0.5
                    map_dims = dim_scale * (map_dims + 0.5) - 0.5

                    coord_map = np.array([map_rows, map_cols, map_dims])
                    if not is_seg or order_z == 0:
                        reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z,
                                                                   mode='nearest')[None])
                    else:
                        # higher-order interpolation on a label map: interpolate
                        # each label as a separate binary mask so intermediate
                        # values never mix label ids
                        unique_labels = np.sort(pd.unique(reshaped_data.ravel()))  # np.unique(reshaped_data)
                        reshaped = np.zeros(new_shape, dtype=dtype_data)

                        for i, cl in enumerate(unique_labels):
                            reshaped_multihot = np.round(
                                map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z,
                                                mode='nearest'))
                            reshaped[reshaped_multihot > 0.5] = cl
                        reshaped_final_data.append(reshaped[None])
                else:
                    reshaped_final_data.append(reshaped_data[None])
            reshaped_final_data = np.vstack(reshaped_final_data)
        else:
            # print("no separate z, order", order)
            reshaped = []
            for c in range(data.shape[0]):
                reshaped.append(resize_fn(data[c], new_shape, order, **kwargs)[None])
            reshaped_final_data = np.vstack(reshaped)
        return reshaped_final_data.astype(dtype_data)
    else:
        # print("no resampling necessary")
        return data
diff --git a/PRISM/SegMamba/light_training/process_framework/norm.py b/PRISM/SegMamba/light_training/process_framework/norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..d294fe5c9d81054b8dae218aa61927e276c673a6
--- /dev/null
+++ b/PRISM/SegMamba/light_training/process_framework/norm.py
@@ -0,0 +1,16 @@
+
+
+
+from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, CTNormStandard
+
+
+## need to custom, this example is about Segrap competition.
def norm_func(data, seg=None, **kwargs):
    """Clip to the SegRap CT window [-175, 250] HU and rescale to [0, 1].

    ``seg`` and ``kwargs`` are accepted for interface compatibility but unused.
    """
    window = CTNormStandard(a_min=-175,
                            a_max=250,
                            b_min=0.0,
                            b_max=1.0, clip=True)
    return window(data)
diff --git a/PRISM/SegMamba/light_training/process_framework/process.py b/PRISM/SegMamba/light_training/process_framework/process.py
new file mode 100644
index 0000000000000000000000000000000000000000..12e81de1130168b02ca31e90d5dce463249d851c
--- /dev/null
+++ b/PRISM/SegMamba/light_training/process_framework/process.py
@@ -0,0 +1,235 @@
+import torch
+import numpy as np
+import SimpleITK
+import os
+import sys
+from monai.inferers import SlidingWindowInferer
+
+class Customalgorithm(): # SegmentationAlgorithm is not inherited in this class anymore
+ def __init__(self):
+ """
+ Do not modify the `self.input_dir` and `self.output_dir`.
+ (Check https://grand-challenge.org/algorithms/interfaces/)
+ """
+ self.input_dir = "/input/"
+ self.output_dir = "/output/images/head-neck-segmentation/"
+
+ # self.out_spacing = [3.0, 0.54199219, 0.54199219]
+ self.out_spacing = [3.0, 1.0, 1.0]
+
+ # self.device = "cpu"
+
+ self.device = torch.device("cuda")
+
+ self.patch_size = [64, 128, 128]
+
+ def filte_state_dict(self, sd):
+ if "module" in sd :
+ sd = sd["module"]
+ new_sd = {}
+ for k, v in sd.items():
+ k = str(k)
+ new_k = k[7:] if k.startswith("module") else k
+ new_sd[new_k] = v
+ del sd
+ return new_sd
+
+ def convert_mha_to_nii(self, mha_input_path, nii_out_path): # nnUNet specific
+ img = SimpleITK.ReadImage(mha_input_path)
+ print(img.GetSize())
+ SimpleITK.WriteImage(img, nii_out_path, True)
+
+ def convert_nii_to_mha(self, nii_input_path, mha_out_path): # nnUNet specific
+ img = SimpleITK.ReadImage(nii_input_path)
+ SimpleITK.WriteImage(img, mha_out_path, True)
+
+ def read(self, mha_path):
+ img = SimpleITK.ReadImage(mha_path)
+ spacing = img.GetSpacing()
+ raw_size = SimpleITK.GetArrayFromImage(img).shape
+ img = SimpleITK.GetArrayFromImage(img)[None,].astype(np.float32)
+ properties = {
+ "spacing": spacing,
+ "raw_size": raw_size
+ }
+ return img, properties
+
+ def check_gpu(self):
+ """
+ Check if GPU is available. Note that the Grand Challenge only has one available GPU.
+ """
+ print('Checking GPU availability')
+ is_available = torch.cuda.is_available()
+ print('Available: ' + str(is_available))
+ print(f'Device count: {torch.cuda.device_count()}')
+ if is_available:
+ print(f'Current device: {torch.cuda.current_device()}')
+ print('Device name: ' + torch.cuda.get_device_name(0))
+ print('Device memory: ' +
+ str(torch.cuda.get_device_properties(0).total_memory))
+
+ def load_inputs(self): # use two modalities input data
+ """
+ Read input data (two modalities) from `self.input_dir` (/input/).
+ Please do not modify the path for CT and contrast-CT images.
+ """
+ ct_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0]
+ ctc_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0]
+ uuid = os.path.splitext(ct_mha)[0]
+
+ img, properties = self.read(os.path.join(self.input_dir, 'images/head-neck-ct/', ct_mha))
+ img_c, _ = self.read(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ctc_mha))
+
+ data = np.concatenate([img, img_c], axis=0)
+ del img
+ del img_c
+ # data is (2, d, w, h)
+ return uuid, data, properties
+
+ def crop(self, data, properties):
+ from light_training.preprocessing.cropping.cropping import crop_to_nonzero
+
+ seg = np.zeros_like(data)
+
+ shape_before_cropping = data.shape[1:]
+ ## crop
+ properties['shape_before_cropping'] = shape_before_cropping
+ # this command will generate a segmentation. This is important because of the nonzero mask which we may need
+ data, seg, bbox = crop_to_nonzero(data, seg)
+ del seg
+
+ properties['bbox_used_for_cropping'] = bbox
+
+ return data, properties
+
+ def resample(self, data, properties):
+ from light_training.preprocessing.resampling.default_resampling import compute_new_shape, resample_data_or_seg_to_shape
+ # crop, remember to store size before cropping!
+ shape_before_resample = data.shape[1:]
+ properties['shape_after_cropping_before_resample'] = shape_before_resample
+ new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)
+
+ assert len(data.shape) == 4
+
+ data = resample_data_or_seg_to_shape(data, new_shape,
+ original_spacing,
+ self.out_spacing,
+ order=3,
+ order_z=0)
+ properties['shape_after_resample'] = new_shape
+
+ return data, properties
+
+ def preprocess(self, data, properties, crop_first=True):
+ from light_training.process_framework.norm import norm_func
+
+ original_spacing = list(properties['spacing'])
+ ## 由于old spacing读出来是反的,因此这里需要转置一下
+ original_spacing_trans = original_spacing[::-1]
+ properties["original_spacing_trans"] = original_spacing_trans
+ properties["target_spacing_trans"] = self.out_spacing
+
+ if crop_first:
+ data, properties = self.crop(data, properties)
+
+ data = norm_func(data)
+
+ if not crop_first:
+ data, properties = self.crop(data, properties)
+
+
+ data, properties = self.resample(data, properties)
+
+ data = data[None,]
+
+ data = torch.from_numpy(data)
+
+ return data, properties
+
+ def predict(self, data, properties, uid):
+ torch.cuda.empty_cache()
+
+ from models.nnunet3d import NNUNetWrapper
+ model = NNUNetWrapper(norm="ins")
+
+ new_sd = self.filte_state_dict(torch.load("./weight/unet3d_0_addaug_bs2_ep1000_ds_gpu4/final_model_0.8552.pt", map_location="cpu"))
+ model.load_state_dict(new_sd)
+
+ del new_sd
+ torch.cuda.empty_cache()
+ # data = data.to(self.deivce)
+ # model.to(self.device)
+ model.eval()
+ window_infer = SlidingWindowInferer(roi_size=self.patch_size,
+ sw_batch_size=1,
+ overlap=0.5,
+ progress=True,
+ mode="gaussian")
+
+ predictor = Predictor(window_infer, mirror_axes=None)
+ try:
+ ensemble_output = predictor.maybe_mirror_and_predict(data, model, self.device)
+
+ except RuntimeError:
+ ensemble_output = predictor.maybe_mirror_and_predict(data, model, torch.device("cpu"))
+ torch.cuda.empty_cache()
+ del model
+ del data
+
+ print(f"prediction done")
+ ensemble_output = predictor.predict_raw_probability(ensemble_output, properties)
+ print(f"non linear....")
+ # ensemble_output = predictor.apply_nonlinear(ensemble_output, nonlinear_type="sigmoid")
+ ensemble_output = ensemble_output > 0
+
+ print(f"restore crop...")
+ ensemble_output = predictor.predict_noncrop_probability(ensemble_output, properties)
+
+ raw_spacing = properties["spacing"]
+ case_name = uid
+ print(f"uuid is {uid}")
+ os.makedirs(os.path.dirname(self.output_dir), exist_ok=True)
+
+ print(f"saving....")
+ predictor.save_to_nii_multi_organ(ensemble_output,
+ raw_spacing,
+ save_dir=self.output_dir,
+ case_name=case_name,
+ postprocess=False)
+
+ # """
+ # load the model and checkpoint, and generate the predictions. You can replace this part with your own model.
+ # """
+ # predict_from_folder_segrap2023(self.weight, self.nii_path, self.result_path, 0, 0, 1)
+ # print("nnUNet segmentation done!")
+ # if not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)):
+ # print('waiting for nnUNet segmentation to be created')
+
+ # while not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)):
+ # import time
+ # print('.', end='')
+ # time.sleep(5)
+ # # print(cproc) # since nnUNet_predict call is split into prediction and postprocess, a pre-mature exit code is received but segmentation file not yet written. This hack ensures that all spawned subprocesses are finished before being printed.
+ # print('Prediction finished !')
+
+ def post_process(self):
+ self.check_gpu()
+ print('Start processing')
+ uuid, data, properties = self.load_inputs()
+
+ data, properties = self.preprocess(data, properties)
+ print(properties)
+ print('Start prediction')
+ self.predict(data, properties, uuid)
+ # print('Start output writing')
+ # self.write_outputs(uuid)
+
+ def process(self):
+ """
+ Read inputs from /input, process with your algorithm and write to /output
+ """
+ self.post_process()
+
+
+if __name__ == "__main__":
+ Customalgorithm().process()
diff --git a/PRISM/SegMamba/light_training/sampler.py b/PRISM/SegMamba/light_training/sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..b46a80bddbbadf4532b206fd06e62e608909765e
--- /dev/null
+++ b/PRISM/SegMamba/light_training/sampler.py
@@ -0,0 +1,48 @@
+import torch
+import math
+import numpy as np
+
+class SequentialDistributedSampler(torch.utils.data.sampler.Sampler):
+ """
+ Distributed Sampler that subsamples indicies sequentially,
+ making it easier to collate all results at the end.
+ Even though we only use this sampler for eval and predict (no training),
+ which means that the model params won't have to be synced (i.e. will not hang
+ for synchronization even if varied number of forward passes), we still add extra
+ samples to the sampler to make it evenly divisible (like in `DistributedSampler`)
+ to make it easy to `gather` or `reduce` resulting tensors at the end of the loop.
+ """
+
+ def __init__(self, dataset, batch_size, rank=None, num_replicas=None):
+ if num_replicas is None:
+ if not torch.distributed.is_available():
+ raise RuntimeError("Requires distributed package to be available")
+ num_replicas = torch.distributed.get_world_size()
+ if rank is None:
+ if not torch.distributed.is_available():
+ raise RuntimeError("Requires distributed package to be available")
+ rank = torch.distributed.get_rank()
+ self.dataset = dataset
+ self.num_replicas = num_replicas
+ self.rank = rank
+ self.batch_size = batch_size
+ self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.batch_size / self.num_replicas)) * self.batch_size
+ self.total_size = self.num_samples * self.num_replicas
+
+ def __iter__(self):
+ indices = list(range(len(self.dataset)))
+ # add extra samples to make it evenly divisible
+ indices += [indices[-1]] * (self.total_size - len(indices))
+ # subsample
+ indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples]
+ return iter(indices)
+
+ def __len__(self):
+ return self.num_samples
+
+
+def distributed_concat(tensor, num_total_examples):
+ output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
+ torch.distributed.all_gather(output_tensors, tensor)
+ concat = torch.cat(output_tensors, dim=0)
+ return concat[:num_total_examples]
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/trainer.py b/PRISM/SegMamba/light_training/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3117702b215077b58df9d83d37c7bd9faebd4259
--- /dev/null
+++ b/PRISM/SegMamba/light_training/trainer.py
@@ -0,0 +1,516 @@
+import os
+from tqdm import tqdm
+import numpy as np
+import torch
+import torch.nn.parallel
+import torch.utils.data.distributed
+from light_training.utils.lr_scheduler import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup
+from monai.data import DataLoader
+import argparse
+from .launch import launch_dist
+from monai.utils import set_determinism
+from .sampler import SequentialDistributedSampler, distributed_concat
+from torch.utils.tensorboard import SummaryWriter
+from torch.cuda.amp import GradScaler
+from torch import autocast, nn
+import time
+
+class dummy_context(object):
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+class Trainer:
+    def __init__(self, env_type,
+                 max_epochs,
+                 batch_size,
+                 device="cpu",
+                 val_every=1,
+                 num_gpus=1,
+                 logdir="./logs/",
+                 master_ip='localhost',
+                 master_port=17750,
+                 training_script="train.py",
+                 train_process=12,
+                 ):
+        """Base trainer for single-process ("pytorch") or multi-GPU DDP runs.
+
+        env_type: "pytorch" for a single process, "ddp"/"DDP" for distributed.
+        val_every: run validation every N epochs.
+        training_script: script re-launched once per rank when bootstrapping DDP.
+        train_process: worker count for the training data pipeline.
+        """
+        assert env_type in ["pytorch", "ddp", "DDP"], f"not support this env_type: {env_type}"
+        self.env_type = env_type
+        self.val_every = val_every
+        self.max_epochs = max_epochs
+        self.ddp = False
+        self.num_gpus = num_gpus
+        self.device = device
+        self.local_rank = 0
+        self.batch_size = batch_size
+        self.not_call_launch = True
+        self.logdir = logdir
+        self.scheduler = None
+        self.model = None          # subclasses assign the network before train()
+        self.auto_optim = True     # if False, training_step() handles optimization itself
+        self.warmup = 0.0
+        self.scheduler_type = None
+
+        self.optimizer = None
+        self.patch_size = None     # must be set by the subclass before train()
+
+        # Fixed per-epoch iteration budget, split evenly across GPUs.
+        self.num_step_per_epoch = 250 // self.num_gpus
+        self.val_number = 100 // self.num_gpus
+        self.augmentation = True
+        self.train_process = train_process
+        self.print_time = False
+
+        # AMP gradient scaling only applies on GPU.
+        if self.device == "cpu":
+            self.grad_scaler = None
+        else :
+            self.grad_scaler = GradScaler()
+
+        torch.backends.cudnn.enabled = True
+
+        gpu_count = torch.cuda.device_count()
+        if num_gpus > gpu_count:
+            print("gpu数量不符")  # "GPU count mismatch"
+            os._exit(0)
+
+        if env_type == "DDP" or env_type == "ddp":
+            self.ddp = True
+            self.get_dist_args()
+            # The first (launcher) process spawns one worker per GPU and exits;
+            # workers are started with --not_call_launch and fall through to
+            # initialize_distributed() below.
+            if not self.not_call_launch:
+                launch_dist(env_type=env_type,
+                            num_nodes=1,
+                            gpus_per_node=num_gpus,
+                            master_addr=master_ip,
+                            master_port=master_port,
+                            training_script=training_script,
+                            )
+                os._exit(1)
+            self.initialize_distributed()
+
+ def initialize_distributed(self):
+ """Initialize torch.distributed."""
+ if self.env_type == 'pytorch':
+ self.print_rank_0('No need to initialize')
+ return
+ if self.env_type == 'DDP' or "deepspeed" in self.env_type:
+
+ if self.local_rank is not None:
+ device = self.local_rank
+ torch.cuda.set_device(device)
+ # Call the init process
+ init_method = 'env://'
+ torch.distributed.init_process_group(
+ backend='nccl',
+ init_method=init_method)
+ self.world_size = torch.distributed.get_world_size()
+
+ print(f"world size is {self.world_size}")
+
+ def get_dataloader(self, dataset, shuffle=False, batch_size=1, train=True):
+ if dataset is None :
+ return None
+ if self.env_type == 'pytorch':
+ return DataLoader(dataset,
+ batch_size=batch_size,
+ shuffle=shuffle,
+ num_workers=12)
+ else :
+ if not train:
+ sampler = SequentialDistributedSampler(dataset, batch_size=batch_size)
+
+ else :
+ sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)
+ return DataLoader(dataset,
+ batch_size=batch_size,
+ num_workers=12,
+ sampler=sampler,
+ drop_last=True)
+
+    def get_multi_processor_loader(self, train_ds, val_ds):
+        """Build background-worker train/val data generators.
+
+        The augmentation pipeline is chosen from self.augmentation (True,
+        False, "nomirror", "onlymirror", or "onlyspatial"); the patch loaders
+        are wrapped in LimitedLenWrapper so one "epoch" is a fixed number of
+        steps rather than a full dataset pass. Returns (train_gen, val_gen);
+        val_gen is None when val_ds is None.
+        """
+        from .augment.multi_processor import LimitedLenWrapper
+        from .augment.train_augment import get_train_transforms, get_validation_transforms, get_train_transforms_noaug, get_train_transforms_nomirror, get_train_transforms_onlymirror, get_train_transforms_onlyspatial
+        from light_training.dataloading.base_data_loader import DataLoaderMultiProcess
+
+        assert self.patch_size != None
+        if self.augmentation:
+            # Any truthy non-matching value falls through to the full pipeline.
+            if self.augmentation == "nomirror":
+                print(f"use augmentation: no mirror")
+                tr_transforms = get_train_transforms_nomirror(patch_size=self.patch_size, mirror_axes=[0, 1, 2])
+            elif self.augmentation == "onlymirror":
+                print(f"use augmentation: only mirror")
+                tr_transforms = get_train_transforms_onlymirror(patch_size=self.patch_size, mirror_axes=[0, 1, 2])
+            elif self.augmentation == "onlyspatial":
+                print(f"use augmentation: only spatial")
+                tr_transforms = get_train_transforms_onlyspatial(patch_size=self.patch_size, mirror_axes=[0, 1, 2])
+            else :
+                tr_transforms = get_train_transforms(patch_size=self.patch_size, mirror_axes=[0, 1, 2])
+        else:
+            tr_transforms = get_train_transforms_noaug(patch_size=self.patch_size, mirror_axes=[0, 1, 2])
+
+        val_transforms = get_validation_transforms()
+
+        # Raw patch sampler; augmentation runs in self.train_process workers.
+        train_loader = DataLoaderMultiProcess(train_ds,
+                                              batch_size=self.batch_size,
+                                              patch_size=self.patch_size,
+                                              print_time=self.print_time)
+
+        data_generator = LimitedLenWrapper(self.num_step_per_epoch, data_loader=train_loader,
+                                           transform=tr_transforms,
+                                           num_processes=self.train_process, num_cached=6, seeds=None,
+                                           pin_memory=True, wait_time=0.02)
+        if val_ds is None:
+            val_data_generator = None
+        else :
+            # Validation always samples foreground-containing patches.
+            val_loader = DataLoaderMultiProcess(val_ds,
+                                                batch_size=1,
+                                                patch_size=self.patch_size,
+                                                oversample_foreground_percent=1.0)
+
+            val_data_generator = LimitedLenWrapper(self.val_number, data_loader=val_loader, transform=val_transforms,
+                                                   num_processes=6, num_cached=3, seeds=None,
+                                                   pin_memory=True, wait_time=0.02)
+        return data_generator, val_data_generator
+
+
+ def get_dist_args(self):
+ parser = argparse.ArgumentParser()
+ # parser.add_argument('--local_rank', type=int, default = 0, help="local_rank")
+ parser.add_argument('--not_call_launch',
+ action='store_true',
+ help="not call launch!")
+ ds_args = parser.parse_args()
+ self.local_rank = int(os.environ.get("LOCAL_RANK", 0))
+
+ print(f"self.local_rank is {self.local_rank}")
+ self.not_call_launch = ds_args.not_call_launch
+ self.device = self.local_rank
+
+ def to_device(self, batch):
+ if isinstance(batch, dict):
+ for k, v in batch.items():
+ if isinstance(batch[k], np.ndarray):
+ batch[k] = torch.from_numpy(batch[k])
+
+ if (isinstance(batch[k], torch.Tensor) or isinstance(batch[k], torch.FloatTensor)):
+ batch[k] = batch[k].to(self.device).contiguous()
+
+ elif isinstance(batch, list) :
+ batch = [torch.from_numpy(x) for x in batch if isinstance(x, np.ndarray)]
+ batch = [x.to(self.device).contiguous() for x in batch if (isinstance(x, torch.Tensor) or isinstance(x, torch.FloatTensor))]
+
+ elif isinstance(batch, np.ndarray):
+ batch = torch.from_numpy(batch)
+ batch = batch.to(self.device).contiguous()
+
+ else :
+ print("not support data type")
+ exit(0)
+
+ return batch
+
+    def validation_single_gpu(self, val_dataset,):
+        """Run validation on a single device and average per-case metrics.
+
+        Returns (mean_metrics, raw_outputs). NaN entries are excluded from the
+        mean on a per-metric basis; a metric with no valid cases averages to 0.
+        Not usable under DDP (exits immediately).
+        """
+        if self.ddp:
+            print(f"single gpu model not support the ddp")
+            exit(0)
+        val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, pin_memory=True)
+        if self.model is not None:
+            self.model.to(self.device)
+            self.model.eval()
+        val_outputs = []
+
+        for idx, batch in tqdm(enumerate(val_loader), total=len(val_loader)):
+            batch = self.before_data_to_device(batch)
+            batch = self.to_device(batch)
+
+            with torch.no_grad():
+                val_out = self.validation_step(batch)
+                assert val_out is not None
+
+            # return_list reflects the shape of the LAST validation_step result;
+            # steps are assumed to return a consistent scalar-or-sequence shape.
+            return_list = False
+            val_outputs.append(val_out)
+            if isinstance(val_out, list) or isinstance(val_out, tuple):
+                return_list = True
+
+        val_outputs = torch.tensor(val_outputs)
+        if not return_list:
+            # Single scalar metric per case: average the non-NaN values.
+            length = 0
+            v_sum = 0.0
+            for v in val_outputs:
+                if not torch.isnan(v):
+                    v_sum += v
+                    length += 1
+
+            if length == 0:
+                v_sum = 0
+            else :
+                v_sum = v_sum / length
+        else :
+            # Several metrics per case: average each metric column independently.
+            num_val = len(val_outputs[0])
+            length = [0.0 for i in range(num_val)]
+            v_sum = [0.0 for i in range(num_val)]
+
+            for v in val_outputs:
+                for i in range(num_val):
+                    if not torch.isnan(v[i]):
+                        v_sum[i] += v[i]
+                        length[i] += 1
+
+            for i in range(num_val):
+                if length[i] == 0:
+                    v_sum[i] = 0
+                else :
+                    v_sum[i] = v_sum[i] / length[i]
+        return v_sum, val_outputs
+
+ def validate(self):
+ val_outputs = []
+ if self.global_step % self.val_every == 0 \
+ and self.val_loader is not None :
+ if self.model is not None:
+ self.model.eval()
+ if self.ddp:
+ torch.distributed.barrier()
+ outputs_split = None
+ # for idx, batch in tqdm(enumerate(self.val_loader), total=len(self.val_loader)):
+ for i in tqdm(range(len(self.val_loader)), total=len(self.val_loader)):
+ batch = next(self.val_loader)
+
+ batch = self.before_data_to_device(batch)
+
+ batch = self.to_device(batch)
+
+ with torch.no_grad():
+ with torch.autocast("cuda", enabled=True) if (self.ddp or 'cuda' in self.device) else dummy_context():
+ val_out = self.validation_step(batch)
+ assert val_out is not None
+ if type(val_out) is not list and type(val_out) is not tuple:
+ val_out = [val_out]
+
+ if outputs_split is None:
+ outputs_split = [[] for i in range(len(val_out))]
+
+ for i, v in enumerate(val_out):
+ outputs_split[i].append(v)
+
+ # val_outputs.append(val_out)
+
+ ## 先汇总结果。
+ if self.ddp:
+ val_outputs = torch.tensor(val_outputs).cuda(self.local_rank)
+ torch.distributed.barrier()
+ val_outputs_merge = []
+ for i in range(len(outputs_split)):
+ val_outputs = torch.tensor(outputs_split[i]).cuda(self.local_rank)
+ val_outputs_merge.append(distributed_concat(val_outputs, num_total_examples=len(self.val_loader) * self.num_gpus))
+
+ # val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader.sampler.dataset))
+ # val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader) * self.num_gpus)
+ else :
+ val_outputs_merge = []
+ for i in range(len(outputs_split)):
+ val_outputs = torch.tensor(outputs_split[i])
+ val_outputs_merge.append(val_outputs)
+ # val_outputs = torch.tensor(val_outputs)
+
+ if self.local_rank == 0:
+ if len(val_outputs_merge) == 1:
+ val_outputs_merge = val_outputs_merge[0]
+ self.validation_end(val_outputs_merge)
+ # self.validation_end(val_outputs)
+
+    def train(self,
+              train_dataset,
+              val_dataset=None,
+              ):
+        """Top-level training loop.
+
+        Sets up logging and (under DDP) SyncBatchNorm + DistributedDataParallel
+        wrapping, builds the multi-process loaders, configures the optional LR
+        scheduler from self.scheduler_type, then runs max_epochs of
+        train_epoch()/validate().
+        """
+        print(f"augmentation: {self.augmentation}")
+        assert self.patch_size is not None, "please define the patch_size"
+
+        # Same base seed plus a rank offset: deterministic but decorrelated ranks.
+        set_determinism(42 + self.local_rank)
+        if self.model is not None:
+            print(f"check model parameter: {next(self.model.parameters()).sum()}, keep model parameters on different processes consistent")
+            para = sum([np.prod(list(p.size())) for p in self.model.parameters()])
+            if self.local_rank == 0:
+                print(f"model parameters is {para / 1000 / 1000}M ")
+
+        self.global_step = 0
+        if self.env_type == "pytorch":
+            if self.model is not None:
+                self.model.to(self.device)
+            os.makedirs(self.logdir, exist_ok=True)
+            self.writer = SummaryWriter(self.logdir)
+
+        elif self.ddp:
+            # Only rank 0 owns a TensorBoard writer.
+            if self.local_rank == 0:
+                os.makedirs(self.logdir, exist_ok=True)
+                self.writer = SummaryWriter(self.logdir)
+            else:
+                self.writer = None
+            if self.model is not None:
+                self.model.cuda(self.local_rank)
+                self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
+                self.model = torch.nn.parallel.DistributedDataParallel(self.model,
+                                                                       device_ids=[self.local_rank],
+                                                                       output_device=self.local_rank,
+                                                                       find_unused_parameters=True)
+        else :
+            print("not support env_type")
+            exit(0)
+
+        self.train_loader, self.val_loader = self.get_multi_processor_loader(train_dataset, val_dataset)
+
+        # len(train_loader) is the fixed step budget from LimitedLenWrapper.
+        self.max_steps = self.max_epochs * len(self.train_loader)
+
+        print(f"step number is {self.max_steps}")
+
+        # Optional LR schedule; warmup defaults to 10% of total steps.
+        if self.scheduler_type == "cosine_with_warmup":
+            if self.warmup == 0.0:
+                self.warmup = 0.1
+            assert self.warmup < 1 and self.warmup > 0
+            warmup_steps = self.max_steps * self.warmup
+            self.scheduler = get_cosine_schedule_with_warmup(self.optimizer,
+                                                             num_warmup_steps=warmup_steps,
+                                                             num_training_steps=self.max_steps)
+            print(f"warmup steps is {warmup_steps}")
+        elif self.scheduler_type == "constant_with_warmup":
+            if self.warmup == 0.0:
+                self.warmup = 0.1
+            assert self.warmup < 1 and self.warmup > 0
+            warmup_steps = self.max_steps * self.warmup
+            self.scheduler = get_constant_schedule_with_warmup(self.optimizer,
+                                                               num_warmup_steps=warmup_steps,
+                                                               )
+            print(f"warmup steps is {warmup_steps}")
+
+        elif self.scheduler_type == "poly_with_warmup":
+            if self.warmup == 0.0:
+                self.warmup = 0.1
+            assert self.warmup < 1 and self.warmup > 0
+            warmup_steps = self.max_steps * self.warmup
+            self.scheduler = get_polynomial_decay_schedule_with_warmup(self.optimizer,
+                                                                       num_warmup_steps=warmup_steps,
+                                                                       num_training_steps=self.max_steps
+                                                                       )
+            print(f"warmup steps is {warmup_steps}")
+
+        elif self.scheduler_type == "poly":
+            from light_training.utils.lr_scheduler import PolyLRScheduler
+            lr = self.optimizer.state_dict()['param_groups'][0]['lr']
+            print(f"initial lr is {lr}")
+            self.scheduler = PolyLRScheduler(self.optimizer, initial_lr=lr, max_steps=self.max_steps)
+            print(f"scheduler_type is poly, warmup steps is {0}")
+
+        for epoch in range(0, self.max_epochs):
+            self.epoch = epoch
+            if self.ddp:
+                torch.distributed.barrier()
+            self.train_epoch(
+                epoch,
+            )
+            if (self.epoch + 1) % self.val_every == 0:
+                self.validate()
+
+            # validate() leaves the model in eval mode; switch back for training.
+            if self.model is not None:
+                self.model.train()
+
+ def before_data_to_device(self, batch_data):
+ return batch_data
+
+    def train_epoch(self,
+                    epoch,
+                    ):
+        """Run one epoch of self.num_step_per_epoch steps.
+
+        With self.auto_optim, runs forward under autocast (GPU/DDP), then the
+        AMP-scaled (or plain) backward/step with grad-norm clipping at 12.
+        When auto_optim is False, training_step() is expected to handle
+        optimization itself and only the step counter/progress bar advance.
+        """
+        if self.model is not None:
+            self.model.train()
+        # Progress bar only on rank 0.
+        with tqdm(total=self.num_step_per_epoch, disable=(self.local_rank != 0)) as t:
+            for i in range(self.num_step_per_epoch):
+                self.global_step += 1
+                t.set_description('Epoch %i' % epoch)
+
+                if self.print_time:
+                    s = time.time()
+                batch = next(self.train_loader)
+                if self.print_time:
+                    e = time.time()
+                    print(f"get batch time is {e - s}")
+
+                batch = self.before_data_to_device(batch)
+
+                batch = self.to_device(batch)
+
+                # Setting grads to None is cheaper than optimizer.zero_grad().
+                if self.model is not None:
+                    for param in self.model.parameters(): param.grad = None
+
+                if not self.auto_optim:
+                    loss = self.training_step(batch)
+                else:
+                    # NOTE(review): under DDP self.device is an int; the
+                    # `self.ddp or` short-circuit avoids `'cuda' in <int>`.
+                    with autocast("cuda", enabled=True) if (self.ddp or 'cuda' in self.device) else dummy_context():
+                        if self.print_time:
+                            s = time.time()
+                        loss = self.training_step(batch)
+                        if self.print_time:
+                            e = time.time()
+                            print(f"training step time is {e - s}")
+
+                    if self.print_time:
+                        s = time.time()
+
+                    if self.grad_scaler is not None:
+                        # AMP path: unscale before clipping so the 12-norm
+                        # threshold applies to true gradients.
+                        self.grad_scaler.scale(loss).backward()
+                        self.grad_scaler.unscale_(self.optimizer)
+                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12)
+                        self.grad_scaler.step(self.optimizer)
+                        self.grad_scaler.update()
+                    else:
+                        loss.backward()
+                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12)
+                        self.optimizer.step()
+
+                    if self.print_time:
+                        e = time.time()
+                        print(f"backward time is {e - s}")
+
+                    if self.scheduler is not None:
+                        self.scheduler.step()
+                    # lr is read back from the optimizer unconditionally so it
+                    # is defined for set_postfix even without a scheduler.
+                    lr = self.optimizer.state_dict()['param_groups'][0]['lr']
+                    self.log("lr", lr, self.global_step)
+
+                    t.set_postfix(loss=loss.item(), lr=lr)
+
+                t.update(1)
+
+    def training_step(self, batch):
+        """Compute and return the training loss for one batch; must be
+        overridden by subclasses."""
+        raise NotImplementedError
+
+    def validation_step(self, batch):
+        """Compute and return per-case validation metric(s) (scalar or
+        list/tuple of scalars); must be overridden by subclasses."""
+        raise NotImplementedError
+
+ def validation_end(self, mean_val_outputs, val_outputs):
+ pass
+
+ def log(self, k, v, step):
+ if self.local_rank == 0:
+ self.writer.add_scalar(k, scalar_value=v, global_step=step)
+
+ def log_dict(self, dict_, step):
+ if self.local_rank == 0:
+ for k, v in dict_.items():
+ self.writer.add_scalar(k, scalar_value=v, global_step=step)
+
+ def load_state_dict(self, weight_path, strict=True):
+ sd = torch.load(weight_path, map_location="cpu")
+ if "module" in sd :
+ sd = sd["module"]
+ new_sd = {}
+ for k, v in sd.items():
+ k = str(k)
+ new_k = k[7:] if k.startswith("module") else k
+ new_sd[new_k] = v
+
+ self.model.load_state_dict(new_sd, strict=strict)
+
+ print(f"model parameters are loaded successed.")
+
diff --git a/PRISM/SegMamba/light_training/trainer_fp32.py b/PRISM/SegMamba/light_training/trainer_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa30b2e10d4375e70e69ac19ac30cf566a6c8b72
--- /dev/null
+++ b/PRISM/SegMamba/light_training/trainer_fp32.py
@@ -0,0 +1,471 @@
+import os
+from tqdm import tqdm
+import numpy as np
+import torch
+import torch.nn.parallel
+import torch.utils.data.distributed
+from light_training.utils.lr_scheduler import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup
+from monai.data import DataLoader
+import argparse
+from .launch import launch_dist
+from monai.utils import set_determinism
+from .sampler import SequentialDistributedSampler, distributed_concat
+from torch.utils.tensorboard import SummaryWriter
+
+class dummy_context(object):
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+class Trainer:
+ def __init__(self, env_type,
+ max_epochs,
+ batch_size,
+ device="cpu",
+ val_every=1,
+ num_gpus=1,
+ logdir="./logs/",
+ master_ip='localhost',
+ master_port=17750,
+ training_script="train.py",
+ ):
+ assert env_type in ["pytorch", "ddp", "DDP"], f"not support this env_type: {env_type}"
+ self.env_type = env_type
+ self.val_every = val_every
+ self.max_epochs = max_epochs
+ self.ddp = False
+ self.num_gpus = num_gpus
+ self.device = device
+ self.local_rank = 0
+ self.batch_size = batch_size
+ self.not_call_launch = True
+ self.logdir = logdir
+ self.scheduler = None
+ self.model = None
+ self.auto_optim = True
+ self.warmup = 0.0
+ self.scheduler_type = None
+
+ self.optimizer = None
+ self.patch_size = None
+
+ self.num_step_per_epoch = 250 // self.num_gpus
+ self.val_number = 100 // self.num_gpus
+ self.augmentation = True
+
+ torch.backends.cudnn.enabled = True
+
+ gpu_count = torch.cuda.device_count()
+ if num_gpus > gpu_count:
+ print("gpu数量不符")
+ os._exit(0)
+
+ if env_type == "DDP" or env_type == "ddp":
+ self.ddp = True
+ self.get_dist_args()
+ if not self.not_call_launch:
+ launch_dist(env_type=env_type,
+ num_nodes=1,
+ gpus_per_node=num_gpus,
+ master_addr=master_ip,
+ master_port=master_port,
+ training_script=training_script,
+ )
+ os._exit(1)
+ self.initialize_distributed()
+
+ def initialize_distributed(self):
+ """Initialize torch.distributed."""
+ if self.env_type == 'pytorch':
+ self.print_rank_0('No need to initialize')
+ return
+ if self.env_type == 'DDP' or "deepspeed" in self.env_type:
+
+ if self.local_rank is not None:
+ device = self.local_rank
+ torch.cuda.set_device(device)
+ # Call the init process
+ init_method = 'env://'
+ torch.distributed.init_process_group(
+ backend='nccl',
+ init_method=init_method)
+ self.world_size = torch.distributed.get_world_size()
+
+ print(f"world size is {self.world_size}")
+
+ def get_dataloader(self, dataset, shuffle=False, batch_size=1, train=True):
+ if dataset is None :
+ return None
+ if self.env_type == 'pytorch':
+ return DataLoader(dataset,
+ batch_size=batch_size,
+ shuffle=shuffle,
+ num_workers=12)
+ else :
+ if not train:
+ sampler = SequentialDistributedSampler(dataset, batch_size=batch_size)
+
+ else :
+ sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)
+ return DataLoader(dataset,
+ batch_size=batch_size,
+ num_workers=12,
+ sampler=sampler,
+ drop_last=True)
+
+ def get_multi_processor_loader(self, train_ds, val_ds):
+ from .augment.multi_processor import LimitedLenWrapper
+ from .augment.train_augment import get_train_transforms, get_validation_transforms, get_train_transforms_noaug
+ from light_training.dataloading.base_data_loader import DataLoaderMultiProcess
+
+ assert self.patch_size != None
+ if self.augmentation:
+ tr_transforms = get_train_transforms(patch_size=self.patch_size, mirror_axes=[0, 1, 2])
+ else:
+ tr_transforms = get_train_transforms_noaug(patch_size=self.patch_size, mirror_axes=[0, 1, 2])
+
+ val_transforms = get_validation_transforms()
+
+ # train_loader = DataLoader(train_ds, num_workers=1, drop_last=True, shuffle=True, batch_size=self.batch_size)
+ train_loader = DataLoaderMultiProcess(train_ds, annotated_classes_key=self.all_labels,
+ batch_size=self.batch_size,
+ patch_size=self.patch_size)
+
+ data_generator = LimitedLenWrapper(self.num_step_per_epoch, data_loader=train_loader,
+ transform=tr_transforms,
+ num_processes=12, num_cached=6, seeds=None,
+ pin_memory=True, wait_time=0.02)
+ if val_ds is None:
+ val_data_generator = None
+ else :
+ val_loader = DataLoaderMultiProcess(val_ds, annotated_classes_key=self.all_labels,
+ batch_size=1,
+ patch_size=self.patch_size,
+ oversample_foreground_percent=1.0)
+
+ val_data_generator = LimitedLenWrapper(self.val_number, data_loader=val_loader, transform=val_transforms,
+ num_processes=6, num_cached=3, seeds=None,
+ pin_memory=True, wait_time=0.02)
+ return data_generator, val_data_generator
+
+
+ def get_dist_args(self):
+ parser = argparse.ArgumentParser()
+ # parser.add_argument('--local_rank', type=int, default = 0, help="local_rank")
+ parser.add_argument('--not_call_launch',
+ action='store_true',
+ help="not call launch!")
+ ds_args = parser.parse_args()
+ self.local_rank = int(os.environ.get("LOCAL_RANK", 0))
+
+ print(f"self.local_rank is {self.local_rank}")
+ self.not_call_launch = ds_args.not_call_launch
+ self.device = self.local_rank
+
+ def to_device(self, batch):
+ if isinstance(batch, dict):
+ for k, v in batch.items():
+ if isinstance(batch[k], np.ndarray):
+ batch[k] = torch.from_numpy(batch[k])
+
+ if (isinstance(batch[k], torch.Tensor) or isinstance(batch[k], torch.FloatTensor)):
+ batch[k] = batch[k].to(self.device).contiguous()
+
+ elif isinstance(batch, list) :
+ batch = [torch.from_numpy(x) for x in batch if isinstance(x, np.ndarray)]
+ batch = [x.to(self.device).contiguous() for x in batch if (isinstance(x, torch.Tensor) or isinstance(x, torch.FloatTensor))]
+
+ elif isinstance(batch, np.ndarray):
+ batch = torch.from_numpy(batch)
+ batch = batch.to(self.device).contiguous()
+
+ else :
+ print("not support data type")
+ exit(0)
+
+ return batch
+
+    def validation_single_gpu(self, val_dataset,):
+        """Run validation on a single GPU and aggregate per-batch metrics.
+
+        Returns ``(mean_metrics, raw_outputs)``. NaN metric values are
+        excluded from the mean. Not usable together with DDP (exits).
+        """
+        if self.ddp:
+            print(f"single gpu model not support the ddp")
+            exit(0)
+        val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)
+        self.model.to(self.device)
+        val_outputs = []
+        self.model.eval()
+        for idx, batch in tqdm(enumerate(val_loader), total=len(val_loader)):
+            batch = self.to_device(batch)
+
+            with torch.no_grad():
+                val_out = self.validation_step(batch)
+                assert val_out is not None
+
+            return_list = False
+            val_outputs.append(val_out)
+            if isinstance(val_out, list) or isinstance(val_out, tuple):
+                return_list = True
+
+        val_outputs = torch.tensor(val_outputs)
+        if not return_list:
+            # validation_step returned a single scalar metric
+            length = 0
+            v_sum = 0.0
+            # average over batches, ignoring NaN entries
+            for v in val_outputs:
+                if not torch.isnan(v):
+                    v_sum += v
+                    length += 1
+
+            if length == 0:
+                v_sum = 0
+            else :
+                v_sum = v_sum / length
+        else :
+            # validation_step returned several metrics; average each one independently
+            num_val = len(val_outputs[0])
+            length = [0.0 for i in range(num_val)]
+            v_sum = [0.0 for i in range(num_val)]
+
+            for v in val_outputs:
+                for i in range(num_val):
+                    if not torch.isnan(v[i]):
+                        v_sum[i] += v[i]
+                        length[i] += 1
+
+            for i in range(num_val):
+                if length[i] == 0:
+                    v_sum[i] = 0
+                else :
+                    v_sum[i] = v_sum[i] / length[i]
+        return v_sum, val_outputs
+
+    def validate(self):
+        """Run one validation pass over ``self.val_loader`` (when due per
+        ``self.val_every``), aggregate per-batch metrics ignoring NaNs, and
+        forward the means to ``validation_end`` on rank 0.
+        """
+        val_outputs = []
+        if self.global_step % self.val_every == 0 \
+            and self.val_loader is not None :
+            if self.model is not None:
+                self.model.eval()
+            if self.ddp:
+                torch.distributed.barrier()
+            # NOTE(review): if the loader yields zero batches, return_list below
+            # is never assigned and a NameError would follow — confirm the
+            # validation loader is always non-empty.
+            # for idx, batch in tqdm(enumerate(self.val_loader), total=len(self.val_loader)):
+            for i in tqdm(range(len(self.val_loader)), total=len(self.val_loader)):
+                batch = next(self.val_loader)
+
+                batch = self.to_device(batch)
+
+                with torch.no_grad():
+                    val_out = self.validation_step(batch)
+                    assert val_out is not None
+
+                return_list = False
+                val_outputs.append(val_out)
+                if isinstance(val_out, list) or isinstance(val_out, tuple):
+                    return_list = True
+
+            ## first gather the results from all ranks
+            if self.ddp:
+                val_outputs = torch.tensor(val_outputs).cuda(self.local_rank)
+                torch.distributed.barrier()
+                # val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader.sampler.dataset))
+                val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader) * self.num_gpus)
+            else :
+                val_outputs = torch.tensor(val_outputs)
+
+            if self.local_rank == 0:
+                if not return_list:
+                    # validation_step returned a single scalar metric
+                    length = 0
+                    v_sum = 0.0
+                    # average over batches, ignoring NaN entries
+                    for v in val_outputs:
+                        if not torch.isnan(v):
+                            v_sum += v
+                            length += 1
+
+                    if length == 0:
+                        v_sum = 0
+                    else :
+                        v_sum = v_sum / length
+                    self.validation_end(mean_val_outputs=v_sum, val_outputs=val_outputs)
+
+                else :
+                    # validation_step returned several metrics; average each independently
+                    num_val = len(val_outputs[0])
+                    length = [0.0 for i in range(num_val)]
+                    v_sum = [0.0 for i in range(num_val)]
+
+                    for v in val_outputs:
+                        for i in range(num_val):
+                            if not torch.isnan(v[i]):
+                                v_sum[i] += v[i]
+                                length[i] += 1
+
+                    for i in range(num_val):
+                        if length[i] == 0:
+                            v_sum[i] = 0
+                        else :
+                            v_sum[i] = v_sum[i] / length[i]
+
+                    self.validation_end(mean_val_outputs=v_sum, val_outputs=val_outputs)
+
+    def train(self,
+              train_dataset,
+              val_dataset=None,
+              ):
+        """Main training entry point.
+
+        Sets up logging / DDP wrapping, builds the multiprocess dataloaders,
+        configures the LR scheduler chosen by ``self.scheduler_type``, then
+        runs ``self.max_epochs`` epochs with periodic validation.
+        Requires ``self.patch_size`` and ``self.all_labels`` to be set.
+        """
+        print(f"augmentation: {self.augmentation}")
+        assert self.patch_size is not None, "please define the patch_size"
+        assert self.all_labels is not None, "please define all the labels, for example, [1, 2, 3, ]"
+
+        # seed per rank so every DDP process starts from identical weights
+        set_determinism(42 + self.local_rank)
+        if self.model is not None:
+            print(f"check model parameter: {next(self.model.parameters()).sum()}, keep model parameters on different processes consistent")
+            para = sum([np.prod(list(p.size())) for p in self.model.parameters()])
+            if self.local_rank == 0:
+                # 4 bytes per float32 parameter
+                print(f"model parameters is {para * 4 / 1000 / 1000}M ")
+
+        self.global_step = 0
+        if self.env_type == "pytorch":
+            if self.model is not None:
+                self.model.to(self.device)
+            os.makedirs(self.logdir, exist_ok=True)
+            self.writer = SummaryWriter(self.logdir)
+
+        elif self.ddp:
+            # only rank 0 writes TensorBoard logs
+            if self.local_rank == 0:
+                os.makedirs(self.logdir, exist_ok=True)
+                self.writer = SummaryWriter(self.logdir)
+            else:
+                self.writer = None
+            if self.model is not None:
+                self.model.cuda(self.local_rank)
+                # self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
+                self.model = torch.nn.parallel.DistributedDataParallel(self.model,
+                                                                       device_ids=[self.local_rank],
+                                                                       output_device=self.local_rank,
+                                                                       find_unused_parameters=True)
+        else :
+            print("not support env_type")
+            exit(0)
+
+        # self.train_loader = self.get_dataloader(train_dataset, shuffle=True, batch_size=self.batch_size)
+        self.train_loader, self.val_loader = self.get_multi_processor_loader(train_dataset, val_dataset)
+
+        self.max_steps = self.max_epochs * len(self.train_loader)
+
+        print(f"step number is {self.max_steps}")
+
+        # scheduler selection; warmup is interpreted as a fraction of max_steps
+        if self.scheduler_type == "cosine_with_warmup":
+            if self.warmup == 0.0:
+                self.warmup = 0.1
+            assert self.warmup < 1 and self.warmup > 0
+            warmup_steps = self.max_steps * self.warmup
+            self.scheduler = get_cosine_schedule_with_warmup(self.optimizer,
+                                                             num_warmup_steps=warmup_steps,
+                                                             num_training_steps=self.max_steps)
+            print(f"warmup steps is {warmup_steps}")
+        elif self.scheduler_type == "constant_with_warmup":
+            if self.warmup == 0.0:
+                self.warmup = 0.1
+            assert self.warmup < 1 and self.warmup > 0
+            warmup_steps = self.max_steps * self.warmup
+            self.scheduler = get_constant_schedule_with_warmup(self.optimizer,
+                                                               num_warmup_steps=warmup_steps,
+                                                               )
+            print(f"warmup steps is {warmup_steps}")
+
+        elif self.scheduler_type == "poly_with_warmup":
+            if self.warmup == 0.0:
+                self.warmup = 0.1
+            assert self.warmup < 1 and self.warmup > 0
+            warmup_steps = self.max_steps * self.warmup
+            self.scheduler = get_polynomial_decay_schedule_with_warmup(self.optimizer,
+                                                                       num_warmup_steps=warmup_steps,
+                                                                       num_training_steps=self.max_steps
+                                                                       )
+            print(f"warmup steps is {warmup_steps}")
+
+        elif self.scheduler_type == "poly":
+            from light_training.utils.lr_scheduler import PolyLRScheduler
+            lr = self.optimizer.state_dict()['param_groups'][0]['lr']
+            print(f"initial lr is {lr}")
+            self.scheduler = PolyLRScheduler(self.optimizer, initial_lr=lr, max_steps=self.max_steps)
+            print(f"scheduler_type is poly, warmup steps is {0}")
+
+        for epoch in range(0, self.max_epochs):
+            self.epoch = epoch
+            if self.ddp:
+                torch.distributed.barrier()
+            self.train_epoch(
+                            epoch,
+                            )
+            # validate every val_every epochs
+            if (self.epoch + 1) % self.val_every == 0:
+                self.validate()
+
+            if self.model is not None:
+                self.model.train()
+
+ def train_epoch(self,
+ epoch,
+ ):
+ if self.model is not None:
+ self.model.train()
+ with tqdm(total=self.num_step_per_epoch, disable=(self.local_rank != 0)) as t:
+ for i in range(self.num_step_per_epoch):
+ # for idx, batch in enumerate(loader):
+ self.global_step += 1
+ t.set_description('Epoch %i' % epoch)
+
+ batch = next(self.train_loader)
+
+ batch = self.to_device(batch)
+
+ if self.model is not None:
+ for param in self.model.parameters(): param.grad = None
+
+ if not self.auto_optim:
+ loss = self.training_step(batch)
+ else:
+ loss = self.training_step(batch)
+ loss.backward()
+ torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12)
+ self.optimizer.step()
+
+ if self.scheduler is not None:
+ self.scheduler.step()
+ lr = self.optimizer.state_dict()['param_groups'][0]['lr']
+ self.log("lr", lr, self.global_step)
+
+ t.set_postfix(loss=loss.item(), lr=lr)
+
+ t.update(1)
+
+    def training_step(self, batch):
+        """Compute and return the loss for one batch. Must be overridden by subclasses."""
+        raise NotImplementedError
+
+    def validation_step(self, batch):
+        """Compute and return the validation metric(s) for one batch. Must be overridden."""
+        raise NotImplementedError
+
+    def validation_end(self, mean_val_outputs, val_outputs):
+        """Hook called after each validation pass with the aggregated metrics. Override as needed."""
+        pass
+
+    def log(self, k, v, step):
+        """Write one scalar ``v`` under tag ``k`` to TensorBoard (rank 0 only)."""
+        if self.local_rank == 0:
+            self.writer.add_scalar(k, scalar_value=v, global_step=step)
+
+    def log_dict(self, dict_, step):
+        """Write every key/value of ``dict_`` as a scalar to TensorBoard (rank 0 only)."""
+        if self.local_rank == 0:
+            for k, v in dict_.items():
+                self.writer.add_scalar(k, scalar_value=v, global_step=step)
+
+ def load_state_dict(self, weight_path, strict=True):
+ sd = torch.load(weight_path, map_location="cpu")
+ if "module" in sd :
+ sd = sd["module"]
+ new_sd = {}
+ for k, v in sd.items():
+ k = str(k)
+ new_k = k[7:] if k.startswith("module") else k
+ new_sd[new_k] = v
+
+ self.model.load_state_dict(new_sd, strict=strict)
+
+ print(f"model parameters are loaded successed.")
+
diff --git a/PRISM/SegMamba/light_training/utilities/__init__.py b/PRISM/SegMamba/light_training/utilities/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/utilities/collate_outputs.py b/PRISM/SegMamba/light_training/utilities/collate_outputs.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d67984febd927b946b8e44f33eaab0530e4b73
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/collate_outputs.py
@@ -0,0 +1,24 @@
+from typing import List
+
+import numpy as np
+
+
+def collate_outputs(outputs: List[dict]):
+ """
+ used to collate default train_step and validation_step outputs. If you want something different then you gotta
+ extend this
+
+ we expect outputs to be a list of dictionaries where each of the dict has the same set of keys
+ """
+ collated = {}
+ for k in outputs[0].keys():
+ if np.isscalar(outputs[0][k]):
+ collated[k] = [o[k] for o in outputs]
+ elif isinstance(outputs[0][k], np.ndarray):
+ collated[k] = np.vstack([o[k][None] for o in outputs])
+ elif isinstance(outputs[0][k], list):
+ collated[k] = [item for o in outputs for item in o[k]]
+ else:
+ raise ValueError(f'Cannot collate input of type {type(outputs[0][k])}. '
+ f'Modify collate_outputs to add this functionality')
+ return collated
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/utilities/dataset_name_id_conversion.py b/PRISM/SegMamba/light_training/utilities/dataset_name_id_conversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f2c35078009249d1c493639363be54059b1c2c7
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/dataset_name_id_conversion.py
@@ -0,0 +1,74 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Union
+
+from nnunetv2.paths import nnUNet_preprocessed, nnUNet_raw, nnUNet_results
+from batchgenerators.utilities.file_and_folder_operations import *
+import numpy as np
+
+
+def find_candidate_datasets(dataset_id: int):
+    """Collect all folder names matching 'DatasetXXX' for the given id across
+    the nnUNet preprocessed, raw and results directories; returns the unique
+    (sorted) candidates as a numpy array.
+    """
+    startswith = "Dataset%03.0d" % dataset_id
+    if nnUNet_preprocessed is not None and isdir(nnUNet_preprocessed):
+        candidates_preprocessed = subdirs(nnUNet_preprocessed, prefix=startswith, join=False)
+    else:
+        candidates_preprocessed = []
+
+    if nnUNet_raw is not None and isdir(nnUNet_raw):
+        candidates_raw = subdirs(nnUNet_raw, prefix=startswith, join=False)
+    else:
+        candidates_raw = []
+
+    candidates_trained_models = []
+    if nnUNet_results is not None and isdir(nnUNet_results):
+        candidates_trained_models += subdirs(nnUNet_results, prefix=startswith, join=False)
+
+    all_candidates = candidates_preprocessed + candidates_raw + candidates_trained_models
+    # np.unique also de-duplicates datasets present in more than one location
+    unique_candidates = np.unique(all_candidates)
+    return unique_candidates
+
+
+def convert_id_to_dataset_name(dataset_id: int):
+    """Resolve a numeric dataset id to its unique 'DatasetXXX_name' folder name.
+
+    Raises RuntimeError if zero or more than one matching dataset is found in
+    the configured nnUNet folders.
+    """
+    unique_candidates = find_candidate_datasets(dataset_id)
+    if len(unique_candidates) > 1:
+        raise RuntimeError("More than one dataset name found for dataset id %d. Please correct that. (I looked in the "
+                           "following folders:\n%s\n%s\n%s" % (dataset_id, nnUNet_raw, nnUNet_preprocessed, nnUNet_results))
+    if len(unique_candidates) == 0:
+        raise RuntimeError(f"Could not find a dataset with the ID {dataset_id}. Make sure the requested dataset ID "
+                           f"exists and that nnU-Net knows where raw and preprocessed data are located "
+                           f"(see Documentation - Installation). Here are your currently defined folders:\n"
+                           f"nnUNet_preprocessed={os.environ.get('nnUNet_preprocessed') if os.environ.get('nnUNet_preprocessed') is not None else 'None'}\n"
+                           f"nnUNet_results={os.environ.get('nnUNet_results') if os.environ.get('nnUNet_results') is not None else 'None'}\n"
+                           f"nnUNet_raw={os.environ.get('nnUNet_raw') if os.environ.get('nnUNet_raw') is not None else 'None'}\n"
+                           f"If something is not right, adapt your environment variables.")
+    return unique_candidates[0]
+
+
+def convert_dataset_name_to_id(dataset_name: str):
+ assert dataset_name.startswith("Dataset")
+ dataset_id = int(dataset_name[7:10])
+ return dataset_id
+
+
+def maybe_convert_to_dataset_name(dataset_name_or_id: Union[int, str]) -> str:
+ if isinstance(dataset_name_or_id, str) and dataset_name_or_id.startswith("Dataset"):
+ return dataset_name_or_id
+ if isinstance(dataset_name_or_id, str):
+ try:
+ dataset_name_or_id = int(dataset_name_or_id)
+ except ValueError:
+ raise ValueError("dataset_name_or_id was a string and did not start with 'Dataset' so we tried to "
+ "convert it to a dataset ID (int). That failed, however. Please give an integer number "
+ "('1', '2', etc) or a correct tast name. Your input: %s" % dataset_name_or_id)
+ return convert_id_to_dataset_name(dataset_name_or_id)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/utilities/ddp_allgather.py b/PRISM/SegMamba/light_training/utilities/ddp_allgather.py
new file mode 100644
index 0000000000000000000000000000000000000000..c42b3ef654f361904d5fe1868621b3f6f5cd29a6
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/ddp_allgather.py
@@ -0,0 +1,49 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional, Tuple
+
+import torch
+from torch import distributed
+
+
+def print_if_rank0(*args):
+    """Print only on the rank-0 process (requires torch.distributed to be initialized)."""
+    if distributed.get_rank() == 0:
+        print(*args)
+
+
+class AllGatherGrad(torch.autograd.Function):
+    """Differentiable all_gather: gathers a tensor from all ranks in forward
+    and all-reduces the gradients in backward so each rank receives the
+    gradient slice belonging to its own input tensor.
+    """
+    # stolen from pytorch lightning
+    @staticmethod
+    def forward(
+        ctx: Any,
+        tensor: torch.Tensor,
+        group: Optional["torch.distributed.ProcessGroup"] = None,
+    ) -> torch.Tensor:
+        # remember the group so backward reduces over the same processes
+        ctx.group = group
+
+        gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())]
+
+        torch.distributed.all_gather(gathered_tensor, tensor, group=group)
+        # stack along a new leading (world-size) dimension
+        gathered_tensor = torch.stack(gathered_tensor, dim=0)
+
+        return gathered_tensor
+
+    @staticmethod
+    def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
+        grad_output = torch.cat(grad_output)
+
+        # sum gradients across ranks, then return only this rank's slice
+        torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group)
+
+        return grad_output[torch.distributed.get_rank()], None
+
diff --git a/PRISM/SegMamba/light_training/utilities/default_n_proc_DA.py b/PRISM/SegMamba/light_training/utilities/default_n_proc_DA.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ecc9228296355d01087f216a9ea2640b90403f8
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/default_n_proc_DA.py
@@ -0,0 +1,44 @@
+import subprocess
+import os
+
+
+def get_allowed_n_proc_DA():
+ """
+ This function is used to set the number of processes used on different Systems. It is specific to our cluster
+ infrastructure at DKFZ. You can modify it to suit your needs. Everything is allowed.
+
+ IMPORTANT: if the environment variable nnUNet_n_proc_DA is set it will overwrite anything in this script
+ (see first line).
+
+ Interpret the output as the number of processes used for data augmentation PER GPU.
+
+ The way it is implemented here is simply a look up table. We know the hostnames, CPU and GPU configurations of our
+ systems and set the numbers accordingly. For example, a system with 4 GPUs and 48 threads can use 12 threads per
+ GPU without overloading the CPU (technically 11 because we have a main process as well), so that's what we use.
+ """
+
+ if 'nnUNet_n_proc_DA' in os.environ.keys():
+ use_this = int(os.environ['nnUNet_n_proc_DA'])
+ else:
+ hostname = subprocess.getoutput(['hostname'])
+ if hostname in ['Fabian', ]:
+ use_this = 12
+ elif hostname in ['hdf19-gpu16', 'hdf19-gpu17', 'hdf19-gpu18', 'hdf19-gpu19', 'e230-AMDworkstation']:
+ use_this = 16
+ elif hostname.startswith('e230-dgx1'):
+ use_this = 10
+ elif hostname.startswith('hdf18-gpu') or hostname.startswith('e132-comp'):
+ use_this = 16
+ elif hostname.startswith('e230-dgx2'):
+ use_this = 6
+ elif hostname.startswith('e230-dgxa100-'):
+ use_this = 28
+ elif hostname.startswith('lsf22-gpu'):
+ use_this = 28
+ elif hostname.startswith('hdf19-gpu') or hostname.startswith('e071-gpu'):
+ use_this = 12
+ else:
+ use_this = 12 # default value
+
+ use_this = min(use_this, os.cpu_count())
+ return use_this
diff --git a/PRISM/SegMamba/light_training/utilities/file_path_utilities.py b/PRISM/SegMamba/light_training/utilities/file_path_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..611f6e24dbcd12b69b1b1695e6a0e6a6318981bf
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/file_path_utilities.py
@@ -0,0 +1,123 @@
+from multiprocessing import Pool
+from typing import Union, Tuple
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+
+from nnunetv2.configuration import default_num_processes
+from nnunetv2.paths import nnUNet_results
+from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
+
+
+def convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration):
+ return f'{trainer_name}__{plans_identifier}__{configuration}'
+
+
+def convert_identifier_to_trainer_plans_config(identifier: str):
+ return os.path.basename(identifier).split('__')
+
+
+def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer',
+                      plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres',
+                      fold: Union[str, int] = None) -> str:
+    """Build the nnUNet results folder path for a dataset/trainer/plans/config
+    combination, optionally appending a 'fold_X' subfolder.
+    """
+    tmp = join(nnUNet_results, maybe_convert_to_dataset_name(dataset_name_or_id),
+               convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration))
+    if fold is not None:
+        tmp = join(tmp, f'fold_{fold}')
+    return tmp
+
+
+def parse_dataset_trainer_plans_configuration_from_path(path: str):
+    """Recover (dataset, trainer, plans, configuration) from a results path.
+
+    Expects a path containing 'DatasetXXX/TRAINER__PLANS__CONFIG', optionally
+    followed by a 'fold_X' folder. Raises AssertionError on malformed paths.
+    NOTE(review): if the path contains neither a 'fold_' nor a 'Dataset'
+    component this function implicitly returns None — confirm callers handle
+    that.
+    """
+    folders = split_path(path)
+    # this here can be a little tricky because we are making assumptions. Let's hope this never fails lol
+
+    # safer to make this depend on two conditions, the fold_x and the DatasetXXX
+    # first let's see if some fold_X is present
+    fold_x_present = [i.startswith('fold_') for i in folders]
+    if any(fold_x_present):
+        idx = fold_x_present.index(True)
+        # OK now two entries before that there should be DatasetXXX
+        assert len(folders[:idx]) >= 2, 'Bad path, cannot extract what I need. Your path needs to be at least ' \
+                                        'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work'
+        if folders[idx - 2].startswith('Dataset'):
+            splitted = folders[idx - 1].split('__')
+            assert len(splitted) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \
+                                       'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work'
+            return folders[idx - 2], *splitted
+    else:
+        # we can only check for dataset followed by a string that is separable into three strings by splitting with '__'
+        # look for DatasetXXX
+        dataset_folder = [i.startswith('Dataset') for i in folders]
+        if any(dataset_folder):
+            idx = dataset_folder.index(True)
+            assert len(folders) >= (idx + 1), 'Bad path, cannot extract what I need. Your path needs to be at least ' \
+                                              'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work'
+            splitted = folders[idx + 1].split('__')
+            assert len(splitted) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \
+                                       'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work'
+            return folders[idx], *splitted
+
+
+def get_ensemble_name(model1_folder, model2_folder, folds: Tuple[int, ...]):
+ identifier = 'ensemble___' + os.path.basename(model1_folder) + '___' + \
+ os.path.basename(model2_folder) + '___' + folds_tuple_to_string(folds)
+ return identifier
+
+
+def get_ensemble_name_from_d_tr_c(dataset, tr1, p1, c1, tr2, p2, c2, folds: Tuple[int, ...]):
+ model1_folder = get_output_folder(dataset, tr1, p1, c1)
+ model2_folder = get_output_folder(dataset, tr2, p2, c2)
+
+ get_ensemble_name(model1_folder, model2_folder, folds)
+
+
+def convert_ensemble_folder_to_model_identifiers_and_folds(ensemble_folder: str):
+ prefix, *models, folds = os.path.basename(ensemble_folder).split('___')
+ return models, folds
+
+
+def folds_tuple_to_string(folds: Union[List[int], Tuple[int, ...]]):
+ s = str(folds[0])
+ for f in folds[1:]:
+ s += f"_{f}"
+ return s
+
+
+def folds_string_to_tuple(folds_string: str):
+ folds = folds_string.split('_')
+ res = []
+ for f in folds:
+ try:
+ res.append(int(f))
+ except ValueError:
+ res.append(f)
+ return res
+
+
+def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int = 0):
+    """
+    returns True if the number of results that are not ready is greater than or equal to the number of
+    available workers + allowed_num_queued. Raises RuntimeError if any background worker has died.
+    """
+    alive = [i.is_alive() for i in worker_list]
+    if not all(alive):
+        raise RuntimeError('Some background workers are no longer alive')
+
+    not_ready = [not i.ready() for i in results_list]
+    # NOTE: _pool is a private attribute of multiprocessing.Pool — presumably
+    # stable across supported Python versions, but verify on upgrades.
+    if sum(not_ready) >= (len(export_pool._pool) + allowed_num_queued):
+        return True
+    return False
+
+
+if __name__ == '__main__':
+    ### well at this point I could just write tests...
+    # smoke tests for parse_dataset_trainer_plans_configuration_from_path
+    path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres'
+    print(parse_dataset_trainer_plans_configuration_from_path(path))
+    path = 'Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres'
+    print(parse_dataset_trainer_plans_configuration_from_path(path))
+    path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres/fold_all'
+    print(parse_dataset_trainer_plans_configuration_from_path(path))
+    try:
+        # malformed path: must trip the assertion inside the parser
+        path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/'
+        print(parse_dataset_trainer_plans_configuration_from_path(path))
+    except AssertionError:
+        print('yayy, assertion works')
diff --git a/PRISM/SegMamba/light_training/utilities/find_class_by_name.py b/PRISM/SegMamba/light_training/utilities/find_class_by_name.py
new file mode 100644
index 0000000000000000000000000000000000000000..a345d99a707ad9f70eea6c991d9726b1efb4c062
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/find_class_by_name.py
@@ -0,0 +1,24 @@
+import importlib
+import pkgutil
+
+from batchgenerators.utilities.file_and_folder_operations import *
+
+
+def recursive_find_python_class(folder: str, class_name: str, current_module: str):
+    """Search ``folder`` (and, recursively, its sub-packages) for a module
+    defining ``class_name`` and return that class, or None if not found.
+
+    ``current_module`` must be the dotted import path corresponding to
+    ``folder`` so the scanned modules can actually be imported.
+    """
+    tr = None
+    for importer, modname, ispkg in pkgutil.iter_modules([folder]):
+        # print(modname, ispkg)
+        if not ispkg:
+            m = importlib.import_module(current_module + "." + modname)
+            if hasattr(m, class_name):
+                tr = getattr(m, class_name)
+                break
+
+    if tr is None:
+        # not found at this level: descend into sub-packages
+        for importer, modname, ispkg in pkgutil.iter_modules([folder]):
+            if ispkg:
+                next_current_module = current_module + "." + modname
+                tr = recursive_find_python_class(join(folder, modname), class_name, current_module=next_current_module)
+            if tr is not None:
+                break
+    return tr
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/utilities/get_network_from_plans.py b/PRISM/SegMamba/light_training/utilities/get_network_from_plans.py
new file mode 100644
index 0000000000000000000000000000000000000000..447d1d5e944c5cd24078338679912e3ba19915b5
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/get_network_from_plans.py
@@ -0,0 +1,77 @@
+from dynamic_network_architectures.architectures.unet import PlainConvUNet, ResidualEncoderUNet
+from dynamic_network_architectures.building_blocks.helper import get_matching_instancenorm, convert_dim_to_conv_op
+from dynamic_network_architectures.initialization.weight_init import init_last_bn_before_add_to_0
+from nnunetv2.utilities.network_initialization import InitWeights_He
+from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager
+from torch import nn
+
+
+def get_network_from_plans(plans_manager: PlansManager,
+                           dataset_json: dict,
+                           configuration_manager: ConfigurationManager,
+                           num_input_channels: int,
+                           deep_supervision: bool = True):
+    """
+    we may have to change this in the future to accommodate other plans -> network mappings
+
+    num_input_channels can differ depending on whether we do cascade. Its best to make this info available in the
+    trainer rather than inferring it again from the plans here.
+    """
+    # one stage per conv kernel entry in the plans
+    num_stages = len(configuration_manager.conv_kernel_sizes)
+
+    # spatial dimensionality (2D/3D) follows from the kernel size length
+    dim = len(configuration_manager.conv_kernel_sizes[0])
+    conv_op = convert_dim_to_conv_op(dim)
+
+    label_manager = plans_manager.get_label_manager(dataset_json)
+
+    segmentation_network_class_name = configuration_manager.UNet_class_name
+    mapping = {
+        'PlainConvUNet': PlainConvUNet,
+        'ResidualEncoderUNet': ResidualEncoderUNet
+    }
+    # per-architecture constructor defaults (instance norm + LeakyReLU, no dropout)
+    kwargs = {
+        'PlainConvUNet': {
+            'conv_bias': True,
+            'norm_op': get_matching_instancenorm(conv_op),
+            'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
+            'dropout_op': None, 'dropout_op_kwargs': None,
+            'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
+        },
+        'ResidualEncoderUNet': {
+            'conv_bias': True,
+            'norm_op': get_matching_instancenorm(conv_op),
+            'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
+            'dropout_op': None, 'dropout_op_kwargs': None,
+            'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
+        }
+    }
+    assert segmentation_network_class_name in mapping.keys(), 'The network architecture specified by the plans file ' \
+                                                              'is non-standard (maybe your own?). Yo\'ll have to dive ' \
+                                                              'into either this ' \
+                                                              'function (get_network_from_plans) or ' \
+                                                              'the init of your nnUNetModule to accomodate that.'
+    network_class = mapping[segmentation_network_class_name]
+
+    # the residual encoder variant names its per-stage depth parameter differently
+    conv_or_blocks_per_stage = {
+        'n_conv_per_stage'
+        if network_class != ResidualEncoderUNet else 'n_blocks_per_stage': configuration_manager.n_conv_per_stage_encoder,
+        'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder
+    }
+    # network class name!!
+    model = network_class(
+        input_channels=num_input_channels,
+        n_stages=num_stages,
+        # feature width doubles per stage, capped at unet_max_num_features
+        features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i,
+                                configuration_manager.unet_max_num_features) for i in range(num_stages)],
+        conv_op=conv_op,
+        kernel_sizes=configuration_manager.conv_kernel_sizes,
+        strides=configuration_manager.pool_op_kernel_sizes,
+        num_classes=label_manager.num_segmentation_heads,
+        deep_supervision=deep_supervision,
+        **conv_or_blocks_per_stage,
+        **kwargs[segmentation_network_class_name]
+    )
+    model.apply(InitWeights_He(1e-2))
+    if network_class == ResidualEncoderUNet:
+        # zero-init the last norm in each residual block (identity-like start)
+        model.apply(init_last_bn_before_add_to_0)
+    return model
diff --git a/PRISM/SegMamba/light_training/utilities/helpers.py b/PRISM/SegMamba/light_training/utilities/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..42448e3f9c3de88ba13568ff7585797ee29607ab
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/helpers.py
@@ -0,0 +1,27 @@
+import torch
+
+
+def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor:
+ return torch.softmax(x, 0)
+
+
+def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor:
+ return torch.softmax(x, 1)
+
+
+def empty_cache(device: torch.device):
+    """Release cached allocator memory on the given device (cuda or mps); no-op otherwise."""
+    if device.type == 'cuda':
+        torch.cuda.empty_cache()
+    elif device.type == 'mps':
+        # imported lazily: torch.mps is only usable on MPS-enabled builds
+        from torch import mps
+        mps.empty_cache()
+    else:
+        pass
+
+
+class dummy_context(object):
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
diff --git a/PRISM/SegMamba/light_training/utilities/json_export.py b/PRISM/SegMamba/light_training/utilities/json_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..faed954f4a57f39c56851f899e447caab213f29d
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/json_export.py
@@ -0,0 +1,59 @@
+from collections.abc import Iterable
+
+import numpy as np
+import torch
+
+
+def recursive_fix_for_json_export(my_dict: dict):
+    """In-place conversion of a dict to JSON-serializable types: numpy
+    ints/floats/bools become Python scalars, 1-d ndarrays become lists,
+    nested dicts/lists/tuples are fixed recursively, torch.device becomes str.
+    """
+    # json is stupid. 'cannot serialize object of type bool_/int64/float64'. Come on bro.
+    keys = list(my_dict.keys())  # cannot iterate over keys() if we change keys....
+    for k in keys:
+        # numpy integer keys must be replaced by plain int keys first
+        if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)):
+            tmp = my_dict[k]
+            del my_dict[k]
+            my_dict[int(k)] = tmp
+            del tmp
+            k = int(k)
+
+        if isinstance(my_dict[k], dict):
+            recursive_fix_for_json_export(my_dict[k])
+        elif isinstance(my_dict[k], np.ndarray):
+            assert len(my_dict[k].shape) == 1, 'only 1d arrays are supported'
+            my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)
+        elif isinstance(my_dict[k], (np.bool_,)):
+            my_dict[k] = bool(my_dict[k])
+        elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)):
+            my_dict[k] = int(my_dict[k])
+        elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)):
+            my_dict[k] = float(my_dict[k])
+        elif isinstance(my_dict[k], list):
+            my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k]))
+        elif isinstance(my_dict[k], tuple):
+            my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple)
+        elif isinstance(my_dict[k], torch.device):
+            my_dict[k] = str(my_dict[k])
+        else:
+            pass  # pray it can be serialized
+
+
+def fix_types_iterable(iterable, output_type):
+    """Convert every element of ``iterable`` to a JSON-serializable type and
+    return the result as ``output_type`` (e.g. list or tuple). Dicts and
+    nested iterables are fixed recursively.
+    """
+    # this sh!t is hacky as hell and will break if you use it for anything outside nnunet. Keep you hands off of this.
+    out = []
+    for i in iterable:
+        if type(i) in (np.int64, np.int32, np.int8, np.uint8):
+            out.append(int(i))
+        elif isinstance(i, dict):
+            recursive_fix_for_json_export(i)
+            out.append(i)
+        elif type(i) in (np.float32, np.float64, np.float16):
+            out.append(float(i))
+        elif type(i) in (np.bool_,):
+            out.append(bool(i))
+        elif isinstance(i, str):
+            # strings are Iterable too — handle them before the generic branch
+            out.append(i)
+        elif isinstance(i, Iterable):
+            # print('recursive call on', i, type(i))
+            out.append(fix_types_iterable(i, type(i)))
+        else:
+            out.append(i)
+    return output_type(out)
diff --git a/PRISM/SegMamba/light_training/utilities/label_handling/__init__.py b/PRISM/SegMamba/light_training/utilities/label_handling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/utilities/label_handling/label_handling.py b/PRISM/SegMamba/light_training/utilities/label_handling/label_handling.py
new file mode 100644
index 0000000000000000000000000000000000000000..32f1b6d020189614fd0574b40f2d165def51a786
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/label_handling/label_handling.py
@@ -0,0 +1,319 @@
+from __future__ import annotations
+from time import time
+from typing import Union, List, Tuple, Type
+
+import numpy as np
+import torch
+from acvl_utils.cropping_and_padding.bounding_boxes import bounding_box_to_slice
+from batchgenerators.utilities.file_and_folder_operations import join
+
+import nnunetv2
+from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
+from nnunetv2.utilities.helpers import softmax_helper_dim0
+
+from typing import TYPE_CHECKING
+
+# see https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
+if TYPE_CHECKING:
+ from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
+
+
+class LabelManager(object):
+    """Interprets a dataset's label dictionary.
+
+    Distinguishes plain-label training (one softmax over all labels) from
+    region-based training (one sigmoid output per region, where a region is a
+    tuple/list of label values), resolves the optional 'ignore' label, and
+    converts predicted logits/probabilities into segmentation maps.
+    """
+
+    def __init__(self, label_dict: dict, regions_class_order: Union[List[int], None], force_use_labels: bool = False,
+                 inference_nonlin=None):
+        """
+        :param label_dict: maps label name -> int label value, or -> tuple/list of ints for a region
+        :param regions_class_order: label value written for each region when converting probabilities to a
+            segmentation map; required if regions are used
+        :param force_use_labels: if True, treat everything as plain labels even if regions are declared
+        :param inference_nonlin: nonlinearity applied to logits; defaults to sigmoid (regions) or softmax (labels)
+        """
+        self._sanity_check(label_dict)
+        self.label_dict = label_dict
+        self.regions_class_order = regions_class_order
+        self._force_use_labels = force_use_labels
+
+        if force_use_labels:
+            self._has_regions = False
+        else:
+            # regions are declared as tuples/lists with more than one entry
+            self._has_regions: bool = any(
+                [isinstance(i, (tuple, list)) and len(i) > 1 for i in self.label_dict.values()])
+
+        self._ignore_label: Union[None, int] = self._determine_ignore_label()
+        self._all_labels: List[int] = self._get_all_labels()
+
+        self._regions: Union[None, List[Union[int, Tuple[int, ...]]]] = self._get_regions()
+
+        if self.has_ignore_label:
+            assert self.ignore_label == max(
+                self.all_labels) + 1, 'If you use the ignore label it must have the highest ' \
+                                      'label value! It cannot be 0 or in between other labels. ' \
+                                      'Sorry bro.'
+
+        if inference_nonlin is None:
+            self.inference_nonlin = torch.sigmoid if self.has_regions else softmax_helper_dim0
+        else:
+            self.inference_nonlin = inference_nonlin
+
+    def _sanity_check(self, label_dict: dict):
+        """Ensure a scalar background label exists and is 0."""
+        if not 'background' in label_dict.keys():
+            raise RuntimeError('Background label not declared (remeber that this should be label 0!)')
+        bg_label = label_dict['background']
+        if isinstance(bg_label, (tuple, list)):
+            raise RuntimeError(f"Background label must be 0. Not a list. Not a tuple. Your background label: {bg_label}")
+        assert int(bg_label) == 0, f"Background label must be 0. Your background label: {bg_label}"
+        # not sure if we want to allow regions that contain background. I don't immediately see how this could cause
+        # problems so we allow it for now. That doesn't mean that this is explicitly supported. It could be that this
+        # just crashes.
+
+    def _get_all_labels(self) -> List[int]:
+        """Return the sorted, deduplicated list of all label values (regions flattened), excluding 'ignore'."""
+        all_labels = []
+        for k, r in self.label_dict.items():
+            # ignore label is not going to be used, hence the name. Duh.
+            if k == 'ignore':
+                continue
+            if isinstance(r, (tuple, list)):
+                for ri in r:
+                    all_labels.append(int(ri))
+            else:
+                all_labels.append(int(r))
+        all_labels = list(np.unique(all_labels))
+        all_labels.sort()
+        return all_labels
+
+    def _get_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]:
+        """Return the list of non-background regions (as tuples/ints), or None if plain labels are used."""
+        if not self._has_regions or self._force_use_labels:
+            return None
+        else:
+            assert self.regions_class_order is not None, 'if region-based training is requested then you need to ' \
+                                                         'define regions_class_order!'
+            regions = []
+            for k, r in self.label_dict.items():
+                # ignore ignore label
+                if k == 'ignore':
+                    continue
+                # ignore regions that are background
+                if (np.isscalar(r) and r == 0) \
+                        or \
+                        (isinstance(r, (tuple, list)) and len(np.unique(r)) == 1 and np.unique(r)[0] == 0):
+                    continue
+                if isinstance(r, list):
+                    r = tuple(r)
+                regions.append(r)
+            assert len(self.regions_class_order) == len(regions), 'regions_class_order must have as ' \
+                                                                  'many entries as there are ' \
+                                                                  'regions'
+            return regions
+
+    def _determine_ignore_label(self) -> Union[None, int]:
+        """Return the 'ignore' label value if declared, else None. Must be a plain int, never a region."""
+        ignore_label = self.label_dict.get('ignore')
+        if ignore_label is not None:
+            assert isinstance(ignore_label, int), f'Ignore label has to be an integer. It cannot be a region ' \
+                                                  f'(list/tuple). Got {type(ignore_label)}.'
+        return ignore_label
+
+    @property
+    def has_regions(self) -> bool:
+        return self._has_regions
+
+    @property
+    def has_ignore_label(self) -> bool:
+        return self.ignore_label is not None
+
+    @property
+    def all_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]:
+        return self._regions
+
+    @property
+    def all_labels(self) -> List[int]:
+        return self._all_labels
+
+    @property
+    def ignore_label(self) -> Union[None, int]:
+        return self._ignore_label
+
+    def apply_inference_nonlin(self, logits: Union[np.ndarray, torch.Tensor]) -> \
+            Union[np.ndarray, torch.Tensor]:
+        """
+        logits has to have shape (c, x, y(, z)) where c is the number of classes/regions
+
+        Note: numpy input is converted to torch; the returned value is a torch.Tensor in that case.
+        """
+        if isinstance(logits, np.ndarray):
+            logits = torch.from_numpy(logits)
+
+        with torch.no_grad():
+            # softmax etc is not implemented for half
+            logits = logits.float()
+            probabilities = self.inference_nonlin(logits)
+
+        return probabilities
+
+    def convert_probabilities_to_segmentation(self, predicted_probabilities: Union[np.ndarray, torch.Tensor]) -> \
+            Union[np.ndarray, torch.Tensor]:
+        """
+        assumes that inference_nonlinearity was already applied!
+
+        predicted_probabilities has to have shape (c, x, y(, z)) where c is the number of classes/regions
+
+        Regions: each channel is thresholded at 0.5 and written in regions_class_order (later regions overwrite
+        earlier ones). Labels: plain argmax over the channel axis.
+        """
+        if not isinstance(predicted_probabilities, (np.ndarray, torch.Tensor)):
+            raise RuntimeError(f"Unexpected input type. Expected np.ndarray or torch.Tensor,"
+                               f" got {type(predicted_probabilities)}")
+
+        if self.has_regions:
+            assert self.regions_class_order is not None, 'if region-based training is requested then you need to ' \
+                                                         'define regions_class_order!'
+        # check correct number of outputs
+        assert predicted_probabilities.shape[0] == self.num_segmentation_heads, \
+            f'unexpected number of channels in predicted_probabilities. Expected {self.num_segmentation_heads}, ' \
+            f'got {predicted_probabilities.shape[0]}. Remeber that predicted_probabilities should have shape ' \
+            f'(c, x, y(, z)).'
+
+        if self.has_regions:
+            if isinstance(predicted_probabilities, np.ndarray):
+                segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.uint16)
+            else:
+                # no uint16 in torch
+                segmentation = torch.zeros(predicted_probabilities.shape[1:], dtype=torch.int16,
+                                           device=predicted_probabilities.device)
+            for i, c in enumerate(self.regions_class_order):
+                segmentation[predicted_probabilities[i] > 0.5] = c
+        else:
+            segmentation = predicted_probabilities.argmax(0)
+
+        return segmentation
+
+    def convert_logits_to_segmentation(self, predicted_logits: Union[np.ndarray, torch.Tensor]) -> \
+            Union[np.ndarray, torch.Tensor]:
+        """Convenience wrapper: nonlinearity followed by probability-to-segmentation conversion."""
+        probabilities = self.apply_inference_nonlin(predicted_logits)
+        return self.convert_probabilities_to_segmentation(probabilities)
+
+    def revert_cropping_on_probabilities(self, predicted_probabilities: Union[torch.Tensor, np.ndarray],
+                                         bbox: List[List[int]],
+                                         original_shape: Union[List[int], Tuple[int, ...]]):
+        """
+        ONLY USE THIS WITH PROBABILITIES, DO NOT USE LOGITS AND DO NOT USE FOR SEGMENTATION MAPS!!!
+
+        predicted_probabilities must be (c, x, y(, z))
+
+        Why do we do this here? Well if we pad probabilities we need to make sure that convert_logits_to_segmentation
+        correctly returns background in the padded areas. Also we want to ba able to look at the padded probabilities
+        and not have strange artifacts.
+        Only LabelManager knows how this needs to be done. So let's let him/her do it, ok?
+        """
+        # revert cropping
+        probs_reverted_cropping = np.zeros((predicted_probabilities.shape[0], *original_shape),
+                                           dtype=predicted_probabilities.dtype) \
+            if isinstance(predicted_probabilities, np.ndarray) else \
+            torch.zeros((predicted_probabilities.shape[0], *original_shape), dtype=predicted_probabilities.dtype)
+
+        if not self.has_regions:
+            # padded area must be classified as background (channel 0 probability 1) for softmax-style outputs
+            probs_reverted_cropping[0] = 1
+
+        slicer = bounding_box_to_slice(bbox)
+        probs_reverted_cropping[tuple([slice(None)] + list(slicer))] = predicted_probabilities
+        return probs_reverted_cropping
+
+    @staticmethod
+    def filter_background(classes_or_regions: Union[List[int], List[Union[int, Tuple[int, ...]]]]):
+        """Drop entries that are background: the scalar 0, or a region consisting only of zeros."""
+        # heck yeah
+        # This is definitely taking list comprehension too far. Enjoy.
+        return [i for i in classes_or_regions if
+                ((not isinstance(i, (tuple, list))) and i != 0)
+                or
+                (isinstance(i, (tuple, list)) and not (
+                        len(np.unique(i)) == 1 and np.unique(i)[0] == 0))]
+
+    @property
+    def foreground_regions(self):
+        return self.filter_background(self.all_regions)
+
+    @property
+    def foreground_labels(self):
+        return self.filter_background(self.all_labels)
+
+    @property
+    def num_segmentation_heads(self):
+        # regions: one sigmoid channel per foreground region; labels: one softmax channel per label (incl. background)
+        if self.has_regions:
+            return len(self.foreground_regions)
+        else:
+            return len(self.all_labels)
+
+
+def get_labelmanager_class_from_plans(plans: dict) -> Type[LabelManager]:
+    """Return the label manager class named by the plans' 'label_manager' key,
+    falling back to LabelManager if the key is absent.
+
+    NOTE(review): recursive_find_python_class may return None if the named class
+    cannot be located; callers would then fail at instantiation time — verify.
+    """
+    if 'label_manager' not in plans.keys():
+        print('No label manager specified in plans. Using default: LabelManager')
+        return LabelManager
+    else:
+        labelmanager_class = recursive_find_python_class(join(nnunetv2.__path__[0], "utilities", "label_handling"),
+                                                         plans['label_manager'],
+                                                         current_module="nnunetv2.utilities.label_handling")
+        return labelmanager_class
+
+
+def convert_labelmap_to_one_hot(segmentation: Union[np.ndarray, torch.Tensor],
+                                all_labels: Union[List, torch.Tensor, np.ndarray, tuple],
+                                output_dtype=None) -> Union[np.ndarray, torch.Tensor]:
+    """
+    if output_dtype is None then we use np.uint8/torch.uint8
+    if input is torch.Tensor then output will be on the same device
+
+    np.ndarray is faster than torch.Tensor
+
+    if segmentation is torch.Tensor, this function will be faster if it is LongTensor. If it is something else we have
+    to cast which takes time.
+
+    IMPORTANT: This function only works properly if your labels are consecutive integers, so something like 0, 1, 2, 3, ...
+    DO NOT use it with 0, 32, 123, 255, ... or whatever (fix your labels, yo)
+    """
+    if isinstance(segmentation, torch.Tensor):
+        result = torch.zeros((len(all_labels), *segmentation.shape),
+                             dtype=output_dtype if output_dtype is not None else torch.uint8,
+                             device=segmentation.device)
+        # variant 1, 2x faster than 2
+        # scatter_ writes a 1 at channel index == label value; this is why labels must be
+        # consecutive ints in [0, len(all_labels)) — out-of-range values would error out
+        result.scatter_(0, segmentation[None].long(), 1)  # why does this have to be long!?
+        # variant 2, slower than 1
+        # for i, l in enumerate(all_labels):
+        #     result[i] = segmentation == l
+    else:
+        result = np.zeros((len(all_labels), *segmentation.shape),
+                          dtype=output_dtype if output_dtype is not None else np.uint8)
+        # variant 1, fastest in my testing
+        for i, l in enumerate(all_labels):
+            result[i] = segmentation == l
+        # variant 2. Takes about twice as long so nah
+        # result = np.eye(len(all_labels))[segmentation].transpose((3, 0, 1, 2))
+    return result
+
+
+def determine_num_input_channels(plans_manager: PlansManager,
+                                 configuration_or_config_manager: Union[str, ConfigurationManager],
+                                 dataset_json: dict) -> int:
+    """Number of input channels the network receives: one per image modality plus,
+    for cascade stages (previous_stage_name set), one per foreground label for the
+    one-hot encoded prediction of the previous stage."""
+    if isinstance(configuration_or_config_manager, str):
+        config_manager = plans_manager.get_configuration(configuration_or_config_manager)
+    else:
+        config_manager = configuration_or_config_manager
+
+    label_manager = plans_manager.get_label_manager(dataset_json)
+    # older dataset.json files use 'modality', newer ones 'channel_names'
+    num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names'])
+
+    # cascade has different number of input channels
+    if config_manager.previous_stage_name is not None:
+        num_label_inputs = len(label_manager.foreground_labels)
+        num_input_channels = num_modalities + num_label_inputs
+    else:
+        num_input_channels = num_modalities
+    return num_input_channels
+
+
+if __name__ == '__main__':
+    # quick ad-hoc benchmark: numpy vs torch one-hot conversion, plus a check
+    # that both paths produce identical results
+    # this code used to be able to differentiate variant 1 and 2 to measure time.
+    num_labels = 7
+    seg = np.random.randint(0, num_labels, size=(256, 256, 256), dtype=np.uint8)
+    seg_torch = torch.from_numpy(seg)
+    st = time()
+    onehot_npy = convert_labelmap_to_one_hot(seg, np.arange(num_labels))
+    time_1 = time()
+    onehot_npy2 = convert_labelmap_to_one_hot(seg, np.arange(num_labels))
+    time_2 = time()
+    onehot_torch = convert_labelmap_to_one_hot(seg_torch, np.arange(num_labels))
+    time_torch = time()
+    onehot_torch2 = convert_labelmap_to_one_hot(seg_torch, np.arange(num_labels))
+    time_torch2 = time()
+    print(
+        f'np: {time_1 - st}, np2: {time_2 - time_1}, torch: {time_torch - time_2}, torch2: {time_torch2 - time_torch}')
+    onehot_torch = onehot_torch.numpy()
+    onehot_torch2 = onehot_torch2.numpy()
+    print(np.all(onehot_torch == onehot_npy))
+    print(np.all(onehot_torch2 == onehot_npy))
diff --git a/PRISM/SegMamba/light_training/utilities/network_initialization.py b/PRISM/SegMamba/light_training/utilities/network_initialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ead271800b20873040973280726ee51093d7919
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/network_initialization.py
@@ -0,0 +1,12 @@
+from torch import nn
+
+
+class InitWeights_He(object):
+    """He (Kaiming) normal weight initializer for (transposed) convolution layers.
+    Apply to a network with ``network.apply(InitWeights_He(neg_slope))``; other
+    module types are left untouched."""
+
+    def __init__(self, neg_slope=1e-2):
+        # negative slope of the leaky ReLU assumed to follow the convolutions
+        self.neg_slope = neg_slope
+
+    def __call__(self, module):
+        if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
+            # NOTE(review): kaiming_normal_/constant_ modify their argument in place
+            # and return it, so these re-assignments are redundant but harmless
+            module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
+            if module.bias is not None:
+                module.bias = nn.init.constant_(module.bias, 0)
diff --git a/PRISM/SegMamba/light_training/utilities/overlay_plots.py b/PRISM/SegMamba/light_training/utilities/overlay_plots.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5d7f9439accd1644feddf7cc74846a2f74d7580
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/overlay_plots.py
@@ -0,0 +1,274 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+from multiprocessing.pool import Pool
+from typing import Tuple, Union
+
+import numpy as np
+import pandas as pd
+from batchgenerators.utilities.file_and_folder_operations import *
+from nnunetv2.configuration import default_num_processes
+from nnunetv2.imageio.base_reader_writer import BaseReaderWriter
+from nnunetv2.imageio.reader_writer_registry import determine_reader_writer_from_dataset_json
+from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed
+from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
+from nnunetv2.utilities.utils import get_identifiers_from_splitted_dataset_folder
+
+# hex colors (no leading '#') used to tint segmentation labels in overlays;
+# index 0 (black = no tint) lines up with the background label
+color_cycle = (
+    "000000",
+    "4363d8",
+    "f58231",
+    "3cb44b",
+    "e6194B",
+    "911eb4",
+    "ffe119",
+    "bfef45",
+    "42d4f4",
+    "f032e6",
+    "000075",
+    "9A6324",
+    "808000",
+    "800000",
+    "469990",
+)
+
+
+def hex_to_rgb(hex: str):
+ assert len(hex) == 6
+ return tuple(int(hex[i:i + 2], 16) for i in (0, 2, 4))
+
+
+def generate_overlay(input_image: np.ndarray, segmentation: np.ndarray, mapping: dict = None,
+ color_cycle: Tuple[str, ...] = color_cycle,
+ overlay_intensity: float = 0.6):
+ """
+ image can be 2d greyscale or 2d RGB (color channel in last dimension!)
+
+ Segmentation must be label map of same shape as image (w/o color channels)
+
+ mapping can be label_id -> idx_in_cycle or None
+
+ returned image is scaled to [0, 255] (uint8)!!!
+ """
+ # create a copy of image
+ image = np.copy(input_image)
+
+ if len(image.shape) == 2:
+ image = np.tile(image[:, :, None], (1, 1, 3))
+ elif len(image.shape) == 3:
+ if image.shape[2] == 1:
+ image = np.tile(image, (1, 1, 3))
+ else:
+ raise RuntimeError(f'if 3d image is given the last dimension must be the color channels (3 channels). '
+ f'Only 2D images are supported. Your image shape: {image.shape}')
+ else:
+ raise RuntimeError("unexpected image shape. only 2D images and 2D images with color channels (color in "
+ "last dimension) are supported")
+
+ # rescale image to [0, 255]
+ image = image - image.min()
+ image = image / image.max() * 255
+
+ # create output
+ if mapping is None:
+ uniques = np.sort(pd.unique(segmentation.ravel())) # np.unique(segmentation)
+ mapping = {i: c for c, i in enumerate(uniques)}
+
+ for l in mapping.keys():
+ image[segmentation == l] += overlay_intensity * np.array(hex_to_rgb(color_cycle[mapping[l]]))
+
+ # rescale result to [0, 255]
+ image = image / image.max() * 255
+ return image.astype(np.uint8)
+
+
+def select_slice_to_plot(image: np.ndarray, segmentation: np.ndarray) -> int:
+    """
+    image and segmentation are expected to be 3D
+
+    selects the slice with the largest amount of fg (regardless of label)
+
+    we give image so that we can easily replace this function if needed
+    """
+    fg_mask = segmentation != 0
+    # count foreground voxels per axis-0 slice and pick the largest
+    fg_per_slice = fg_mask.sum((1, 2))
+    selected_slice = int(np.argmax(fg_per_slice))
+    return selected_slice
+
+
+def select_slice_to_plot2(image: np.ndarray, segmentation: np.ndarray) -> int:
+    """
+    image and segmentation are expected to be 3D (or 1, x, y)
+
+    selects the slice with the largest amount of fg (how much percent of each class are in each slice? pick slice
+    with highest avg percent)
+
+    we give image so that we can easily replace this function if needed
+    """
+    classes = [i for i in np.sort(pd.unique(segmentation.ravel())) if i != 0]
+    fg_per_slice = np.zeros((image.shape[0], len(classes)))
+    for i, c in enumerate(classes):
+        fg_mask = segmentation == c
+        fg_per_slice[:, i] = fg_mask.sum((1, 2))
+        # NOTE(review): this divides by the running total over ALL columns filled so
+        # far, not by fg_per_slice[:, i].sum(); presumably the per-class column sum
+        # was intended — verify before changing, the argmax result may shift
+        fg_per_slice[:, i] /= fg_per_slice.sum()
+    fg_per_slice = fg_per_slice.mean(1)
+    return int(np.argmax(fg_per_slice))
+
+
+def plot_overlay(image_file: str, segmentation_file: str, image_reader_writer: BaseReaderWriter, output_file: str,
+                 overlay_intensity: float = 0.6):
+    """Read one 3D image and its segmentation, pick the most informative slice
+    (via select_slice_to_plot2) and save the tinted overlay as a png."""
+    # local import so worker processes don't need matplotlib at module import time
+    import matplotlib.pyplot as plt
+
+    image, props = image_reader_writer.read_images((image_file, ))
+    image = image[0]
+    seg, props_seg = image_reader_writer.read_seg(segmentation_file)
+    seg = seg[0]
+
+    assert all([i == j for i, j in zip(image.shape, seg.shape)]), "image and seg do not have the same shape: %s, %s" % (
+        image_file, segmentation_file)
+
+    assert len(image.shape) == 3, 'only 3D images/segs are supported'
+
+    selected_slice = select_slice_to_plot2(image, seg)
+    # print(image.shape, selected_slice)
+
+    overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity)
+
+    plt.imsave(output_file, overlay)
+
+
+def plot_overlay_preprocessed(case_file: str, output_file: str, overlay_intensity: float = 0.6, channel_idx=0):
+    """Like plot_overlay, but for a preprocessed .npz case file (keys 'data' and
+    'seg'); channel_idx selects which image channel to display."""
+    import matplotlib.pyplot as plt
+    data = np.load(case_file)['data']
+    seg = np.load(case_file)['seg'][0]
+
+    assert channel_idx < (data.shape[0]), 'This dataset only supports channel index up to %d' % (data.shape[0] - 1)
+
+    image = data[channel_idx]
+    # negative seg values (presumably 'outside mask' markers in preprocessed data —
+    # verify against the preprocessor) are rendered as background
+    seg[seg < 0] = 0
+
+    selected_slice = select_slice_to_plot2(image, seg)
+
+    overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity)
+
+    plt.imsave(output_file, overlay)
+
+
+def multiprocessing_plot_overlay(list_of_image_files, list_of_seg_files, image_reader_writer,
+                                 list_of_output_files, overlay_intensity,
+                                 num_processes=8):
+    """Render overlays for many raw cases in parallel: one plot_overlay call per case.
+    Blocks until all overlays are written (r.get() also re-raises worker exceptions)."""
+    # 'spawn' starts clean worker processes instead of forking the parent
+    with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+        r = p.starmap_async(plot_overlay, zip(
+            list_of_image_files, list_of_seg_files, [image_reader_writer] * len(list_of_output_files),
+            list_of_output_files, [overlay_intensity] * len(list_of_output_files)
+        ))
+        r.get()
+
+
+def multiprocessing_plot_overlay_preprocessed(list_of_case_files, list_of_output_files, overlay_intensity,
+                                              num_processes=8, channel_idx=0):
+    """Render overlays for many preprocessed cases in parallel: one
+    plot_overlay_preprocessed call per case. Blocks until done."""
+    with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+        r = p.starmap_async(plot_overlay_preprocessed, zip(
+            list_of_case_files, list_of_output_files, [overlay_intensity] * len(list_of_output_files),
+            [channel_idx] * len(list_of_output_files)
+        ))
+        r.get()
+
+
+def generate_overlays_from_raw(dataset_name_or_id: Union[int, str], output_folder: str,
+ num_processes: int = 8, channel_idx: int = 0, overlay_intensity: float = 0.6):
+ dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id)
+ folder = join(nnUNet_raw, dataset_name)
+ dataset_json = load_json(join(folder, 'dataset.json'))
+ identifiers = get_identifiers_from_splitted_dataset_folder(join(folder, 'imagesTr'), dataset_json['file_ending'])
+
+ image_files = [join(folder, 'imagesTr', i + "_%04.0d.nii.gz" % channel_idx) for i in identifiers]
+ seg_files = [join(folder, 'labelsTr', i + ".nii.gz") for i in identifiers]
+
+ assert all([isfile(i) for i in image_files])
+ assert all([isfile(i) for i in seg_files])
+
+ maybe_mkdir_p(output_folder)
+ output_files = [join(output_folder, i + '.png') for i in identifiers]
+
+ image_reader_writer = determine_reader_writer_from_dataset_json(dataset_json, image_files[0])()
+ multiprocessing_plot_overlay(image_files, seg_files, image_reader_writer, output_files, overlay_intensity, num_processes)
+
+
+def generate_overlays_from_preprocessed(dataset_name_or_id: Union[int, str], output_folder: str,
+                                        num_processes: int = 8, channel_idx: int = 0,
+                                        configuration: str = None,
+                                        plans_identifier: str = 'nnUNetPlans',
+                                        overlay_intensity: float = 0.6):
+    """Create one overlay png per case of a preprocessed dataset. If no
+    configuration is given, prefers '3d_fullres' and falls back to '2d'."""
+    dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id)
+    folder = join(nnUNet_preprocessed, dataset_name)
+    if not isdir(folder): raise RuntimeError("run preprocessing for that task first")
+
+    plans = load_json(join(folder, plans_identifier + '.json'))
+    if configuration is None:
+        if '3d_fullres' in plans['configurations'].keys():
+            configuration = '3d_fullres'
+        else:
+            configuration = '2d'
+    data_identifier = plans['configurations'][configuration]["data_identifier"]
+    preprocessed_folder = join(folder, data_identifier)
+
+    if not isdir(preprocessed_folder):
+        raise RuntimeError(f"Preprocessed data folder for configuration {configuration} of plans identifier "
+                           f"{plans_identifier} ({dataset_name}) does not exist. Run preprocessing for this "
+                           f"configuration first!")
+
+    # case identifiers = npz filenames with the '.npz' extension stripped
+    identifiers = [i[:-4] for i in subfiles(preprocessed_folder, suffix='.npz', join=False)]
+
+    output_files = [join(output_folder, i + '.png') for i in identifiers]
+    image_files = [join(preprocessed_folder, i + ".npz") for i in identifiers]
+
+    maybe_mkdir_p(output_folder)
+    multiprocessing_plot_overlay_preprocessed(image_files, output_files, overlay_intensity=overlay_intensity,
+                                              num_processes=num_processes, channel_idx=channel_idx)
+
+
+def entry_point_generate_overlay():
+    """CLI entry point: parse arguments and dispatch to the raw or preprocessed
+    overlay generator. Spacing information is disregarded in the plots."""
+    import argparse
+    parser = argparse.ArgumentParser("Plots png overlays of the slice with the most foreground. Note that this "
+                                     "disregards spacing information!")
+    parser.add_argument('-d', type=str, help="Dataset name or id", required=True)
+    parser.add_argument('-o', type=str, help="output folder", required=True)
+    parser.add_argument('-np', type=int, default=default_num_processes, required=False,
+                        help=f"number of processes used. Default: {default_num_processes}")
+    parser.add_argument('-channel_idx', type=int, default=0, required=False,
+                        help="channel index used (0 = _0000). Default: 0")
+    parser.add_argument('--use_raw', action='store_true', required=False, help="if set then we use raw data. else "
+                                                                               "we use preprocessed")
+    parser.add_argument('-p', type=str, required=False, default='nnUNetPlans',
+                        help='plans identifier. Only used if --use_raw is not set! Default: nnUNetPlans')
+    parser.add_argument('-c', type=str, required=False, default=None,
+                        help='configuration name. Only used if --use_raw is not set! Default: None = '
+                             '3d_fullres if available, else 2d')
+    parser.add_argument('-overlay_intensity', type=float, required=False, default=0.6,
+                        help='overlay intensity. Higher = brighter/less transparent')
+
+    args = parser.parse_args()
+
+    if args.use_raw:
+        generate_overlays_from_raw(args.d, args.o, args.np, args.channel_idx,
+                                   overlay_intensity=args.overlay_intensity)
+    else:
+        generate_overlays_from_preprocessed(args.d, args.o, args.np, args.channel_idx, args.c, args.p,
+                                            overlay_intensity=args.overlay_intensity)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/utilities/plans_handling/__init__.py b/PRISM/SegMamba/light_training/utilities/plans_handling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/light_training/utilities/plans_handling/plans_handler.py b/PRISM/SegMamba/light_training/utilities/plans_handling/plans_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c39fd1ede290094c2b4d5b12a1f2182cb1226dc
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/plans_handling/plans_handler.py
@@ -0,0 +1,307 @@
+from __future__ import annotations
+
+import dynamic_network_architectures
+from copy import deepcopy
+from functools import lru_cache, partial
+from typing import Union, Tuple, List, Type, Callable
+
+import numpy as np
+import torch
+
+from nnunetv2.preprocessing.resampling.utils import recursive_find_resampling_fn_by_name
+from torch import nn
+
+import nnunetv2
+from batchgenerators.utilities.file_and_folder_operations import load_json, join
+
+from nnunetv2.imageio.reader_writer_registry import recursive_find_reader_writer_by_name
+from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
+from nnunetv2.utilities.label_handling.label_handling import get_labelmanager_class_from_plans
+
+
+# see https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from nnunetv2.utilities.label_handling.label_handling import LabelManager
+ from nnunetv2.imageio.base_reader_writer import BaseReaderWriter
+ from nnunetv2.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor
+ from nnunetv2.experiment_planning.experiment_planners.default_experiment_planner import ExperimentPlanner
+
+
+class ConfigurationManager(object):
+    """Typed, read-only accessors for a single configuration dict taken from an
+    nnU-Net plans file (obtain instances via PlansManager.get_configuration).
+
+    NOTE(review): the @property + @lru_cache(maxsize=1) combination caches per
+    argument tuple (i.e. per self) but with a cache size of 1 shared across all
+    instances; it also keeps the cached instance referenced — acceptable here
+    since configurations are few and long-lived.
+    """
+
+    def __init__(self, configuration_dict: dict):
+        self.configuration = configuration_dict
+
+    def __repr__(self):
+        return self.configuration.__repr__()
+
+    @property
+    def data_identifier(self) -> str:
+        return self.configuration['data_identifier']
+
+    @property
+    def preprocessor_name(self) -> str:
+        return self.configuration['preprocessor_name']
+
+    @property
+    @lru_cache(maxsize=1)
+    def preprocessor_class(self) -> Type[DefaultPreprocessor]:
+        # resolved by name from the nnunetv2.preprocessing package
+        preprocessor_class = recursive_find_python_class(join(nnunetv2.__path__[0], "preprocessing"),
+                                                         self.preprocessor_name,
+                                                         current_module="nnunetv2.preprocessing")
+        return preprocessor_class
+
+    @property
+    def batch_size(self) -> int:
+        return self.configuration['batch_size']
+
+    @property
+    def patch_size(self) -> List[int]:
+        return self.configuration['patch_size']
+
+    @property
+    def median_image_size_in_voxels(self) -> List[int]:
+        return self.configuration['median_image_size_in_voxels']
+
+    @property
+    def spacing(self) -> List[float]:
+        return self.configuration['spacing']
+
+    @property
+    def normalization_schemes(self) -> List[str]:
+        return self.configuration['normalization_schemes']
+
+    @property
+    def use_mask_for_norm(self) -> List[bool]:
+        return self.configuration['use_mask_for_norm']
+
+    @property
+    def UNet_class_name(self) -> str:
+        return self.configuration['UNet_class_name']
+
+    @property
+    @lru_cache(maxsize=1)
+    def UNet_class(self) -> Type[nn.Module]:
+        # resolved by name from dynamic_network_architectures; raises for custom architectures
+        unet_class = recursive_find_python_class(join(dynamic_network_architectures.__path__[0], "architectures"),
+                                                 self.UNet_class_name,
+                                                 current_module="dynamic_network_architectures.architectures")
+        if unet_class is None:
+            raise RuntimeError('The network architecture specified by the plans file '
+                               'is non-standard (maybe your own?). Fix this by not using '
+                               'ConfigurationManager.UNet_class to instantiate '
+                               'it (probably just overwrite build_network_architecture of your trainer.')
+        return unet_class
+
+    @property
+    def UNet_base_num_features(self) -> int:
+        return self.configuration['UNet_base_num_features']
+
+    @property
+    def n_conv_per_stage_encoder(self) -> List[int]:
+        return self.configuration['n_conv_per_stage_encoder']
+
+    @property
+    def n_conv_per_stage_decoder(self) -> List[int]:
+        return self.configuration['n_conv_per_stage_decoder']
+
+    @property
+    def num_pool_per_axis(self) -> List[int]:
+        return self.configuration['num_pool_per_axis']
+
+    @property
+    def pool_op_kernel_sizes(self) -> List[List[int]]:
+        return self.configuration['pool_op_kernel_sizes']
+
+    @property
+    def conv_kernel_sizes(self) -> List[List[int]]:
+        return self.configuration['conv_kernel_sizes']
+
+    @property
+    def unet_max_num_features(self) -> int:
+        return self.configuration['unet_max_num_features']
+
+    @property
+    @lru_cache(maxsize=1)
+    def resampling_fn_data(self) -> Callable[
+        [Union[torch.Tensor, np.ndarray],
+         Union[Tuple[int, ...], List[int], np.ndarray],
+         Union[Tuple[float, ...], List[float], np.ndarray],
+         Union[Tuple[float, ...], List[float], np.ndarray]
+         ],
+        Union[torch.Tensor, np.ndarray]]:
+        # resampling function for image data, looked up by name and partially
+        # applied with its kwargs from the plans
+        fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_data'])
+        fn = partial(fn, **self.configuration['resampling_fn_data_kwargs'])
+        return fn
+
+    @property
+    @lru_cache(maxsize=1)
+    def resampling_fn_probabilities(self) -> Callable[
+        [Union[torch.Tensor, np.ndarray],
+         Union[Tuple[int, ...], List[int], np.ndarray],
+         Union[Tuple[float, ...], List[float], np.ndarray],
+         Union[Tuple[float, ...], List[float], np.ndarray]
+         ],
+        Union[torch.Tensor, np.ndarray]]:
+        # resampling function for predicted probabilities
+        fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_probabilities'])
+        fn = partial(fn, **self.configuration['resampling_fn_probabilities_kwargs'])
+        return fn
+
+    @property
+    @lru_cache(maxsize=1)
+    def resampling_fn_seg(self) -> Callable[
+        [Union[torch.Tensor, np.ndarray],
+         Union[Tuple[int, ...], List[int], np.ndarray],
+         Union[Tuple[float, ...], List[float], np.ndarray],
+         Union[Tuple[float, ...], List[float], np.ndarray]
+         ],
+        Union[torch.Tensor, np.ndarray]]:
+        # resampling function for segmentation maps
+        fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_seg'])
+        fn = partial(fn, **self.configuration['resampling_fn_seg_kwargs'])
+        return fn
+
+    @property
+    def batch_dice(self) -> bool:
+        return self.configuration['batch_dice']
+
+    @property
+    def next_stage_names(self) -> Union[List[str], None]:
+        # normalizes a single next-stage string to a one-element list
+        ret = self.configuration.get('next_stage')
+        if ret is not None:
+            if isinstance(ret, str):
+                ret = [ret]
+        return ret
+
+    @property
+    def previous_stage_name(self) -> Union[str, None]:
+        return self.configuration.get('previous_stage')
+
+
+class PlansManager(object):
+    def __init__(self, plans_file_or_dict: Union[str, dict]):
+        """
+        Why do we need this?
+        1) resolve inheritance in configurations
+        2) expose otherwise annoying stuff like getting the label manager or IO class from a string
+        3) clearly expose the things that are in the plans instead of hiding them in a dict
+        4) cache shit
+
+        This class does not prevent you from going wild. You can still use the plans directly if you prefer
+        (PlansHandler.plans['key'])
+
+        :param plans_file_or_dict: path to a plans json file, or the already-loaded plans dict
+        """
+        self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict)
+
+    def __repr__(self):
+        return self.plans.__repr__()
+
+    def _internal_resolve_configuration_inheritance(self, configuration_name: str,
+                                                    visited: Tuple[str, ...] = None) -> dict:
+        """Return the configuration dict with its 'inherits_from' chain resolved
+        (child keys override inherited parent keys). ``visited`` records the
+        inheritance path so circular dependencies can be detected."""
+        if configuration_name not in self.plans['configurations'].keys():
+            raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. Valid '
+                             f'configuration names are {list(self.plans["configurations"].keys())}.')
+        configuration = deepcopy(self.plans['configurations'][configuration_name])
+        if 'inherits_from' in configuration:
+            parent_config_name = configuration['inherits_from']
+
+            if visited is None:
+                visited = (configuration_name,)
+            else:
+                if parent_config_name in visited:
+                    raise RuntimeError(f"Circular dependency detected. The following configurations were visited "
+                                       f"while solving inheritance (in that order!): {visited}. "
+                                       f"Current configuration: {configuration_name}. Its parent configuration "
+                                       f"is {parent_config_name}.")
+                visited = (*visited, configuration_name)
+
+            base_config = self._internal_resolve_configuration_inheritance(parent_config_name, visited)
+            # the child configuration overrides whatever it inherited
+            base_config.update(configuration)
+            configuration = base_config
+        return configuration
+
+ @lru_cache(maxsize=10)
+ def get_configuration(self, configuration_name: str):
+ if configuration_name not in self.plans['configurations'].keys():
+ raise RuntimeError(f"Requested configuration {configuration_name} not found in plans. "
+ f"Available configurations: {list(self.plans['configurations'].keys())}")
+
+ configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name)
+ return ConfigurationManager(configuration_dict)
+
+ @property
+ def dataset_name(self) -> str:
+ return self.plans['dataset_name']
+
+ @property
+ def plans_name(self) -> str:
+ return self.plans['plans_name']
+
+ @property
+ def original_median_spacing_after_transp(self) -> List[float]:
+ return self.plans['original_median_spacing_after_transp']
+
+ @property
+ def original_median_shape_after_transp(self) -> List[float]:
+ return self.plans['original_median_shape_after_transp']
+
+ @property
+ @lru_cache(maxsize=1)
+ def image_reader_writer_class(self) -> Type[BaseReaderWriter]:
+ return recursive_find_reader_writer_by_name(self.plans['image_reader_writer'])
+
+ @property
+ def transpose_forward(self) -> List[int]:
+ return self.plans['transpose_forward']
+
+ @property
+ def transpose_backward(self) -> List[int]:
+ return self.plans['transpose_backward']
+
+ @property
+ def available_configurations(self) -> List[str]:
+ return list(self.plans['configurations'].keys())
+
+ @property
+ @lru_cache(maxsize=1)
+ def experiment_planner_class(self) -> Type[ExperimentPlanner]:
+ planner_name = self.experiment_planner_name
+ experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], "experiment_planning"),
+ planner_name,
+ current_module="nnunetv2.experiment_planning")
+ return experiment_planner
+
+ @property
+ def experiment_planner_name(self) -> str:
+ return self.plans['experiment_planner_used']
+
+ @property
+ @lru_cache(maxsize=1)
+ def label_manager_class(self) -> Type[LabelManager]:
+ return get_labelmanager_class_from_plans(self.plans)
+
+ def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager:
+ return self.label_manager_class(label_dict=dataset_json['labels'],
+ regions_class_order=dataset_json.get('regions_class_order'),
+ **kwargs)
+
+ @property
+ def foreground_intensity_properties_per_channel(self) -> dict:
+ if 'foreground_intensity_properties_per_channel' not in self.plans.keys():
+ if 'foreground_intensity_properties_by_modality' in self.plans.keys():
+ return self.plans['foreground_intensity_properties_by_modality']
+ return self.plans['foreground_intensity_properties_per_channel']
+
+
+if __name__ == '__main__':
+    from nnunetv2.paths import nnUNet_preprocessed
+    from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
+
+    # Manual smoke test: load the plans of dataset 3 and exercise inheritance.
+    plans = load_json(join(nnUNet_preprocessed, maybe_convert_to_dataset_name(3), 'nnUNetPlans.json'))
+    # build new configuration that inherits from 3d_fullres
+    plans['configurations']['3d_fullres_bs4'] = {
+        'batch_size': 4,
+        'inherits_from': '3d_fullres'
+    }
+    # now get plans and configuration managers
+    plans_manager = PlansManager(plans)
+    configuration_manager = plans_manager.get_configuration('3d_fullres_bs4')
+    print(configuration_manager)  # look for batch size 4
diff --git a/PRISM/SegMamba/light_training/utilities/tensor_utilities.py b/PRISM/SegMamba/light_training/utilities/tensor_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..b16ffcac2e46d93c19522937098f0af5b208aca7
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/tensor_utilities.py
@@ -0,0 +1,15 @@
+from typing import Union, List, Tuple
+
+import numpy as np
+import torch
+
+
+def sum_tensor(inp: torch.Tensor, axes: Union[np.ndarray, Tuple, List], keepdim: bool = False) -> torch.Tensor:
+ axes = np.unique(axes).astype(int)
+ if keepdim:
+ for ax in axes:
+ inp = inp.sum(int(ax), keepdim=True)
+ else:
+ for ax in sorted(axes, reverse=True):
+ inp = inp.sum(int(ax))
+ return inp
diff --git a/PRISM/SegMamba/light_training/utilities/utils.py b/PRISM/SegMamba/light_training/utilities/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8703e58055498e47eaf89d3bff445799cc8cc64b
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utilities/utils.py
@@ -0,0 +1,56 @@
+# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center
+# (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Union
+
+from batchgenerators.utilities.file_and_folder_operations import *
+import numpy as np
+import re
+
+def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None:
+ try:
+ a = np.load(npz_file) # inexpensive, no compression is done here. This just reads metadata
+ if overwrite_existing or not isfile(npz_file[:-3] + "npy"):
+ np.save(npz_file[:-3] + "npy", a['data'])
+ if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")):
+ np.save(npz_file[:-4] + "_seg.npy", a['seg'])
+ except KeyboardInterrupt:
+ if isfile(npz_file[:-3] + "npy"):
+ os.remove(npz_file[:-3] + "npy")
+ if isfile(npz_file[:-4] + "_seg.npy"):
+ os.remove(npz_file[:-4] + "_seg.npy")
+ raise KeyboardInterrupt
+
+def get_identifiers_from_splitted_dataset_folder(folder: str, file_ending: str):
+    """Collect the unique case identifiers in a folder of channel-split images.
+
+    Files are expected to be named <identifier>_XXXX<file_ending>, where XXXX
+    is a 4-digit channel index.
+    """
+    files = subfiles(folder, suffix=file_ending, join=False)
+    # strip the file ending plus the 5-character channel suffix '_XXXX'
+    crop = len(file_ending) + 5
+    files = [i[:-crop] for i in files]
+    # only unique image ids
+    files = np.unique(files)
+    return files
+
+
+def create_lists_from_splitted_dataset_folder(folder: str, file_ending: str, identifiers: List[str] = None) -> List[List[str]]:
+    """
+    does not rely on dataset.json
+
+    For each case identifier, returns the sorted list of paths to its channel
+    files (<identifier>_XXXX<file_ending>). Identifiers are discovered from
+    the folder when not given explicitly.
+    """
+    if identifiers is None:
+        identifiers = get_identifiers_from_splitted_dataset_folder(folder, file_ending)
+    files = subfiles(folder, suffix=file_ending, join=False, sort=True)
+    list_of_lists = []
+    for f in identifiers:
+        # re.escape keeps regex metacharacters in names/endings literal
+        p = re.compile(re.escape(f) + r"_\d\d\d\d" + re.escape(file_ending))
+        list_of_lists.append([join(folder, i) for i in files if p.fullmatch(i)])
+    return list_of_lists
diff --git a/PRISM/SegMamba/light_training/utils/files_helper.py b/PRISM/SegMamba/light_training/utils/files_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..471e746fd45de31db297b0de47417728a1c9128e
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utils/files_helper.py
@@ -0,0 +1,22 @@
+
+import os
+import glob
+import torch
+
+def delete_last_model(model_dir, symbol):
+
+ last_model = glob.glob(f"{model_dir}/{symbol}*.pt")
+ if len(last_model) != 0:
+ os.remove(last_model[0])
+
+
+def save_new_model_and_delete_last(model, save_path, delete_symbol=None):
+ save_dir = os.path.dirname(save_path)
+
+ os.makedirs(save_dir, exist_ok=True)
+ if delete_last_model is not None:
+ delete_last_model(save_dir, delete_symbol)
+
+ torch.save(model.state_dict(), save_path)
+
+ print(f"model is saved in {save_path}")
diff --git a/PRISM/SegMamba/light_training/utils/log_image.py b/PRISM/SegMamba/light_training/utils/log_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e9b590f4859cce6cbfc60b94a3dd0fffdf05ab4
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utils/log_image.py
@@ -0,0 +1,20 @@
+
+
+import os
+from PIL import Image
+
+
+def log_image(save_dir, split, images,
+              global_step, current_epoch):
+    """Save each array in `images` as a PNG under save_dir/images/<split>.
+
+    `images` maps names to arrays accepted by PIL.Image.fromarray. Filenames
+    encode the key, global step and epoch, e.g. 'k_gs-000010_e-000001.png'.
+    The target directory is created on demand.
+    """
+    root = os.path.join(save_dir, "images", split)
+    for k in images:
+
+        filename = "{}_gs-{:06}_e-{:06}.png".format(
+            k,
+            global_step,
+            current_epoch,
+        )
+        path = os.path.join(root, filename)
+        os.makedirs(os.path.split(path)[0], exist_ok=True)
+
+        Image.fromarray(images[k]).save(path)
\ No newline at end of file
diff --git a/PRISM/SegMamba/light_training/utils/lr_scheduler.py b/PRISM/SegMamba/light_training/utils/lr_scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f42d0c92a56389c197017e61add9ca1dc51a6e1c
--- /dev/null
+++ b/PRISM/SegMamba/light_training/utils/lr_scheduler.py
@@ -0,0 +1,222 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import warnings
+from typing import List
+
+from torch.optim import Adam, Optimizer
+from torch.optim.lr_scheduler import _LRScheduler
+from torch.optim.lr_scheduler import LambdaLR
+import math
+from torch.optim import Optimizer
+
+class PolyLRScheduler(_LRScheduler):
+    """Polynomial decay: lr = initial_lr * (1 - step / max_steps) ** exponent.
+
+    NOTE(review): step() writes param_group['lr'] directly and bypasses the
+    _LRScheduler get_lr() machinery; it also never calls super().step().
+    """
+    def __init__(self, optimizer, initial_lr: float, max_steps: int, exponent: float = 0.9, current_step: int = None):
+        self.optimizer = optimizer
+        self.initial_lr = initial_lr
+        self.max_steps = max_steps
+        self.exponent = exponent
+        self.ctr = 0  # internal counter used when step() gets no explicit step
+        super().__init__(optimizer, current_step if current_step is not None else -1, False)
+
+    def step(self, current_step=None):
+        # Fall back to (and advance) the internal counter when no explicit
+        # step is supplied; -1 is what the base-class constructor passes.
+        if current_step is None or current_step == -1:
+            current_step = self.ctr
+            self.ctr += 1
+
+        new_lr = self.initial_lr * (1 - current_step / self.max_steps) ** self.exponent
+        for param_group in self.optimizer.param_groups:
+            param_group['lr'] = new_lr
+
+def get_polynomial_decay_schedule_with_warmup(
+ optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
+):
+ """
+ Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
+ optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
+ initial lr set in the optimizer.
+
+ Args:
+ optimizer ([`~torch.optim.Optimizer`]):
+ The optimizer for which to schedule the learning rate.
+ num_warmup_steps (`int`):
+ The number of steps for the warmup phase.
+ num_training_steps (`int`):
+ The total number of training steps.
+ lr_end (`float`, *optional*, defaults to 1e-7):
+ The end LR.
+ power (`float`, *optional*, defaults to 1.0):
+ Power factor.
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+
+ Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
+ implementation at
+ https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37
+
+ Return:
+ `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+
+ """
+
+ lr_init = optimizer.defaults["lr"]
+ if not (lr_init > lr_end):
+ raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")
+
+ def lr_lambda(current_step: int):
+ if current_step < num_warmup_steps:
+ return float(current_step) / float(max(1, num_warmup_steps))
+ elif current_step > num_training_steps:
+ return lr_end / lr_init # as LambdaLR multiplies by lr_init
+ else:
+ lr_range = lr_init - lr_end
+ decay_steps = num_training_steps - num_warmup_steps
+ pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
+ decay = lr_range * pct_remaining**power + lr_end
+ return decay / lr_init # as LambdaLR multiplies by lr_init
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+def get_cosine_schedule_with_warmup(
+    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
+):
+    """
+    Create a schedule with a learning rate that decreases following the values of the cosine function between the
+    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
+    initial lr set in the optimizer.
+
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        num_warmup_steps (`int`):
+            The number of steps for the warmup phase.
+        num_training_steps (`int`):
+            The total number of training steps.
+        num_cycles (`float`, *optional*, defaults to 0.5):
+            The number of periods of the cosine function in a schedule (the default is to just decrease from the max
+            value to 0 following a half-cosine).
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+    """
+
+    def lr_lambda(current_step):
+        # linear warmup, then cosine decay clamped at 0
+        if current_step < num_warmup_steps:
+            return float(current_step) / float(max(1, num_warmup_steps))
+        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
+        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
+
+    return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+def get_constant_schedule_with_warmup(optimizer, num_warmup_steps: int, last_epoch: int = -1):
+    """
+    Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
+    increases linearly between 0 and the initial lr set in the optimizer.
+
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        num_warmup_steps (`int`):
+            The number of steps for the warmup phase.
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+    """
+
+    def lr_lambda(current_step: int):
+        # linear ramp from 0 to 1, then hold at the base lr forever
+        if current_step < num_warmup_steps:
+            return float(current_step) / float(max(1.0, num_warmup_steps))
+        return 1.0
+
+    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
+
+class LinearWarmupCosineAnnealingLR(_LRScheduler):
+
+    def __init__(
+        self,
+        optimizer: Optimizer,
+        warmup_epochs: int,
+        max_epochs: int,
+        warmup_start_lr: float = 0.0,
+        eta_min: float = 0.0,
+        last_epoch: int = -1,
+    ) -> None:
+        """
+        Args:
+            optimizer (Optimizer): Wrapped optimizer.
+            warmup_epochs (int): Maximum number of iterations for linear warmup
+            max_epochs (int): Maximum number of iterations
+            warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0.
+            eta_min (float): Minimum learning rate. Default: 0.
+            last_epoch (int): The index of last epoch. Default: -1.
+
+        NOTE(review): the warmup branches divide by (warmup_epochs - 1), so
+        warmup_epochs == 1 would raise ZeroDivisionError - confirm callers
+        always pass warmup_epochs >= 2.
+        """
+        self.warmup_epochs = warmup_epochs
+        self.max_epochs = max_epochs
+        self.warmup_start_lr = warmup_start_lr
+        self.eta_min = eta_min
+
+        super(LinearWarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)
+
+    def get_lr(self) -> List[float]:
+        """
+        Compute learning rate using chainable form of the scheduler
+        """
+        if not self._get_lr_called_within_step:
+            warnings.warn(
+                "To get the last learning rate computed by the scheduler, "
+                "please use `get_last_lr()`.",
+                UserWarning,
+            )
+
+        # Epoch 0: start of the linear warmup.
+        if self.last_epoch == 0:
+            return [self.warmup_start_lr] * len(self.base_lrs)
+        # During warmup: add a constant increment to the current lr each epoch.
+        elif self.last_epoch < self.warmup_epochs:
+            return [
+                group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
+                for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
+            ]
+        # Warmup just finished: land exactly on the base learning rates.
+        elif self.last_epoch == self.warmup_epochs:
+            return self.base_lrs
+        # Boundary of a cosine period (relevant when stepping past max_epochs).
+        elif (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0:
+            return [
+                group["lr"] + (base_lr - self.eta_min) *
+                (1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2
+                for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
+            ]
+
+        # Chainable cosine-annealing update applied to the current lr.
+        return [
+            (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) /
+            (
+                1 +
+                math.cos(math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs))
+            ) * (group["lr"] - self.eta_min) + self.eta_min for group in self.optimizer.param_groups
+        ]
+
+    def _get_closed_form_lr(self) -> List[float]:
+        """
+        Called when epoch is passed as a param to the `step` function of the scheduler.
+        """
+        # Closed-form warmup: linear interpolation from warmup_start_lr to base_lr.
+        if self.last_epoch < self.warmup_epochs:
+            return [
+                self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
+                for base_lr in self.base_lrs
+            ]
+
+        # Closed-form cosine annealing between base_lr and eta_min.
+        return [
+            self.eta_min + 0.5 * (base_lr - self.eta_min) *
+            (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
+            for base_lr in self.base_lrs
+        ]
diff --git a/PRISM/SegMamba/mamba/.DS_Store b/PRISM/SegMamba/mamba/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..d3b3e2def10b2c5558a50a9a3604687560b957ed
Binary files /dev/null and b/PRISM/SegMamba/mamba/.DS_Store differ
diff --git a/PRISM/SegMamba/mamba/.gitmodules b/PRISM/SegMamba/mamba/.gitmodules
new file mode 100644
index 0000000000000000000000000000000000000000..a7445800fb64f3ae664c0b994a54235105986d2e
--- /dev/null
+++ b/PRISM/SegMamba/mamba/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "3rdparty/lm-evaluation-harness"]
+ path = 3rdparty/lm-evaluation-harness
+ url = https://github.com/EleutherAI/lm-evaluation-harness/
diff --git a/PRISM/SegMamba/mamba/AUTHORS b/PRISM/SegMamba/mamba/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..38557a872f8d603ed963a05c211de7032de5926b
--- /dev/null
+++ b/PRISM/SegMamba/mamba/AUTHORS
@@ -0,0 +1,2 @@
+Tri Dao, tri@tridao.me
+Albert Gu, agu@andrew.cmu.edu
diff --git a/PRISM/SegMamba/mamba/LICENSE b/PRISM/SegMamba/mamba/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f4abe24eb520fbb077753ae4f34bfaa43cb3b83f
--- /dev/null
+++ b/PRISM/SegMamba/mamba/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2023 Tri Dao, Albert Gu
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/PRISM/SegMamba/mamba/README.md b/PRISM/SegMamba/mamba/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..754cefd7f862a90bad8fbdff71e3793a4e7849e3
--- /dev/null
+++ b/PRISM/SegMamba/mamba/README.md
@@ -0,0 +1,149 @@
+# Mamba
+
+
+> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\
+> Albert Gu*, Tri Dao*\
+> Paper: https://arxiv.org/abs/2312.00752
+
+## About
+
+Mamba is a new state space model architecture showing promising performance on information-dense data such as language modeling, where previous subquadratic models fall short of Transformers.
+It is based on the line of progress on [structured state space models](https://github.com/state-spaces/s4),
+with an efficient hardware-aware design and implementation in the spirit of [FlashAttention](https://github.com/Dao-AILab/flash-attention).
+
+## Installation
+
+- `pip install causal-conv1d`: an efficient implementation of a simple causal Conv1d layer used inside the Mamba block.
+- `pip install mamba-ssm`: the core Mamba package.
+
+It can also be built from source with `pip install .` from this repository.
+
+If `pip` complains about PyTorch versions, try passing `--no-build-isolation` to `pip`.
+
+Other requirements:
+- Linux
+- NVIDIA GPU
+- PyTorch 1.12+
+- CUDA 11.6+
+
+## Usage
+
+We expose several levels of interface with the Mamba model.
+
+### Selective SSM
+
+Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2).
+
+Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py).
+
+### Mamba Block
+
+The main module of this repository is the Mamba architecture block wrapping the selective SSM.
+
+Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py).
+
+Usage:
+```
+from mamba_ssm import Mamba
+
+batch, length, dim = 2, 64, 16
+x = torch.randn(batch, length, dim).to("cuda")
+model = Mamba(
+ # This module uses roughly 3 * expand * d_model^2 parameters
+ d_model=dim, # Model dimension d_model
+ d_state=16, # SSM state expansion factor
+ d_conv=4, # Local convolution width
+ expand=2, # Block expansion factor
+).to("cuda")
+y = model(x)
+assert y.shape == x.shape
+```
+
+### Mamba Language Model
+
+Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head.
+
+Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py).
+
+This is an example of how to integrate Mamba into an end-to-end neural network.
+This example is used in the generation scripts below.
+
+
+
+## Pretrained Models
+
+Pretrained models are uploaded to
+[HuggingFace](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`,
+`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`.
+
+The models will be autodownloaded by the generation script below.
+
+These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models:
+
+| Parameters | Layers | Model dim. |
+|------------|--------|------------|
+| 130M | 12 | 768 |
+| 370M | 24 | 1024 |
+| 790M | 24 | 1536 |
+| 1.4B | 24 | 2048 |
+| 2.8B | 32 | 2560 |
+
+(The layer count of Mamba should be doubled, as two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.)
+
+Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.).
+Performance is expected to be comparable or better than other architectures trained on similar data, but not to match larger or fine-tuned models.
+
+
+## Evaluations
+
+To run zero-shot evaluations of models (corresponding to Table 3 of the paper),
+we use the
+[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor)
+library.
+
+1. Pull the `lm-evaluation-harness` repo by `git submodule update --init
+ --recursive`. We use the `big-refactor` branch.
+2. Install `lm-evaluation-harness`: `pip install -e 3rdparty/lm-evaluation-harness`
+3. Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo):
+```
+python evals/lm_harness_eval.py --model mamba --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64
+python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64
+```
+
+Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process.
+
+## Inference
+
+The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py)
+1. autoloads a model from the HuggingFace Hub,
+2. generates completions of a user-specified prompt,
+3. benchmarks the inference speed of this generation.
+
+Other configurable options include the top-p (nucleus sampling) probability, and the softmax temperature.
+
+### Examples
+
+To test generation latency (e.g. batch size = 1) with different sampling strategies:
+
+```
+python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5
+python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5
+```
+
+To test generation throughput with random prompts (e.g. large batch size):
+```
+python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 128
+python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 128
+```
+
+## Citation
+
+If you use this codebase, or otherwise found our work valuable, please cite Mamba:
+```
+@article{mamba,
+ title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces},
+ author={Gu, Albert and Dao, Tri},
+ journal={arXiv preprint arXiv:2312.00752},
+ year={2023}
+}
+```
diff --git a/PRISM/SegMamba/mamba/assets/selection.png b/PRISM/SegMamba/mamba/assets/selection.png
new file mode 100644
index 0000000000000000000000000000000000000000..1adeff6ecac841e1eb7067f20517705f82697cfc
--- /dev/null
+++ b/PRISM/SegMamba/mamba/assets/selection.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d57aeeeca3250d0551956494aa0ef7f56f5758563d849cb3a3576f836e13b914
+size 818624
diff --git a/PRISM/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py b/PRISM/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f2943cb4bde6f25eddb82b7b999c5c5f8b39acc
--- /dev/null
+++ b/PRISM/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2023, Tri Dao, Albert Gu.
+
+import argparse
+import time
+import json
+
+import torch
+import torch.nn.functional as F
+
+from einops import rearrange
+
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
+
+
+parser = argparse.ArgumentParser(description="Generation benchmarking")
+parser.add_argument("--model-name", type=str, default="state-spaces/mamba-130m")
+parser.add_argument("--prompt", type=str, default=None)
+parser.add_argument("--promptlen", type=int, default=100)
+parser.add_argument("--genlen", type=int, default=100)
+parser.add_argument("--temperature", type=float, default=1.0)
+parser.add_argument("--topk", type=int, default=1)
+parser.add_argument("--topp", type=float, default=1.0)
+parser.add_argument("--batch", type=int, default=1)
+args = parser.parse_args()
+
+repeats = 3
+device = "cuda"
+dtype = torch.float16
+
+print(f"Loading model {args.model_name}")
+is_mamba = args.model_name.startswith("state-spaces/mamba-") or "mamba" in args.model_name
+
+if is_mamba:
+ tokenizer = AutoTokenizer.from_pretrained("/home/zhulianghui/VisionProjects/mamba/ckpts/gpt-neox-20b-tokenizer")
+ model = MambaLMHeadModel.from_pretrained(args.model_name, device=device, dtype=dtype)
+else:
+ tokenizer = AutoTokenizer.from_pretrained(args.model_name)
+ model = AutoModelForCausalLM.from_pretrained(args.model_name, device_map={"": device}, torch_dtype=dtype)
+model.eval()
+print(f"Number of parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
+
+torch.random.manual_seed(0)
+if args.prompt is None:
+ input_ids = torch.randint(1, 1000, (args.batch, args.promptlen), dtype=torch.long, device="cuda")
+ attn_mask = torch.ones_like(input_ids, dtype=torch.long, device="cuda")
+else:
+ tokens = tokenizer(args.prompt, return_tensors="pt")
+ input_ids = tokens.input_ids.to(device=device)
+ attn_mask = tokens.attention_mask.to(device=device)
+max_length = input_ids.shape[1] + args.genlen
+
+if is_mamba:
+ fn = lambda: model.generate(
+ input_ids=input_ids,
+ max_length=max_length,
+ cg=True,
+ return_dict_in_generate=True,
+ output_scores=True,
+ enable_timing=False,
+ temperature=args.temperature,
+ top_k=args.topk,
+ top_p=args.topp,
+ )
+else:
+ fn = lambda: model.generate(
+ input_ids=input_ids,
+ attention_mask=attn_mask,
+ max_length=max_length,
+ return_dict_in_generate=True,
+ pad_token_id=tokenizer.eos_token_id,
+ do_sample=True,
+ temperature=args.temperature,
+ top_k=args.topk,
+ top_p=args.topp,
+ )
+out = fn()
+if args.prompt is not None:
+ print(tokenizer.batch_decode(out.sequences.tolist()))
+
+torch.cuda.synchronize()
+start = time.time()
+for _ in range(repeats):
+ fn()
+torch.cuda.synchronize()
+print(f"Prompt length: {len(input_ids[0])}, generation length: {len(out.sequences[0]) - len(input_ids[0])}")
+print(f"{args.model_name} prompt processing + decoding time: {(time.time() - start) / repeats * 1000:.0f}ms")
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ecd144db5dbec72bcfcdcea28c624a7e2bf053b
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py
@@ -0,0 +1,5 @@
+__version__ = "1.0.1"
+
+from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn
+from mamba_ssm.modules.mamba_simple import Mamba
+from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/__init__.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..383f773f1f700cd53176e51327a5d8dc58158da0
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py
@@ -0,0 +1,233 @@
+# Copyright (c) 2023, Albert Gu, Tri Dao.
+
+import math
+from functools import partial
+
+from collections import namedtuple
+
+import torch
+import torch.nn as nn
+
+from mamba_ssm.modules.mamba_simple import Mamba, Block
+from mamba_ssm.utils.generation import GenerationMixin
+from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf
+
+try:
+ from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn
+except ImportError:
+ RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None
+
+
+def create_block(
+ d_model,
+ ssm_cfg=None,
+ norm_epsilon=1e-5,
+ rms_norm=False,
+ residual_in_fp32=False,
+ fused_add_norm=False,
+ layer_idx=None,
+ device=None,
+ dtype=None,
+):
+ if ssm_cfg is None:
+ ssm_cfg = {}
+ factory_kwargs = {"device": device, "dtype": dtype}
+ mixer_cls = partial(Mamba, layer_idx=layer_idx, **ssm_cfg, **factory_kwargs)
+ norm_cls = partial(
+ nn.LayerNorm if not rms_norm else RMSNorm, eps=norm_epsilon, **factory_kwargs
+ )
+ block = Block(
+ d_model,
+ mixer_cls,
+ norm_cls=norm_cls,
+ fused_add_norm=fused_add_norm,
+ residual_in_fp32=residual_in_fp32,
+ )
+ block.layer_idx = layer_idx
+ return block
+
+
+# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
+def _init_weights(
+ module,
+ n_layer,
+ initializer_range=0.02, # Now only used for embedding layer.
+ rescale_prenorm_residual=True,
+ n_residuals_per_layer=1, # Change to 2 if we have MLP
+):
+ if isinstance(module, nn.Linear):
+ if module.bias is not None:
+ if not getattr(module.bias, "_no_reinit", False):
+ nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.Embedding):
+ nn.init.normal_(module.weight, std=initializer_range)
+
+ if rescale_prenorm_residual:
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+ #
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+ for name, p in module.named_parameters():
+ if name in ["out_proj.weight", "fc2.weight"]:
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+ # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
+ # We need to reinit p since this code could be called multiple times
+ # Having just p *= scale would repeatedly scale it down
+ nn.init.kaiming_uniform_(p, a=math.sqrt(5))
+ with torch.no_grad():
+ p /= math.sqrt(n_residuals_per_layer * n_layer)
+
+
+class MixerModel(nn.Module):
+ def __init__(
+ self,
+ d_model: int,
+ n_layer: int,
+ vocab_size: int,
+ ssm_cfg=None,
+ norm_epsilon: float = 1e-5,
+ rms_norm: bool = False,
+ initializer_cfg=None,
+ fused_add_norm=False,
+ residual_in_fp32=False,
+ device=None,
+ dtype=None,
+ ) -> None:
+ factory_kwargs = {"device": device, "dtype": dtype}
+ super().__init__()
+ self.residual_in_fp32 = residual_in_fp32
+
+ self.embedding = nn.Embedding(vocab_size, d_model, **factory_kwargs)
+
+ # We change the order of residual and layer norm:
+ # Instead of LN -> Attn / MLP -> Add, we do:
+ # Add -> LN -> Attn / MLP / Mixer, returning both the residual branch (output of Add) and
+ # the main branch (output of MLP / Mixer). The model definition is unchanged.
+ # This is for performance reason: we can fuse add + layer_norm.
+ self.fused_add_norm = fused_add_norm
+ if self.fused_add_norm:
+ if layer_norm_fn is None or rms_norm_fn is None:
+ raise ImportError("Failed to import Triton LayerNorm / RMSNorm kernels")
+
+ self.layers = nn.ModuleList(
+ [
+ create_block(
+ d_model,
+ ssm_cfg=ssm_cfg,
+ norm_epsilon=norm_epsilon,
+ rms_norm=rms_norm,
+ residual_in_fp32=residual_in_fp32,
+ fused_add_norm=fused_add_norm,
+ layer_idx=i,
+ **factory_kwargs,
+ )
+ for i in range(n_layer)
+ ]
+ )
+
+ self.norm_f = (nn.LayerNorm if not rms_norm else RMSNorm)(
+ d_model, eps=norm_epsilon, **factory_kwargs
+ )
+
+ self.apply(
+ partial(
+ _init_weights,
+ n_layer=n_layer,
+ **(initializer_cfg if initializer_cfg is not None else {}),
+ )
+ )
+
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
+ return {
+ i: layer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
+ for i, layer in enumerate(self.layers)
+ }
+
+ def forward(self, input_ids, inference_params=None):
+ hidden_states = self.embedding(input_ids)
+ residual = None
+ for layer in self.layers:
+ hidden_states, residual = layer(
+ hidden_states, residual, inference_params=inference_params
+ )
+ if not self.fused_add_norm:
+ residual = (hidden_states + residual) if residual is not None else hidden_states
+ hidden_states = self.norm_f(residual.to(dtype=self.norm_f.weight.dtype))
+ else:
+ # Set prenorm=False here since we don't need the residual
+ fused_add_norm_fn = rms_norm_fn if isinstance(self.norm_f, RMSNorm) else layer_norm_fn
+ hidden_states = fused_add_norm_fn(
+ hidden_states,
+ self.norm_f.weight,
+ self.norm_f.bias,
+ eps=self.norm_f.eps,
+ residual=residual,
+ prenorm=False,
+ residual_in_fp32=self.residual_in_fp32,
+ )
+ return hidden_states
+
+
+class MambaLMHeadModel(nn.Module, GenerationMixin):
+
+ def __init__(
+ self,
+ d_model: int,
+ n_layer: int,
+ vocab_size: int,
+ initializer_cfg=None,
+ pad_vocab_size_multiple: int = 1,
+ device=None,
+ dtype=None,
+ **backbone_kwargs,
+ ) -> None:
+ factory_kwargs = {"device": device, "dtype": dtype}
+ super().__init__()
+ if vocab_size % pad_vocab_size_multiple != 0:
+ vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
+ self.backbone = MixerModel(
+ d_model=d_model,
+ n_layer=n_layer,
+ vocab_size=vocab_size,
+ initializer_cfg=initializer_cfg,
+ **backbone_kwargs,
+ **factory_kwargs,
+ )
+ self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
+
+ # Initialize weights and apply final processing
+ self.apply(
+ partial(
+ _init_weights,
+ n_layer=n_layer,
+ **(initializer_cfg if initializer_cfg is not None else {}),
+ )
+ )
+ self.tie_weights()
+
+ def tie_weights(self):
+ self.lm_head.weight = self.backbone.embedding.weight
+
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
+ return self.backbone.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
+
+ def forward(self, input_ids, position_ids=None, inference_params=None, num_last_tokens=0):
+ """
+ "position_ids" is just to be compatible with Transformer generation. We don't use it.
+ num_last_tokens: if > 0, only return the logits for the last n tokens
+ """
+ hidden_states = self.backbone(input_ids, inference_params=inference_params)
+ if num_last_tokens > 0:
+ hidden_states = hidden_states[:, -num_last_tokens:]
+ lm_logits = self.lm_head(hidden_states)
+ CausalLMOutput = namedtuple("CausalLMOutput", ["logits"])
+ return CausalLMOutput(logits=lm_logits)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name, device=None, dtype=None, **kwargs):
+ config = load_config_hf(pretrained_model_name)
+ model = cls(**config, device=device, dtype=dtype, **kwargs)
+ model.load_state_dict(load_state_dict_hf(pretrained_model_name, device=device, dtype=dtype))
+ return model
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/__init__.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ffc53d24110bc39651d086f7f3969cf5069f196
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py
@@ -0,0 +1,501 @@
+# Copyright (c) 2023, Tri Dao, Albert Gu.
+
+import math
+from typing import Optional
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch import Tensor
+
+from einops import rearrange, repeat
+
+try:
+ from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
+except ImportError:
+ causal_conv1d_fn, causal_conv1d_update = None, None
+
+try:
+ from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj
+except ImportError:
+ selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj = None, None, None, None
+
+try:
+ from mamba_ssm.ops.triton.selective_state_update import selective_state_update
+except ImportError:
+ selective_state_update = None
+
+try:
+ from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn
+except ImportError:
+ RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None
+
+
+class Mamba(nn.Module):
+ def __init__(
+ self,
+ d_model,
+ d_state=16,
+ d_conv=4,
+ expand=2,
+ dt_rank="auto",
+ dt_min=0.001,
+ dt_max=0.1,
+ dt_init="random",
+ dt_scale=1.0,
+ dt_init_floor=1e-4,
+ conv_bias=True,
+ bias=False,
+ use_fast_path=True, # Fused kernel options
+ layer_idx=None,
+ device=None,
+ dtype=None,
+ bimamba_type="none",
+ nslices=5
+ ):
+ factory_kwargs = {"device": device, "dtype": dtype}
+ super().__init__()
+ self.d_model = d_model
+ self.d_state = d_state
+ self.d_conv = d_conv
+ self.expand = expand
+ self.d_inner = int(self.expand * self.d_model)
+ self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank
+ self.use_fast_path = use_fast_path
+ self.layer_idx = layer_idx
+ self.bimamba_type = bimamba_type
+ self.nslices = nslices
+
+ self.in_proj = nn.Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs)
+
+ self.conv1d = nn.Conv1d(
+ in_channels=self.d_inner,
+ out_channels=self.d_inner,
+ bias=conv_bias,
+ kernel_size=d_conv,
+ groups=self.d_inner,
+ padding=d_conv - 1,
+ **factory_kwargs,
+ )
+
+ self.activation = "silu"
+ self.act = nn.SiLU()
+
+ self.x_proj = nn.Linear(
+ self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs
+ )
+ self.dt_proj = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs)
+
+ # Initialize special dt projection to preserve variance at initialization
+ dt_init_std = self.dt_rank**-0.5 * dt_scale
+ if dt_init == "constant":
+ nn.init.constant_(self.dt_proj.weight, dt_init_std)
+ elif dt_init == "random":
+ nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std)
+ else:
+ raise NotImplementedError
+
+ # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max
+ dt = torch.exp(
+ torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min))
+ + math.log(dt_min)
+ ).clamp(min=dt_init_floor)
+ # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
+ inv_dt = dt + torch.log(-torch.expm1(-dt))
+ with torch.no_grad():
+ self.dt_proj.bias.copy_(inv_dt)
+ # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit
+ self.dt_proj.bias._no_reinit = True
+
+ # S4D real initialization
+ A = repeat(
+ torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device),
+ "n -> d n",
+ d=self.d_inner,
+ ).contiguous()
+ A_log = torch.log(A) # Keep A_log in fp32
+ self.A_log = nn.Parameter(A_log)
+ self.A_log._no_weight_decay = True
+
+ # D "skip" parameter
+ self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32
+ self.D._no_weight_decay = True
+
+ # bidirectional
+ assert bimamba_type == "v3"
+
+ A_b = repeat(
+ torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device),
+ "n -> d n",
+ d=self.d_inner,
+ ).contiguous()
+ A_b_log = torch.log(A_b) # Keep A_b_log in fp32
+ self.A_b_log = nn.Parameter(A_b_log)
+ self.A_b_log._no_weight_decay = True
+
+ self.conv1d_b = nn.Conv1d(
+ in_channels=self.d_inner,
+ out_channels=self.d_inner,
+ bias=conv_bias,
+ kernel_size=d_conv,
+ groups=self.d_inner,
+ padding=d_conv - 1,
+ **factory_kwargs,
+ )
+
+ self.x_proj_b = nn.Linear(
+ self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs
+ )
+ self.dt_proj_b = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs)
+
+ self.D_b = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32
+ self.D_b._no_weight_decay = True
+
+ # assert bimamba_type == "v3"
+ # spatial
+ A_s = repeat(
+ torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device),
+ "n -> d n",
+ d=self.d_inner,
+ ).contiguous()
+ A_s_log = torch.log(A_s) # Keep A_s_log in fp32
+ self.A_s_log = nn.Parameter(A_s_log)
+ self.A_s_log._no_weight_decay = True
+
+ self.conv1d_s = nn.Conv1d(
+ in_channels=self.d_inner,
+ out_channels=self.d_inner,
+ bias=conv_bias,
+ kernel_size=d_conv,
+ groups=self.d_inner,
+ padding=d_conv - 1,
+ **factory_kwargs,
+ )
+
+ self.x_proj_s = nn.Linear(
+ self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs
+ )
+ self.dt_proj_s = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs)
+
+ self.D_s = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32
+ self.D_s._no_weight_decay = True
+
+
+
+
+ self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs)
+
+ def forward(self, hidden_states, inference_params=None):
+ """
+ hidden_states: (B, L, D)
+ Returns: same shape as hidden_states
+ """
+ batch, seqlen, dim = hidden_states.shape
+
+ conv_state, ssm_state = None, None
+ if inference_params is not None:
+ conv_state, ssm_state = self._get_states_from_cache(inference_params, batch)
+ if inference_params.seqlen_offset > 0:
+ # The states are updated inplace
+ out, _, _ = self.step(hidden_states, conv_state, ssm_state)
+ return out
+
+ # We do matmul and transpose BLH -> HBL at the same time
+ xz = rearrange(
+ self.in_proj.weight @ rearrange(hidden_states, "b l d -> d (b l)"),
+ "d (b l) -> b d l",
+ l=seqlen,
+ )
+ if self.in_proj.bias is not None:
+ xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), "d -> d 1")
+
+ A = -torch.exp(self.A_log.float()) # (d_inner, d_state)
+ # In the backward pass we write dx and dz next to each other to avoid torch.cat
+ if self.use_fast_path and inference_params is None: # Doesn't support outputting the states
+ if self.bimamba_type == "v3":
+ A_b = -torch.exp(self.A_b_log.float())
+ out = mamba_inner_fn_no_out_proj(
+ xz,
+ self.conv1d.weight,
+ self.conv1d.bias,
+ self.x_proj.weight,
+ self.dt_proj.weight,
+ A,
+ None, # input-dependent B
+ None, # input-dependent C
+ self.D.float(),
+ delta_bias=self.dt_proj.bias.float(),
+ delta_softplus=True,
+ )
+ out_b = mamba_inner_fn_no_out_proj(
+ xz.flip([-1]),
+ self.conv1d_b.weight,
+ self.conv1d_b.bias,
+ self.x_proj_b.weight,
+ self.dt_proj_b.weight,
+ A_b,
+ None,
+ None,
+ self.D_b.float(),
+ delta_bias=self.dt_proj_b.bias.float(),
+ delta_softplus=True,
+ )
+ A_s = -torch.exp(self.A_s_log.float())
+
+ xz_s = xz.chunk(self.nslices, dim=-1)
+ xz_s = torch.stack(xz_s,dim=-1)
+ xz_s = xz_s.flatten(-2)
+ out_s = mamba_inner_fn_no_out_proj(
+ xz_s,
+ self.conv1d_s.weight,
+ self.conv1d_s.bias,
+ self.x_proj_s.weight,
+ self.dt_proj_s.weight,
+ A_s,
+ None,
+ None,
+ self.D_s.float(),
+ delta_bias=self.dt_proj_s.bias.float(),
+ delta_softplus=True,
+ )
+ out_s = out_s.reshape(batch,self.d_inner,seqlen//self.nslices,self.nslices).permute(0,1,3,2).flatten(-2)
+
+ # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)
+ out = F.linear(rearrange(out + out_b.flip([-1]) + out_s, "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias)
+ elif self.bimamba_type == "v2":
+ A_b = -torch.exp(self.A_b_log.float())
+ out = mamba_inner_fn_no_out_proj(
+ xz,
+ self.conv1d.weight,
+ self.conv1d.bias,
+ self.x_proj.weight,
+ self.dt_proj.weight,
+ A,
+ None, # input-dependent B
+ None, # input-dependent C
+ self.D.float(),
+ delta_bias=self.dt_proj.bias.float(),
+ delta_softplus=True,
+ )
+ out_b = mamba_inner_fn_no_out_proj(
+ xz.flip([-1]),
+ self.conv1d_b.weight,
+ self.conv1d_b.bias,
+ self.x_proj_b.weight,
+ self.dt_proj_b.weight,
+ A_b,
+ None,
+ None,
+ self.D_b.float(),
+ delta_bias=self.dt_proj_b.bias.float(),
+ delta_softplus=True,
+ )
+ # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)
+ out = F.linear(rearrange(out + out_b.flip([-1]), "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias)
+ else:
+ out = mamba_inner_fn(
+ xz,
+ self.conv1d.weight,
+ self.conv1d.bias,
+ self.x_proj.weight,
+ self.dt_proj.weight,
+ self.out_proj.weight,
+ self.out_proj.bias,
+ A,
+ None, # input-dependent B
+ None, # input-dependent C
+ self.D.float(),
+ delta_bias=self.dt_proj.bias.float(),
+ delta_softplus=True,
+ )
+ else:
+ x, z = xz.chunk(2, dim=1)
+ # Compute short convolution
+ if conv_state is not None:
+ conv_state.copy_(x[:, :, -self.d_conv :]) # Update state (B D W)
+ if causal_conv1d_fn is None:
+ x = self.act(self.conv1d(x)[..., :seqlen])
+ else:
+ assert self.activation in ["silu", "swish"]
+ x = causal_conv1d_fn(
+ x,
+ rearrange(self.conv1d.weight, "d 1 w -> d w"),
+ self.conv1d.bias,
+ self.activation,
+ )
+
+ # We're careful here about the layout, to avoid extra transposes.
+ # We want dt to have d as the slowest moving dimension
+ # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
+ x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d)
+ dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1)
+ dt = self.dt_proj.weight @ dt.t()
+ dt = rearrange(dt, "d (b l) -> b d l", l=seqlen)
+ B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
+ C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
+ assert self.activation in ["silu", "swish"]
+ y = selective_scan_fn(
+ x,
+ dt,
+ A,
+ B,
+ C,
+ self.D.float(),
+ z=z,
+ delta_bias=self.dt_proj.bias.float(),
+ delta_softplus=True,
+ return_last_state=ssm_state is not None,
+ )
+ if ssm_state is not None:
+ y, last_state = y
+ ssm_state.copy_(last_state)
+ y = rearrange(y, "b d l -> b l d")
+ out = self.out_proj(y)
+ return out
+
+ def step(self, hidden_states, conv_state, ssm_state):
+ dtype = hidden_states.dtype
+ assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now"
+ xz = self.in_proj(hidden_states.squeeze(1)) # (B 2D)
+ x, z = xz.chunk(2, dim=-1) # (B D)
+
+ # Conv step
+ if causal_conv1d_update is None:
+ conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W)
+ conv_state[:, :, -1] = x
+ x = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D)
+ if self.conv1d.bias is not None:
+ x = x + self.conv1d.bias
+ x = self.act(x).to(dtype=dtype)
+ else:
+ x = causal_conv1d_update(
+ x,
+ conv_state,
+ rearrange(self.conv1d.weight, "d 1 w -> d w"),
+ self.conv1d.bias,
+ self.activation,
+ )
+
+ x_db = self.x_proj(x) # (B dt_rank+2*d_state)
+ dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1)
+ # Don't add dt_bias here
+ dt = F.linear(dt, self.dt_proj.weight) # (B d_inner)
+ A = -torch.exp(self.A_log.float()) # (d_inner, d_state)
+
+ # SSM step
+ if selective_state_update is None:
+ # Discretize A and B
+ dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype))
+ dA = torch.exp(torch.einsum("bd,dn->bdn", dt, A))
+ dB = torch.einsum("bd,bn->bdn", dt, B)
+ ssm_state.copy_(ssm_state * dA + rearrange(x, "b d -> b d 1") * dB)
+ y = torch.einsum("bdn,bn->bd", ssm_state.to(dtype), C)
+ y = y + self.D.to(dtype) * x
+ y = y * self.act(z) # (B D)
+ else:
+ y = selective_state_update(
+ ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True
+ )
+
+ out = self.out_proj(y)
+ return out.unsqueeze(1), conv_state, ssm_state
+
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
+ device = self.out_proj.weight.device
+ conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype
+ conv_state = torch.zeros(
+ batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype
+ )
+ ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype
+ # ssm_dtype = torch.float32
+ ssm_state = torch.zeros(
+ batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype
+ )
+ return conv_state, ssm_state
+
+ def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False):
+ assert self.layer_idx is not None
+ if self.layer_idx not in inference_params.key_value_memory_dict:
+ batch_shape = (batch_size,)
+ conv_state = torch.zeros(
+ batch_size,
+ self.d_model * self.expand,
+ self.d_conv,
+ device=self.conv1d.weight.device,
+ dtype=self.conv1d.weight.dtype,
+ )
+ ssm_state = torch.zeros(
+ batch_size,
+ self.d_model * self.expand,
+ self.d_state,
+ device=self.dt_proj.weight.device,
+ dtype=self.dt_proj.weight.dtype,
+ # dtype=torch.float32,
+ )
+ inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state)
+ else:
+ conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx]
+ # TODO: What if batch size changes between generation, and we reuse the same states?
+ if initialize_states:
+ conv_state.zero_()
+ ssm_state.zero_()
+ return conv_state, ssm_state
+
+
+class Block(nn.Module):
+ def __init__(
+ self, dim, mixer_cls, norm_cls=nn.LayerNorm, fused_add_norm=False, residual_in_fp32=False
+ ):
+ """
+ Simple block wrapping a mixer class with LayerNorm/RMSNorm and residual connection"
+
+ This Block has a slightly different structure compared to a regular
+ prenorm Transformer block.
+ The standard block is: LN -> MHA/MLP -> Add.
+ [Ref: https://arxiv.org/abs/2002.04745]
+ Here we have: Add -> LN -> Mixer, returning both
+ the hidden_states (output of the mixer) and the residual.
+ This is purely for performance reasons, as we can fuse add and LayerNorm.
+ The residual needs to be provided (except for the very first block).
+ """
+ super().__init__()
+ self.residual_in_fp32 = residual_in_fp32
+ self.fused_add_norm = fused_add_norm
+ self.mixer = mixer_cls(dim)
+ self.norm = norm_cls(dim)
+ if self.fused_add_norm:
+ assert RMSNorm is not None, "RMSNorm import fails"
+ assert isinstance(
+ self.norm, (nn.LayerNorm, RMSNorm)
+ ), "Only LayerNorm and RMSNorm are supported for fused_add_norm"
+
+ def forward(
+ self, hidden_states: Tensor, residual: Optional[Tensor] = None, inference_params=None
+ ):
+ r"""Pass the input through the encoder layer.
+
+ Args:
+ hidden_states: the sequence to the encoder layer (required).
+ residual: hidden_states = Mixer(LN(residual))
+ """
+ if not self.fused_add_norm:
+ residual = (hidden_states + residual) if residual is not None else hidden_states
+ hidden_states = self.norm(residual.to(dtype=self.norm.weight.dtype))
+ if self.residual_in_fp32:
+ residual = residual.to(torch.float32)
+ else:
+ fused_add_norm_fn = rms_norm_fn if isinstance(self.norm, RMSNorm) else layer_norm_fn
+ hidden_states, residual = fused_add_norm_fn(
+ hidden_states,
+ self.norm.weight,
+ self.norm.bias,
+ residual=residual,
+ prenorm=True,
+ residual_in_fp32=self.residual_in_fp32,
+ eps=self.norm.eps,
+ )
+ hidden_states = self.mixer(hidden_states, inference_params=inference_params)
+ return hidden_states, residual
+
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
+ return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/__init__.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/selective_scan_interface.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/selective_scan_interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..99b455ed949c123bb453922d5ac88d00f401e392
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/selective_scan_interface.py
@@ -0,0 +1,709 @@
+# Copyright (c) 2023, Tri Dao, Albert Gu.
+
+import torch
+import torch.nn.functional as F
+from torch.cuda.amp import custom_bwd, custom_fwd
+
+from einops import rearrange, repeat
+
+from causal_conv1d import causal_conv1d_fn
+import causal_conv1d_cuda
+import selective_scan_cuda
+
+
+class SelectiveScanFn(torch.autograd.Function):
+    """Autograd wrapper around the fused CUDA selective-scan kernel.
+
+    Forward returns ``out`` (or the gated ``out_z`` when ``z`` is given); with
+    ``return_last_state=True`` it additionally returns the final SSM state of
+    shape (batch, dim, dstate).
+    """
+
+    @staticmethod
+    def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False,
+                return_last_state=False):
+        # The CUDA kernel requires the innermost dimension to be contiguous.
+        if u.stride(-1) != 1:
+            u = u.contiguous()
+        if delta.stride(-1) != 1:
+            delta = delta.contiguous()
+        if D is not None:
+            D = D.contiguous()
+        if B.stride(-1) != 1:
+            B = B.contiguous()
+        if C.stride(-1) != 1:
+            C = C.contiguous()
+        if z is not None and z.stride(-1) != 1:
+            z = z.contiguous()
+        # The kernel expects a group dimension; insert a singleton group for
+        # 3-D (ungrouped) B/C and remember to squeeze the gradients back.
+        if B.dim() == 3:
+            B = rearrange(B, "b dstate l -> b 1 dstate l")
+            ctx.squeeze_B = True
+        if C.dim() == 3:
+            C = rearrange(C, "b dstate l -> b 1 dstate l")
+            ctx.squeeze_C = True
+        out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus)
+        ctx.delta_softplus = delta_softplus
+        ctx.has_z = z is not None
+        last_state = x[:, :, -1, 1::2]  # (batch, dim, dstate)
+        if not ctx.has_z:
+            ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x)
+            return out if not return_last_state else (out, last_state)
+        else:
+            # With a gate z the kernel also returns the gated output out_z.
+            ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out)
+            out_z = rest[0]
+            return out_z if not return_last_state else (out_z, last_state)
+
+    @staticmethod
+    def backward(ctx, dout, *args):
+        # *args swallows the gradient of last_state (intentionally ignored).
+        if not ctx.has_z:
+            u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors
+            z = None
+            out = None
+        else:
+            u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors
+        if dout.stride(-1) != 1:
+            dout = dout.contiguous()
+        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
+        # backward of selective_scan_cuda with the backward of chunk).
+        # Here we just pass in None and dz will be allocated in the C++ code.
+        du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd(
+            u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus,
+            False  # option to recompute out_z, not used here
+        )
+        dz = rest[0] if ctx.has_z else None
+        # Undo the singleton group dimension inserted in forward.
+        dB = dB.squeeze(1) if getattr(ctx, "squeeze_B", False) else dB
+        dC = dC.squeeze(1) if getattr(ctx, "squeeze_C", False) else dC
+        # One gradient per forward input (None for the two bool flags).
+        return (du, ddelta, dA, dB, dC,
+                dD if D is not None else None,
+                dz,
+                ddelta_bias if delta_bias is not None else None,
+                None,
+                None)
+
+
+def selective_scan_fn(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False,
+                      return_last_state=False):
+    """Functional entry point for the fused CUDA selective scan.
+
+    if return_last_state is True, returns (out, last_state)
+    last_state has shape (batch, dim, dstate). Note that the gradient of the last state is
+    not considered in the backward pass.
+    """
+    return SelectiveScanFn.apply(u, delta, A, B, C, D, z, delta_bias, delta_softplus, return_last_state)
+
+
+def selective_scan_ref(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False,
+                       return_last_state=False):
+    """Pure-PyTorch reference implementation of the selective scan (slow; for
+    testing the CUDA kernel). Computation is carried out in fp32 and cast back
+    to the input dtype at the end.
+
+    u: r(B D L)
+    delta: r(B D L)
+    A: c(D N) or r(D N)
+    B: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L)
+    C: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L)
+    D: r(D)
+    z: r(B D L)
+    delta_bias: r(D), fp32
+
+    out: r(B D L)
+    last_state (optional): r(B D dstate) or c(B D dstate)
+    """
+    dtype_in = u.dtype
+    u = u.float()
+    delta = delta.float()
+    if delta_bias is not None:
+        delta = delta + delta_bias[..., None].float()
+    if delta_softplus:
+        delta = F.softplus(delta)
+    batch, dim, dstate = u.shape[0], A.shape[0], A.shape[1]
+    is_variable_B = B.dim() >= 3
+    is_variable_C = C.dim() >= 3
+    if A.is_complex():
+        # For complex A, variable B/C are stored as interleaved real/imag
+        # pairs along L; reinterpret them as complex tensors.
+        if is_variable_B:
+            B = torch.view_as_complex(rearrange(B.float(), "... (L two) -> ... L two", two=2))
+        if is_variable_C:
+            C = torch.view_as_complex(rearrange(C.float(), "... (L two) -> ... L two", two=2))
+    else:
+        B = B.float()
+        C = C.float()
+    x = A.new_zeros((batch, dim, dstate))
+    ys = []
+    # Discretization: x_{t+1} = exp(delta*A) x_t + delta*B*u.
+    deltaA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A))
+    if not is_variable_B:
+        deltaB_u = torch.einsum('bdl,dn,bdl->bdln', delta, B, u)
+    else:
+        if B.dim() == 3:
+            deltaB_u = torch.einsum('bdl,bnl,bdl->bdln', delta, B, u)
+        else:
+            # Grouped B: broadcast each group over its H = dim // G channels.
+            B = repeat(B, "B G N L -> B (G H) N L", H=dim // B.shape[1])
+            deltaB_u = torch.einsum('bdl,bdnl,bdl->bdln', delta, B, u)
+    if is_variable_C and C.dim() == 4:
+        C = repeat(C, "B G N L -> B (G H) N L", H=dim // C.shape[1])
+    last_state = None
+    # Sequential scan over the time dimension.
+    for i in range(u.shape[2]):
+        x = deltaA[:, :, i] * x + deltaB_u[:, :, i]
+        if not is_variable_C:
+            y = torch.einsum('bdn,dn->bd', x, C)
+        else:
+            if C.dim() == 3:
+                y = torch.einsum('bdn,bn->bd', x, C[:, :, i])
+            else:
+                y = torch.einsum('bdn,bdn->bd', x, C[:, :, :, i])
+        if i == u.shape[2] - 1:
+            last_state = x
+        if y.is_complex():
+            # Conjugate-pair convention: only 2x the real part contributes.
+            y = y.real * 2
+        ys.append(y)
+    y = torch.stack(ys, dim=2)  # (batch dim L)
+    out = y if D is None else y + u * rearrange(D, "d -> d 1")
+    if z is not None:
+        out = out * F.silu(z)
+    out = out.to(dtype=dtype_in)
+    return out if not return_last_state else (out, last_state)
+
+
+class MambaInnerFnNoOutProj(torch.autograd.Function):
+    """Fused Mamba inner block (conv1d -> x_proj -> selective scan) WITHOUT the
+    final output projection. Returns the gated scan output in (b, d, l) layout.
+    """
+
+    @staticmethod
+    @custom_fwd
+    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+                A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
+        """
+        xz: (batch, dim, seqlen)
+
+        checkpoint_lvl=1 recomputes conv1d_out and delta in backward instead of
+        saving them (trades compute for memory).
+        """
+        assert checkpoint_lvl in [0, 1]
+        L = xz.shape[-1]
+        delta_rank = delta_proj_weight.shape[1]
+        # Complex A stores each state as a (real, imag) pair in x_dbl.
+        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+        if torch.is_autocast_enabled():
+            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
+            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
+        if xz.stride(-1) != 1:
+            xz = xz.contiguous()
+        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
+        x, z = xz.chunk(2, dim=1)
+        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
+        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
+        # We're being very careful here about the layout, to avoid extra transposes.
+        # We want delta to have d as the slowest moving dimension
+        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
+        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
+        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
+        ctx.is_variable_B = B is None
+        ctx.is_variable_C = C is None
+        ctx.B_proj_bias_is_None = B_proj_bias is None
+        ctx.C_proj_bias_is_None = C_proj_bias is None
+        if B is None:  # variable B: slice it out of the x_proj output
+            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
+            if B_proj_bias is not None:
+                B = B + B_proj_bias.to(dtype=B.dtype)
+            if not A.is_complex():
+                # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
+                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
+            else:
+                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
+        else:
+            if B.stride(-1) != 1:
+                B = B.contiguous()
+        if C is None:  # variable C: slice it out of the x_proj output
+            C = x_dbl[:, -d_state:]  # (bl dstate)
+            if C_proj_bias is not None:
+                C = C + C_proj_bias.to(dtype=C.dtype)
+            if not A.is_complex():
+                # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
+                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
+            else:
+                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
+        else:
+            if C.stride(-1) != 1:
+                C = C.contiguous()
+        if D is not None:
+            D = D.contiguous()
+        out, scan_intermediates, out_z = selective_scan_cuda.fwd(
+            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
+        )
+        ctx.delta_softplus = delta_softplus
+        ctx.checkpoint_lvl = checkpoint_lvl
+        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
+            conv1d_out, delta = None, None
+        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
+                              delta_proj_weight, conv1d_out, delta,
+                              A, B, C, D, delta_bias, scan_intermediates, out)
+        # return rearrange(out_z, "b d l -> b l d")
+        return out_z
+
+    @staticmethod
+    @custom_bwd
+    def backward(ctx, dout):
+        # dout: (batch, dim, seqlen) -- same (b d l) layout that forward returned.
+        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight,
+         conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors
+        L = xz.shape[-1]
+        delta_rank = delta_proj_weight.shape[1]
+        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+        x, z = xz.chunk(2, dim=1)
+        if dout.stride(-1) != 1:
+            dout = dout.contiguous()
+        if ctx.checkpoint_lvl == 1:
+            # Recompute the activations dropped in forward.
+            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
+            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
+                              "d (b l) -> b d l", l = L)
+        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
+        # backward of selective_scan_cuda with the backward of chunk).
+        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
+        dx, dz = dxz.chunk(2, dim=1)
+        # dout_y = rearrange(dout, "b l d -> b d l") # because no arrange at end of forward, so dout shape is b d l
+        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd(
+            conv1d_out, delta, A, B, C, D, z, delta_bias, dout, scan_intermediates, out, dz,
+            ctx.delta_softplus,
+            True  # option to recompute out_z
+        )
+        dD = dD if D is not None else None
+        dx_dbl = torch.empty_like(x_dbl)
+        dB_proj_bias = None
+        if ctx.is_variable_B:
+            if not A.is_complex():
+                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
+            else:
+                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
+            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
+            # Write dB into the x_proj-gradient buffer; dB itself is consumed.
+            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
+            dB = None
+        dC_proj_bias = None
+        if ctx.is_variable_C:
+            if not A.is_complex():
+                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
+            else:
+                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
+            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
+            dx_dbl[:, -d_state:] = dC  # (bl d)
+            dC = None
+        ddelta = rearrange(ddelta, "b d l -> d (b l)")
+        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
+        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
+        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
+        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
+        # Accumulate the x_proj branch gradient into dconv1d_out in place.
+        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
+        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
+        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
+        # backward of conv1d with the backward of chunk).
+        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
+            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
+        )
+        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
+        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
+        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
+                dA, dB, dC, dD,
+                ddelta_bias if delta_bias is not None else None,
+                dB_proj_bias, dC_proj_bias, None)
+
+
+class MambaInnerFn(torch.autograd.Function):
+    """Fused Mamba inner block: causal conv1d -> x_proj -> selective scan ->
+    output projection. Returns (batch, seqlen, d_model).
+    """
+
+    @staticmethod
+    @custom_fwd
+    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+                out_proj_weight, out_proj_bias,
+                A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
+        """
+        xz: (batch, dim, seqlen)
+
+        checkpoint_lvl=1 recomputes conv1d_out and delta in backward instead of
+        saving them (trades compute for memory).
+        """
+        assert checkpoint_lvl in [0, 1]
+        L = xz.shape[-1]
+        delta_rank = delta_proj_weight.shape[1]
+        # Complex A stores each state as a (real, imag) pair in x_dbl.
+        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+        if torch.is_autocast_enabled():
+            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
+            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
+            out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
+            out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype())
+                             if out_proj_bias is not None else None)
+        if xz.stride(-1) != 1:
+            xz = xz.contiguous()
+        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
+        x, z = xz.chunk(2, dim=1)
+        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
+        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
+        # We're being very careful here about the layout, to avoid extra transposes.
+        # We want delta to have d as the slowest moving dimension
+        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
+        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
+        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
+        ctx.is_variable_B = B is None
+        ctx.is_variable_C = C is None
+        ctx.B_proj_bias_is_None = B_proj_bias is None
+        ctx.C_proj_bias_is_None = C_proj_bias is None
+        if B is None:  # variable B: slice it out of the x_proj output
+            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
+            if B_proj_bias is not None:
+                B = B + B_proj_bias.to(dtype=B.dtype)
+            if not A.is_complex():
+                # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
+                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
+            else:
+                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
+        else:
+            if B.stride(-1) != 1:
+                B = B.contiguous()
+        if C is None:  # variable C: slice it out of the x_proj output
+            C = x_dbl[:, -d_state:]  # (bl dstate)
+            if C_proj_bias is not None:
+                C = C + C_proj_bias.to(dtype=C.dtype)
+            if not A.is_complex():
+                # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
+                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
+            else:
+                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
+        else:
+            if C.stride(-1) != 1:
+                C = C.contiguous()
+        if D is not None:
+            D = D.contiguous()
+        out, scan_intermediates, out_z = selective_scan_cuda.fwd(
+            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
+        )
+        ctx.delta_softplus = delta_softplus
+        ctx.out_proj_bias_is_None = out_proj_bias is None
+        ctx.checkpoint_lvl = checkpoint_lvl
+        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
+            conv1d_out, delta = None, None
+        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
+                              delta_proj_weight, out_proj_weight, conv1d_out, delta,
+                              A, B, C, D, delta_bias, scan_intermediates, out)
+        return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)
+
+    @staticmethod
+    @custom_bwd
+    def backward(ctx, dout):
+        # dout: (batch, seqlen, dim)
+        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight,
+         conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors
+        L = xz.shape[-1]
+        delta_rank = delta_proj_weight.shape[1]
+        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+        x, z = xz.chunk(2, dim=1)
+        if dout.stride(-1) != 1:
+            dout = dout.contiguous()
+        if ctx.checkpoint_lvl == 1:
+            # Recompute the activations dropped in forward.
+            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
+            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
+                              "d (b l) -> b d l", l = L)
+        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
+        # backward of selective_scan_cuda with the backward of chunk).
+        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
+        dx, dz = dxz.chunk(2, dim=1)
+        # Back-propagate through the output projection first.
+        dout = rearrange(dout, "b l e -> e (b l)")
+        dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L)
+        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd(
+            conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz,
+            ctx.delta_softplus,
+            True  # option to recompute out_z
+        )
+        dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)"))
+        dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None
+        dD = dD if D is not None else None
+        dx_dbl = torch.empty_like(x_dbl)
+        dB_proj_bias = None
+        if ctx.is_variable_B:
+            if not A.is_complex():
+                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
+            else:
+                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
+            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
+            # Write dB into the x_proj-gradient buffer; dB itself is consumed.
+            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
+            dB = None
+        dC_proj_bias = None
+        if ctx.is_variable_C:
+            if not A.is_complex():
+                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
+            else:
+                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
+            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
+            dx_dbl[:, -d_state:] = dC  # (bl d)
+            dC = None
+        ddelta = rearrange(ddelta, "b d l -> d (b l)")
+        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
+        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
+        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
+        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
+        # Accumulate the x_proj branch gradient into dconv1d_out in place.
+        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
+        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
+        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
+        # backward of conv1d with the backward of chunk).
+        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
+            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
+        )
+        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
+        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
+        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
+                dout_proj_weight, dout_proj_bias,
+                dA, dB, dC, dD,
+                ddelta_bias if delta_bias is not None else None,
+                dB_proj_bias, dC_proj_bias, None)
+
+
+class BiMambaInnerFn(torch.autograd.Function):
+    """Bidirectional variant of MambaInnerFn: runs the selective scan forward
+    with A and backward (on flipped sequences) with A_b, sums the two gated
+    outputs, then applies the output projection.
+    """
+
+    @staticmethod
+    @custom_fwd
+    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+                out_proj_weight, out_proj_bias,
+                A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
+        """
+        xz: (batch, dim, seqlen)
+
+        A_b is the state matrix for the reversed-direction scan. D, delta_bias
+        and the projections are shared between the two directions.
+        """
+        assert checkpoint_lvl in [0, 1]
+        L = xz.shape[-1]
+        delta_rank = delta_proj_weight.shape[1]
+        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+        if torch.is_autocast_enabled():
+            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
+            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
+            out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
+            out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype())
+                             if out_proj_bias is not None else None)
+        if xz.stride(-1) != 1:
+            xz = xz.contiguous()
+        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
+        x, z = xz.chunk(2, dim=1)
+        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
+        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
+        # We're being very careful here about the layout, to avoid extra transposes.
+        # We want delta to have d as the slowest moving dimension
+        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
+        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
+        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
+        ctx.is_variable_B = B is None
+        ctx.is_variable_C = C is None
+        ctx.B_proj_bias_is_None = B_proj_bias is None
+        ctx.C_proj_bias_is_None = C_proj_bias is None
+        if B is None:  # variable B: slice it out of the x_proj output
+            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
+            if B_proj_bias is not None:
+                B = B + B_proj_bias.to(dtype=B.dtype)
+            if not A.is_complex():
+                # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
+                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
+            else:
+                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
+        else:
+            if B.stride(-1) != 1:
+                B = B.contiguous()
+        if C is None:  # variable C: slice it out of the x_proj output
+            C = x_dbl[:, -d_state:]  # (bl dstate)
+            if C_proj_bias is not None:
+                C = C + C_proj_bias.to(dtype=C.dtype)
+            if not A.is_complex():
+                # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
+                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
+            else:
+                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
+        else:
+            if C.stride(-1) != 1:
+                C = C.contiguous()
+        if D is not None:
+            D = D.contiguous()
+        # Forward-direction scan.
+        out_f, scan_intermediates_f, out_z_f = selective_scan_cuda.fwd(
+            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
+        )
+        assert not A_b.is_complex(), "A should not be complex!!"
+        # Backward-direction scan on sequence-reversed inputs.
+        out_b, scan_intermediates_b, out_z_b = selective_scan_cuda.fwd(
+            conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus,
+        )
+
+        # Combine both directions (flip the reversed output back first).
+        out_z = out_z_f + out_z_b.flip([-1])
+
+        ctx.delta_softplus = delta_softplus
+        ctx.out_proj_bias_is_None = out_proj_bias is None
+        ctx.checkpoint_lvl = checkpoint_lvl
+        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
+            conv1d_out, delta = None, None
+        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
+                              delta_proj_weight, out_proj_weight, conv1d_out, delta,
+                              A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b)
+        return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)
+
+    @staticmethod
+    @custom_bwd
+    def backward(ctx, dout):
+        # dout: (batch, seqlen, dim)
+        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight,
+         conv1d_out, delta, A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b) = ctx.saved_tensors
+        L = xz.shape[-1]
+        delta_rank = delta_proj_weight.shape[1]
+        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+        x, z = xz.chunk(2, dim=1)
+        if dout.stride(-1) != 1:
+            dout = dout.contiguous()
+        if ctx.checkpoint_lvl == 1:
+            # Recompute the activations dropped in forward.
+            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
+            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
+                              "d (b l) -> b d l", l = L)
+        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
+        # backward of selective_scan_cuda with the backward of chunk).
+        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
+        dx, dz = dxz.chunk(2, dim=1)
+        # Back-propagate through the output projection first.
+        dout = rearrange(dout, "b l e -> e (b l)")
+        dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L)
+        # Backward of the forward-direction scan.
+        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z_f = selective_scan_cuda.bwd(
+            conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates_f, out_f, dz,
+            ctx.delta_softplus,
+            True  # option to recompute out_z
+        )
+        # flip one
+        # Backward of the reversed-direction scan (inputs re-flipped to match forward).
+        dz_b = torch.empty_like(dz)
+        dconv1d_out_f_b, ddelta_f_b, dA_b, dB_f_b, dC_f_b, dD_b, ddelta_bias_b, dz_b, out_z_b = selective_scan_cuda.bwd(
+            conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, dout_y.flip([-1]), scan_intermediates_b, out_b, dz_b,
+            ctx.delta_softplus,
+            True  # option to recompute out_z
+        )
+
+        # Sum gradients from both directions (flipping the reversed ones back).
+        dconv1d_out = dconv1d_out + dconv1d_out_f_b.flip([-1])
+        ddelta = ddelta + ddelta_f_b.flip([-1])
+        dB = dB + dB_f_b.flip([-1])
+        dC = dC + dC_f_b.flip([-1])
+        dD = dD + dD_b
+        ddelta_bias = ddelta_bias + ddelta_bias_b
+        dz = dz + dz_b.flip([-1])
+        out_z = out_z_f + out_z_b.flip([-1])
+
+        dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)"))
+        dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None
+        dD = dD if D is not None else None
+        dx_dbl = torch.empty_like(x_dbl)
+        dB_proj_bias = None
+        if ctx.is_variable_B:
+            if not A.is_complex():
+                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
+            else:
+                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
+            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
+            # Write dB into the x_proj-gradient buffer; dB itself is consumed.
+            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
+            dB = None
+        dC_proj_bias = None
+        if ctx.is_variable_C:
+            if not A.is_complex():
+                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
+            else:
+                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
+            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
+            dx_dbl[:, -d_state:] = dC  # (bl d)
+            dC = None
+        ddelta = rearrange(ddelta, "b d l -> d (b l)")
+        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
+        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
+        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
+        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
+        # Accumulate the x_proj branch gradient into dconv1d_out in place.
+        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
+        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
+        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
+        # backward of conv1d with the backward of chunk).
+        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
+            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
+        )
+        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
+        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
+        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
+                dout_proj_weight, dout_proj_bias,
+                dA, dA_b, dB, dC, dD,
+                ddelta_bias if delta_bias is not None else None,
+                dB_proj_bias, dC_proj_bias, None)
+
+
+def mamba_inner_fn(
+    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+    out_proj_weight, out_proj_bias,
+    A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+    C_proj_bias=None, delta_softplus=True
+):
+    """Functional entry point for the fused unidirectional Mamba inner block."""
+    return MambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+                              out_proj_weight, out_proj_bias,
+                              A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus)
+
+def bimamba_inner_fn(
+    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+    out_proj_weight, out_proj_bias,
+    A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+    C_proj_bias=None, delta_softplus=True
+):
+    """Functional entry point for the fused bidirectional Mamba inner block."""
+    return BiMambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+                                out_proj_weight, out_proj_bias,
+                                A, A_b, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus)
+
+
+def mamba_inner_fn_no_out_proj(
+    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+    A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+    C_proj_bias=None, delta_softplus=True
+):
+    """Like mamba_inner_fn but skips the output projection; returns (b, d, l)."""
+    return MambaInnerFnNoOutProj.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+                                       A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus)
+
+
+def mamba_inner_ref(
+    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+    out_proj_weight, out_proj_bias,
+    A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+    C_proj_bias=None, delta_softplus=True
+):
+    """Unfused reference implementation of mamba_inner_fn (for testing).
+
+    NOTE(review): the trailing `delta_softplus` argument is ignored here — the
+    selective_scan_fn call hard-codes delta_softplus=True; confirm against the
+    fused path before relying on delta_softplus=False.
+    """
+    L = xz.shape[-1]
+    delta_rank = delta_proj_weight.shape[1]
+    d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+    x, z = xz.chunk(2, dim=1)
+    x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu")
+    # We're being very careful here about the layout, to avoid extra transposes.
+    # We want delta to have d as the slowest moving dimension
+    # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
+    x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
+    delta = delta_proj_weight @ x_dbl[:, :delta_rank].t()
+    delta = rearrange(delta, "d (b l) -> b d l", l=L)
+    if B is None:  # variable B
+        B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl d)
+        if B_proj_bias is not None:
+            B = B + B_proj_bias.to(dtype=B.dtype)
+        if not A.is_complex():
+            B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
+        else:
+            B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
+    if C is None:  # variable C
+        C = x_dbl[:, -d_state:]  # (bl d)
+        if C_proj_bias is not None:
+            C = C + C_proj_bias.to(dtype=C.dtype)
+        if not A.is_complex():
+            C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
+        else:
+            C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
+    y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True)
+    return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias)
+
+
+def bimamba_inner_ref(
+    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+    out_proj_weight, out_proj_bias,
+    A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+    C_proj_bias=None, delta_softplus=True
+):
+    """Unfused reference implementation of bimamba_inner_fn (for testing).
+
+    NOTE(review): the trailing `delta_softplus` argument is ignored here — both
+    selective_scan_fn calls hard-code delta_softplus=True; confirm against the
+    fused path before relying on delta_softplus=False.
+    """
+    L = xz.shape[-1]
+    delta_rank = delta_proj_weight.shape[1]
+    d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+    x, z = xz.chunk(2, dim=1)
+    x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu")
+    # We're being very careful here about the layout, to avoid extra transposes.
+    # We want delta to have d as the slowest moving dimension
+    # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
+    x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
+    delta = delta_proj_weight @ x_dbl[:, :delta_rank].t()
+    delta = rearrange(delta, "d (b l) -> b d l", l=L)
+    if B is None:  # variable B
+        B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl d)
+        if B_proj_bias is not None:
+            B = B + B_proj_bias.to(dtype=B.dtype)
+        if not A.is_complex():
+            B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
+        else:
+            B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
+    if C is None:  # variable C
+        C = x_dbl[:, -d_state:]  # (bl d)
+        if C_proj_bias is not None:
+            C = C + C_proj_bias.to(dtype=C.dtype)
+        if not A.is_complex():
+            C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
+        else:
+            C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
+    # Forward scan plus reversed scan (flipped back), as in BiMambaInnerFn.
+    y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True)
+    y_b = selective_scan_fn(x.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus=True)
+    y = y + y_b.flip([-1])
+    return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias)
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/__init__.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py
new file mode 100644
index 0000000000000000000000000000000000000000..8df9d042a34b6584196f218f5ffeeb104799bd5e
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py
@@ -0,0 +1,636 @@
+# Copyright (c) 2023, Tri Dao.
+# Implement residual + layer_norm / rms_norm.
+
+# Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
+# For the backward pass, we keep weight_grad and bias_grad in registers and accumulate.
+# This is faster for dimensions up to 8k, but after that it's much slower due to register spilling.
+# The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine.
+
+import math
+
+import torch
+import torch.nn.functional as F
+from torch.cuda.amp import custom_fwd, custom_bwd
+
+import triton
+import triton.language as tl
+
+
+def layer_norm_ref(x, weight, bias, residual=None, eps=1e-6, prenorm=False, upcast=False):
+    """Pure-PyTorch reference for fused (residual-add +) layer norm.
+
+    If ``residual`` is given it is added to ``x`` before normalization.
+    ``upcast=True`` performs the math in float32 and casts the result back
+    to the original dtype.  With ``prenorm=True`` the (possibly
+    residual-added) input is returned alongside the normalized output.
+    """
+    dtype = x.dtype
+    if upcast:
+        weight = weight.float()
+        bias = bias.float() if bias is not None else None
+    if upcast:
+        x = x.float()
+        residual = residual.float() if residual is not None else residual
+    if residual is not None:
+        # Residual add happens before normalization (pre-norm style).
+        x = (x + residual).to(x.dtype)
+    out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to(
+        dtype
+    )
+    return out if not prenorm else (out, x)
+
+
+def rms_norm_ref(x, weight, bias, residual=None, eps=1e-6, prenorm=False, upcast=False):
+    """Pure-PyTorch reference for fused (residual-add +) RMS norm.
+
+    Same contract as :func:`layer_norm_ref` but normalizes by the root
+    mean square of the last dimension instead of mean/variance.
+    """
+    dtype = x.dtype
+    if upcast:
+        weight = weight.float()
+        bias = bias.float() if bias is not None else None
+    if upcast:
+        x = x.float()
+        residual = residual.float() if residual is not None else residual
+    if residual is not None:
+        # Residual add happens before normalization (pre-norm style).
+        x = (x + residual).to(x.dtype)
+    # 1 / RMS over the last dimension.
+    rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps)
+    out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight)
+    out = out.to(dtype)
+    return out if not prenorm else (out, x)
+
+
+@triton.autotune(
+    configs=[
+        triton.Config({}, num_warps=1),
+        triton.Config({}, num_warps=2),
+        triton.Config({}, num_warps=4),
+        triton.Config({}, num_warps=8),
+        triton.Config({}, num_warps=16),
+        triton.Config({}, num_warps=32),
+    ],
+    key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS"],
+)
+# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
+# @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None})
+@triton.jit
+def _layer_norm_fwd_1pass_kernel(
+    X, # pointer to the input
+    Y, # pointer to the output
+    W, # pointer to the weights
+    B, # pointer to the biases
+    RESIDUAL, # pointer to the residual
+    RESIDUAL_OUT, # pointer to the residual output (input + residual)
+    Mean, # pointer to the mean
+    Rstd, # pointer to the 1/std
+    stride_x_row, # how much to increase the pointer when moving by 1 row
+    stride_y_row,
+    stride_res_row,
+    stride_res_out_row,
+    N, # number of columns in X
+    eps, # epsilon to avoid division by zero
+    IS_RMS_NORM: tl.constexpr,
+    BLOCK_N: tl.constexpr,
+    HAS_RESIDUAL: tl.constexpr,
+    STORE_RESIDUAL_OUT: tl.constexpr,
+    HAS_BIAS: tl.constexpr,
+):
+    """Single-pass forward kernel: one program normalizes one row of X.
+
+    Optionally fuses a residual add before the norm, stores the
+    pre-norm sum (for the backward / prenorm output), and writes
+    per-row mean (layer norm only) and 1/std statistics.
+    """
+    # Map the program id to the row of X and Y it should compute.
+    row = tl.program_id(0)
+    X += row * stride_x_row
+    Y += row * stride_y_row
+    if HAS_RESIDUAL:
+        RESIDUAL += row * stride_res_row
+    if STORE_RESIDUAL_OUT:
+        RESIDUAL_OUT += row * stride_res_out_row
+    # Compute mean and variance
+    cols = tl.arange(0, BLOCK_N)
+    # Out-of-range columns load 0.0 so reductions below stay correct.
+    x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
+    if HAS_RESIDUAL:
+        residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32)
+        x += residual
+    if STORE_RESIDUAL_OUT:
+        tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
+    if not IS_RMS_NORM:
+        mean = tl.sum(x, axis=0) / N
+        tl.store(Mean + row, mean)
+        xbar = tl.where(cols < N, x - mean, 0.0)
+        var = tl.sum(xbar * xbar, axis=0) / N
+    else:
+        # RMS norm: no mean subtraction, variance is the mean square.
+        xbar = tl.where(cols < N, x, 0.0)
+        var = tl.sum(xbar * xbar, axis=0) / N
+    rstd = 1 / tl.sqrt(var + eps)
+    tl.store(Rstd + row, rstd)
+    # Normalize and apply linear transformation
+    mask = cols < N
+    w = tl.load(W + cols, mask=mask).to(tl.float32)
+    if HAS_BIAS:
+        b = tl.load(B + cols, mask=mask).to(tl.float32)
+    x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
+    y = x_hat * w + b if HAS_BIAS else x_hat * w
+    # Write output
+    tl.store(Y + cols, y, mask=mask)
+
+
+def _layer_norm_fwd(
+ x, weight, bias, eps, residual=None, out_dtype=None, residual_dtype=None, is_rms_norm=False
+):
+ if residual is not None:
+ residual_dtype = residual.dtype
+ M, N = x.shape
+ assert x.stride(-1) == 1
+ if residual is not None:
+ assert residual.stride(-1) == 1
+ assert residual.shape == (M, N)
+ assert weight.shape == (N,)
+ assert weight.stride(-1) == 1
+ if bias is not None:
+ assert bias.stride(-1) == 1
+ assert bias.shape == (N,)
+ # allocate output
+ y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype)
+ assert y.stride(-1) == 1
+ if residual is not None or (residual_dtype is not None and residual_dtype != x.dtype):
+ residual_out = torch.empty(M, N, device=x.device, dtype=residual_dtype)
+ assert residual_out.stride(-1) == 1
+ else:
+ residual_out = None
+ mean = torch.empty((M,), dtype=torch.float32, device="cuda") if not is_rms_norm else None
+ rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
+ # Less than 64KB per feature: enqueue fused kernel
+ MAX_FUSED_SIZE = 65536 // x.element_size()
+ BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
+ if N > BLOCK_N:
+ raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
+ # heuristics for number of warps
+ with torch.cuda.device(x.device.index):
+ _layer_norm_fwd_1pass_kernel[(M,)](
+ x,
+ y,
+ weight,
+ bias,
+ residual,
+ residual_out,
+ mean,
+ rstd,
+ x.stride(0),
+ y.stride(0),
+ residual.stride(0) if residual is not None else 0,
+ residual_out.stride(0) if residual_out is not None else 0,
+ N,
+ eps,
+ is_rms_norm,
+ BLOCK_N,
+ residual is not None,
+ residual_out is not None,
+ bias is not None,
+ )
+ # residual_out is None if residual is None and residual_dtype == input_dtype
+ return y, mean, rstd, residual_out if residual_out is not None else x
+
+
+@triton.autotune(
+    configs=[
+        triton.Config({}, num_warps=1),
+        triton.Config({}, num_warps=2),
+        triton.Config({}, num_warps=4),
+        triton.Config({}, num_warps=8),
+        triton.Config({}, num_warps=16),
+        triton.Config({}, num_warps=32),
+    ],
+    key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS"],
+)
+# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
+# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None})
+# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None})
+@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None})
+@triton.jit
+def _layer_norm_bwd_kernel(
+    X, # pointer to the input
+    W, # pointer to the weights
+    B, # pointer to the biases
+    Y, # pointer to the output to be recomputed
+    DY, # pointer to the output gradient
+    DX, # pointer to the input gradient
+    DW, # pointer to the partial sum of weights gradient
+    DB, # pointer to the partial sum of biases gradient
+    DRESIDUAL,
+    DRESIDUAL_IN,
+    Mean, # pointer to the mean
+    Rstd, # pointer to the 1/std
+    stride_x_row, # how much to increase the pointer when moving by 1 row
+    stride_y_row,
+    stride_dy_row,
+    stride_dx_row,
+    stride_dres_row,
+    stride_dres_in_row,
+    M, # number of rows in X
+    N, # number of columns in X
+    eps, # epsilon to avoid division by zero
+    rows_per_program,
+    IS_RMS_NORM: tl.constexpr,
+    BLOCK_N: tl.constexpr,
+    HAS_DRESIDUAL: tl.constexpr,
+    STORE_DRESIDUAL: tl.constexpr,
+    HAS_BIAS: tl.constexpr,
+    RECOMPUTE_OUTPUT: tl.constexpr,
+):
+    """Backward kernel: each program handles ``rows_per_program`` rows.
+
+    Per-row it computes dx (plus optional dresidual pass-through) and
+    accumulates dw/db locally, writing one partial-sum row per program
+    into DW/DB; the host reduces those partials afterwards.  It can also
+    recompute Y from the saved statistics (used by LayerNormLinearFn,
+    which does not store the normalized output).
+    """
+    # Map the program id to the elements of X, DX, and DY it should compute.
+    row_block_id = tl.program_id(0)
+    row_start = row_block_id * rows_per_program
+    cols = tl.arange(0, BLOCK_N)
+    mask = cols < N
+    X += row_start * stride_x_row
+    if HAS_DRESIDUAL:
+        DRESIDUAL += row_start * stride_dres_row
+    if STORE_DRESIDUAL:
+        DRESIDUAL_IN += row_start * stride_dres_in_row
+    DY += row_start * stride_dy_row
+    DX += row_start * stride_dx_row
+    if RECOMPUTE_OUTPUT:
+        Y += row_start * stride_y_row
+    w = tl.load(W + cols, mask=mask).to(tl.float32)
+    if RECOMPUTE_OUTPUT and HAS_BIAS:
+        b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32)
+    # Per-program accumulators for the weight/bias gradients (kept in
+    # registers across the row loop; see file header comment).
+    dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
+    if HAS_BIAS:
+        db = tl.zeros((BLOCK_N,), dtype=tl.float32)
+    row_end = min((row_block_id + 1) * rows_per_program, M)
+    for row in range(row_start, row_end):
+        # Load data to SRAM
+        x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
+        dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
+        if not IS_RMS_NORM:
+            mean = tl.load(Mean + row)
+        rstd = tl.load(Rstd + row)
+        # Compute dx
+        xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
+        xhat = tl.where(mask, xhat, 0.0)
+        if RECOMPUTE_OUTPUT:
+            y = xhat * w + b if HAS_BIAS else xhat * w
+            tl.store(Y + cols, y, mask=mask)
+        wdy = w * dy
+        dw += dy * xhat
+        if HAS_BIAS:
+            db += dy
+        if not IS_RMS_NORM:
+            # Standard layer-norm backward: project out the components of
+            # wdy along xhat and along the all-ones direction.
+            c1 = tl.sum(xhat * wdy, axis=0) / N
+            c2 = tl.sum(wdy, axis=0) / N
+            dx = (wdy - (xhat * c1 + c2)) * rstd
+        else:
+            c1 = tl.sum(xhat * wdy, axis=0) / N
+            dx = (wdy - xhat * c1) * rstd
+        if HAS_DRESIDUAL:
+            dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32)
+            dx += dres
+        # Write dx
+        if STORE_DRESIDUAL:
+            tl.store(DRESIDUAL_IN + cols, dx, mask=mask)
+        tl.store(DX + cols, dx, mask=mask)
+
+        # Advance all row pointers to the next row.
+        X += stride_x_row
+        if HAS_DRESIDUAL:
+            DRESIDUAL += stride_dres_row
+        if STORE_DRESIDUAL:
+            DRESIDUAL_IN += stride_dres_in_row
+        if RECOMPUTE_OUTPUT:
+            Y += stride_y_row
+        DY += stride_dy_row
+        DX += stride_dx_row
+    # One partial-sum row per program; reduced on the host.
+    tl.store(DW + row_block_id * N + cols, dw, mask=mask)
+    if HAS_BIAS:
+        tl.store(DB + row_block_id * N + cols, db, mask=mask)
+
+
+def _layer_norm_bwd(
+    dy,
+    x,
+    weight,
+    bias,
+    eps,
+    mean,
+    rstd,
+    dresidual=None,
+    has_residual=False,
+    is_rms_norm=False,
+    x_dtype=None,
+    recompute_output=False,
+):
+    """Launch the backward kernel and reduce the per-program dw/db partials.
+
+    Returns ``(dx, dw, db, dresidual_in)`` — plus the recomputed ``y``
+    when ``recompute_output=True``.  ``x`` here is the saved pre-norm
+    input (residual_out from the forward pass).
+    """
+    M, N = x.shape
+    assert x.stride(-1) == 1
+    assert dy.stride(-1) == 1
+    assert dy.shape == (M, N)
+    if dresidual is not None:
+        assert dresidual.stride(-1) == 1
+        assert dresidual.shape == (M, N)
+    assert weight.shape == (N,)
+    assert weight.stride(-1) == 1
+    if bias is not None:
+        assert bias.stride(-1) == 1
+        assert bias.shape == (N,)
+    # allocate output
+    dx = (
+        torch.empty_like(x)
+        if x_dtype is None
+        else torch.empty(M, N, dtype=x_dtype, device=x.device)
+    )
+    # Separate dresidual_in buffer only needed when dtypes differ;
+    # otherwise dx doubles as dresidual_in (aliased below).
+    dresidual_in = torch.empty_like(x) if has_residual and dx.dtype != x.dtype else None
+    y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None
+
+    # Less than 64KB per feature: enqueue fused kernel
+    MAX_FUSED_SIZE = 65536 // x.element_size()
+    BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
+    if N > BLOCK_N:
+        raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
+    # One program per SM; each accumulates a partial dw/db row.
+    sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count
+    _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device)
+    _db = (
+        torch.empty((sm_count, N), dtype=torch.float32, device=bias.device)
+        if bias is not None
+        else None
+    )
+    rows_per_program = math.ceil(M / sm_count)
+    grid = (sm_count,)
+    with torch.cuda.device(x.device.index):
+        _layer_norm_bwd_kernel[grid](
+            x,
+            weight,
+            bias,
+            y,
+            dy,
+            dx,
+            _dw,
+            _db,
+            dresidual,
+            dresidual_in,
+            mean,
+            rstd,
+            x.stride(0),
+            0 if not recompute_output else y.stride(0),
+            dy.stride(0),
+            dx.stride(0),
+            dresidual.stride(0) if dresidual is not None else 0,
+            dresidual_in.stride(0) if dresidual_in is not None else 0,
+            M,
+            N,
+            eps,
+            rows_per_program,
+            is_rms_norm,
+            BLOCK_N,
+            dresidual is not None,
+            dresidual_in is not None,
+            bias is not None,
+        )
+    # Reduce the per-program partial sums to the final parameter grads.
+    dw = _dw.sum(0).to(weight.dtype)
+    db = _db.sum(0).to(bias.dtype) if bias is not None else None
+    # Don't need to compute dresidual_in separately in this case
+    if has_residual and dx.dtype == x.dtype:
+        dresidual_in = dx
+    return (dx, dw, db, dresidual_in) if not recompute_output else (dx, dw, db, dresidual_in, y)
+
+
+class LayerNormFn(torch.autograd.Function):
+    """Autograd wrapper around the fused Triton layer/RMS norm kernels.
+
+    Saves the pre-norm input (residual_out) plus the per-row statistics
+    for the backward pass instead of the normalized output.
+    """
+
+    @staticmethod
+    def forward(
+        ctx,
+        x,
+        weight,
+        bias,
+        residual=None,
+        eps=1e-6,
+        prenorm=False,
+        residual_in_fp32=False,
+        is_rms_norm=False,
+    ):
+        x_shape_og = x.shape
+        # reshape input data into 2D tensor
+        x = x.reshape(-1, x.shape[-1])
+        if x.stride(-1) != 1:
+            x = x.contiguous()
+        if residual is not None:
+            assert residual.shape == x_shape_og
+            residual = residual.reshape(-1, residual.shape[-1])
+            if residual.stride(-1) != 1:
+                residual = residual.contiguous()
+        weight = weight.contiguous()
+        if bias is not None:
+            bias = bias.contiguous()
+        # residual_in_fp32 only matters when no residual tensor is passed:
+        # it asks the kernel to store the pre-norm input in fp32.
+        residual_dtype = (
+            residual.dtype
+            if residual is not None
+            else (torch.float32 if residual_in_fp32 else None)
+        )
+        y, mean, rstd, residual_out = _layer_norm_fwd(
+            x, weight, bias, eps, residual, residual_dtype=residual_dtype, is_rms_norm=is_rms_norm
+        )
+        ctx.save_for_backward(residual_out, weight, bias, mean, rstd)
+        ctx.x_shape_og = x_shape_og
+        ctx.eps = eps
+        ctx.is_rms_norm = is_rms_norm
+        ctx.has_residual = residual is not None
+        ctx.prenorm = prenorm
+        ctx.x_dtype = x.dtype
+        y = y.reshape(x_shape_og)
+        return y if not prenorm else (y, residual_out.reshape(x_shape_og))
+
+    @staticmethod
+    def backward(ctx, dy, *args):
+        # `x` here is the saved pre-norm input (residual_out).
+        x, weight, bias, mean, rstd = ctx.saved_tensors
+        dy = dy.reshape(-1, dy.shape[-1])
+        if dy.stride(-1) != 1:
+            dy = dy.contiguous()
+        assert dy.shape == x.shape
+        if ctx.prenorm:
+            # Gradient w.r.t. the second (prenorm) output.
+            dresidual = args[0]
+            dresidual = dresidual.reshape(-1, dresidual.shape[-1])
+            if dresidual.stride(-1) != 1:
+                dresidual = dresidual.contiguous()
+            assert dresidual.shape == x.shape
+        else:
+            dresidual = None
+        dx, dw, db, dresidual_in = _layer_norm_bwd(
+            dy,
+            x,
+            weight,
+            bias,
+            ctx.eps,
+            mean,
+            rstd,
+            dresidual,
+            ctx.has_residual,
+            ctx.is_rms_norm,
+            x_dtype=ctx.x_dtype,
+        )
+        # One gradient (or None) per forward argument.
+        return (
+            dx.reshape(ctx.x_shape_og),
+            dw,
+            db,
+            dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
+            None,
+            None,
+            None,
+            None,
+        )
+
+
+def layer_norm_fn(
+    x,
+    weight,
+    bias,
+    residual=None,
+    eps=1e-6,
+    prenorm=False,
+    residual_in_fp32=False,
+    is_rms_norm=False,
+):
+    """Functional entry point for the fused (residual-add +) layer norm."""
+    return LayerNormFn.apply(x, weight, bias, residual, eps, prenorm, residual_in_fp32, is_rms_norm)
+
+
+def rms_norm_fn(x, weight, bias, residual=None, prenorm=False, residual_in_fp32=False, eps=1e-6):
+    """Functional entry point for the fused RMS norm (is_rms_norm=True)."""
+    return LayerNormFn.apply(x, weight, bias, residual, eps, prenorm, residual_in_fp32, True)
+
+
+class RMSNorm(torch.nn.Module):
+    """RMS norm module backed by the fused Triton kernel (weight only, no bias)."""
+
+    def __init__(self, hidden_size, eps=1e-5, device=None, dtype=None):
+        factory_kwargs = {"device": device, "dtype": dtype}
+        super().__init__()
+        self.eps = eps
+        self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
+        # RMS norm has no bias; register None so state_dict/forward stay uniform.
+        self.register_parameter("bias", None)
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        # Scale initialized to 1 (identity normalization).
+        torch.nn.init.ones_(self.weight)
+
+    def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
+        return rms_norm_fn(
+            x,
+            self.weight,
+            self.bias,
+            residual=residual,
+            eps=self.eps,
+            prenorm=prenorm,
+            residual_in_fp32=residual_in_fp32,
+            is_rms_norm=True,
+        )
+
+
+class LayerNormLinearFn(torch.autograd.Function):
+    """Fused (residual-add +) layer/RMS norm followed by a linear layer.
+
+    Memory saver: the normalized activation ``y`` is NOT stored for the
+    backward pass — it is recomputed from the saved pre-norm input and
+    statistics inside ``_layer_norm_bwd(recompute_output=True)``.
+    """
+
+    @staticmethod
+    @custom_fwd
+    def forward(
+        ctx,
+        x,
+        norm_weight,
+        norm_bias,
+        linear_weight,
+        linear_bias,
+        residual=None,
+        eps=1e-6,
+        prenorm=False,
+        residual_in_fp32=False,
+        is_rms_norm=False,
+    ):
+        x_shape_og = x.shape
+        # reshape input data into 2D tensor
+        x = x.reshape(-1, x.shape[-1])
+        if x.stride(-1) != 1:
+            x = x.contiguous()
+        if residual is not None:
+            assert residual.shape == x_shape_og
+            residual = residual.reshape(-1, residual.shape[-1])
+            if residual.stride(-1) != 1:
+                residual = residual.contiguous()
+        norm_weight = norm_weight.contiguous()
+        if norm_bias is not None:
+            norm_bias = norm_bias.contiguous()
+        residual_dtype = (
+            residual.dtype
+            if residual is not None
+            else (torch.float32 if residual_in_fp32 else None)
+        )
+        y, mean, rstd, residual_out = _layer_norm_fwd(
+            x,
+            norm_weight,
+            norm_bias,
+            eps,
+            residual,
+            out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype(),
+            residual_dtype=residual_dtype,
+            is_rms_norm=is_rms_norm,
+        )
+        y = y.reshape(x_shape_og)
+        # Under autocast, run the linear in the autocast dtype.
+        dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype
+        linear_weight = linear_weight.to(dtype)
+        linear_bias = linear_bias.to(dtype) if linear_bias is not None else None
+        out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias)
+        # We don't store y, will be recomputed in the backward pass to save memory
+        ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd)
+        ctx.x_shape_og = x_shape_og
+        ctx.eps = eps
+        ctx.is_rms_norm = is_rms_norm
+        ctx.has_residual = residual is not None
+        ctx.prenorm = prenorm
+        ctx.x_dtype = x.dtype
+        ctx.linear_bias_is_none = linear_bias is None
+        return out if not prenorm else (out, residual_out.reshape(x_shape_og))
+
+    @staticmethod
+    @custom_bwd
+    def backward(ctx, dout, *args):
+        x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors
+        dout = dout.reshape(-1, dout.shape[-1])
+        # Backprop through the linear first: dy = dout @ W.
+        dy = F.linear(dout, linear_weight.t())
+        dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
+        if dy.stride(-1) != 1:
+            dy = dy.contiguous()
+        assert dy.shape == x.shape
+        if ctx.prenorm:
+            dresidual = args[0]
+            dresidual = dresidual.reshape(-1, dresidual.shape[-1])
+            if dresidual.stride(-1) != 1:
+                dresidual = dresidual.contiguous()
+            assert dresidual.shape == x.shape
+        else:
+            dresidual = None
+        # recompute_output=True also returns the recomputed normalized y,
+        # needed below for the linear weight gradient.
+        dx, dnorm_weight, dnorm_bias, dresidual_in, y = _layer_norm_bwd(
+            dy,
+            x,
+            norm_weight,
+            norm_bias,
+            ctx.eps,
+            mean,
+            rstd,
+            dresidual,
+            ctx.has_residual,
+            ctx.is_rms_norm,
+            x_dtype=ctx.x_dtype,
+            recompute_output=True,
+        )
+        dlinear_weight = torch.einsum("bo,bi->oi", dout, y)
+        return (
+            dx.reshape(ctx.x_shape_og),
+            dnorm_weight,
+            dnorm_bias,
+            dlinear_weight,
+            dlinear_bias,
+            dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
+            None,
+            None,
+            None,
+            None,
+        )
+
+
+def layer_norm_linear_fn(
+    x,
+    norm_weight,
+    norm_bias,
+    linear_weight,
+    linear_bias,
+    residual=None,
+    eps=1e-6,
+    prenorm=False,
+    residual_in_fp32=False,
+    is_rms_norm=False,
+):
+    """Functional entry point for the fused norm + linear (see LayerNormLinearFn)."""
+    return LayerNormLinearFn.apply(
+        x,
+        norm_weight,
+        norm_bias,
+        linear_weight,
+        linear_bias,
+        residual,
+        eps,
+        prenorm,
+        residual_in_fp32,
+        is_rms_norm,
+    )
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa95de73f173292914c5f00fbe9426937d00e502
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2023, Tri Dao.
+
+"""We want triton==2.1.0 for this
+"""
+
+import math
+import torch
+import torch.nn.functional as F
+
+import triton
+import triton.language as tl
+
+from einops import rearrange, repeat
+
+
+@triton.heuristics({"HAS_DT_BIAS": lambda args: args["dt_bias_ptr"] is not None})
+@triton.heuristics({"HAS_D": lambda args: args["D_ptr"] is not None})
+@triton.heuristics({"HAS_Z": lambda args: args["z_ptr"] is not None})
+@triton.heuristics({"BLOCK_SIZE_DSTATE": lambda args: triton.next_power_of_2(args["dstate"])})
+@triton.jit
+def _selective_scan_update_kernel(
+    # Pointers to matrices
+    state_ptr, x_ptr, dt_ptr, dt_bias_ptr, A_ptr, B_ptr, C_ptr, D_ptr, z_ptr, out_ptr,
+    # Matrix dimensions
+    batch, dim, dstate,
+    # Strides
+    stride_state_batch, stride_state_dim, stride_state_dstate,
+    stride_x_batch, stride_x_dim,
+    stride_dt_batch, stride_dt_dim,
+    stride_dt_bias_dim,
+    stride_A_dim, stride_A_dstate,
+    stride_B_batch, stride_B_dstate,
+    stride_C_batch, stride_C_dstate,
+    stride_D_dim,
+    stride_z_batch, stride_z_dim,
+    stride_out_batch, stride_out_dim,
+    # Meta-parameters
+    DT_SOFTPLUS: tl.constexpr,
+    BLOCK_SIZE_M: tl.constexpr,
+    HAS_DT_BIAS: tl.constexpr,
+    HAS_D: tl.constexpr,
+    HAS_Z: tl.constexpr,
+    BLOCK_SIZE_DSTATE: tl.constexpr,
+):
+    """Single-step SSM state update (one token of inference).
+
+    Grid is (dim blocks, batch): each program updates a BLOCK_SIZE_M
+    slice of `state` IN PLACE via state = state*exp(A*dt) + B*dt*x, and
+    writes out = sum_n(state * C) (+ D*x, gated by silu(z) when present).
+    """
+    pid_m = tl.program_id(axis=0)
+    pid_b = tl.program_id(axis=1)
+    # Offset all batch-indexed pointers to this batch element.
+    state_ptr += pid_b * stride_state_batch
+    x_ptr += pid_b * stride_x_batch
+    dt_ptr += pid_b * stride_dt_batch
+    B_ptr += pid_b * stride_B_batch
+    C_ptr += pid_b * stride_C_batch
+    if HAS_Z:
+        z_ptr += pid_b * stride_z_batch
+    out_ptr += pid_b * stride_out_batch
+
+    # offs_m ranges over the dim slice of this program; offs_n over dstate.
+    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
+    offs_n = tl.arange(0, BLOCK_SIZE_DSTATE)
+    state_ptrs = state_ptr + (offs_m[:, None] * stride_state_dim + offs_n[None, :] * stride_state_dstate)
+    x_ptrs = x_ptr + offs_m * stride_x_dim
+    dt_ptrs = dt_ptr + offs_m * stride_dt_dim
+    if HAS_DT_BIAS:
+        dt_bias_ptrs = dt_bias_ptr + offs_m * stride_dt_bias_dim
+    A_ptrs = A_ptr + (offs_m[:, None] * stride_A_dim + offs_n[None, :] * stride_A_dstate)
+    B_ptrs = B_ptr + offs_n * stride_B_dstate
+    C_ptrs = C_ptr + offs_n * stride_C_dstate
+    if HAS_D:
+        D_ptrs = D_ptr + offs_m * stride_D_dim
+    if HAS_Z:
+        z_ptrs = z_ptr + offs_m * stride_z_dim
+    out_ptrs = out_ptr + offs_m * stride_out_dim
+
+    state = tl.load(state_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0)
+    x = tl.load(x_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
+    dt = tl.load(dt_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
+    if HAS_DT_BIAS:
+        dt += tl.load(dt_bias_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
+    if DT_SOFTPLUS:
+        # softplus(dt) = log(1 + exp(dt))
+        dt = tl.log(1.0 + tl.exp(dt))
+    A = tl.load(A_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32)
+    dA = tl.exp(A * dt[:, None])
+    B = tl.load(B_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32)
+    C = tl.load(C_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32)
+    if HAS_D:
+        D = tl.load(D_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
+    if HAS_Z:
+        z = tl.load(z_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
+
+    # Discretized recurrence, then persist the new state in place.
+    dB = B[None, :] * dt[:, None]
+    state = state * dA + dB * x[:, None]
+    tl.store(state_ptrs, state, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate))
+    out = tl.sum(state * C[None, :], axis=1)
+    if HAS_D:
+        out += x * D
+    if HAS_Z:
+        # silu gate: out *= z * sigmoid(z)
+        out *= z * tl.sigmoid(z)
+    tl.store(out_ptrs, out, mask=offs_m < dim)
+
+
+def selective_state_update(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False):
+    """
+    Single-token selective-scan state update (Triton launcher).
+
+    Note: `state` is updated IN PLACE.
+
+    Argument:
+        state: (batch, dim, dstate)
+        x: (batch, dim)
+        dt: (batch, dim)
+        A: (dim, dstate)
+        B: (batch, dstate)
+        C: (batch, dstate)
+        D: (dim,)
+        z: (batch, dim)
+        dt_bias: (dim,)
+    Return:
+        out: (batch, dim)
+    """
+    batch, dim, dstate = state.shape
+    assert x.shape == (batch, dim)
+    assert dt.shape == x.shape
+    assert A.shape == (dim, dstate)
+    assert B.shape == (batch, dstate)
+    assert C.shape == B.shape
+    if D is not None:
+        assert D.shape == (dim,)
+    if z is not None:
+        assert z.shape == x.shape
+    if dt_bias is not None:
+        assert dt_bias.shape == (dim,)
+    out = torch.empty_like(x)
+    # 2D grid: dim blocks x batch.
+    grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE_M']), batch)
+    z_strides = ((z.stride(0), z.stride(1)) if z is not None else (0, 0))
+    # We don't want autotune since it will overwrite the state
+    # We instead tune by hand.
+    # Larger dstate -> smaller dim block (and more warps at the extreme).
+    BLOCK_SIZE_M, num_warps = ((32, 4) if dstate <= 16
+                               else ((16, 4) if dstate <= 32 else
+                                     ((8, 4) if dstate <= 64 else
+                                      ((4, 4) if dstate <= 128 else
+                                       ((4, 8))))))
+    with torch.cuda.device(x.device.index):
+        _selective_scan_update_kernel[grid](
+            state, x, dt, dt_bias, A, B, C, D, z, out,
+            batch, dim, dstate,
+            state.stride(0), state.stride(1), state.stride(2),
+            x.stride(0), x.stride(1),
+            dt.stride(0), dt.stride(1),
+            dt_bias.stride(0) if dt_bias is not None else 0,
+            A.stride(0), A.stride(1),
+            B.stride(0), B.stride(1),
+            C.stride(0), C.stride(1),
+            D.stride(0) if D is not None else 0,
+            z_strides[0], z_strides[1],
+            out.stride(0), out.stride(1),
+            dt_softplus,
+            BLOCK_SIZE_M,
+            num_warps=num_warps,
+        )
+    return out
+
+
+def selective_state_update_ref(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False):
+    """
+    Pure-PyTorch reference for `selective_state_update` (state updated IN PLACE).
+
+    Argument:
+        state: (batch, dim, dstate)
+        x: (batch, dim)
+        dt: (batch, dim)
+        A: (dim, dstate)
+        B: (batch, dstate)
+        C: (batch, dstate)
+        D: (dim,)
+        z: (batch, dim)
+        dt_bias: (dim,)
+    Return:
+        out: (batch, dim)
+    """
+    batch, dim, dstate = state.shape
+    assert x.shape == (batch, dim)
+    assert dt.shape == x.shape
+    assert A.shape == (dim, dstate)
+    assert B.shape == (batch, dstate)
+    assert C.shape == B.shape
+    if D is not None:
+        assert D.shape == (dim,)
+    if z is not None:
+        assert z.shape == x.shape
+    if dt_bias is not None:
+        assert dt_bias.shape == (dim,)
+    # NOTE(review): unlike the kernel, this adds dt_bias unconditionally,
+    # so dt_bias=None would raise here — callers appear to always pass it.
+    dt = dt + dt_bias
+    dt = F.softplus(dt) if dt_softplus else dt
+    dA = torch.exp(rearrange(dt, "b d -> b d 1") * A) # (batch, dim, dstate)
+    dB = rearrange(dt, "b d -> b d 1") * rearrange(B, "b n -> b 1 n") # (batch, dim, dstate)
+    state.copy_(state * dA + dB * rearrange(x, "b d -> b d 1")) # (batch, dim, dstate)
+    out = torch.einsum("bdn,bn->bd", state.to(C.dtype), C)
+    if D is not None:
+        out += (x * D).to(out.dtype)
+    # Optional silu(z) gating, cast back to the input dtype.
+    return (out if z is None else out * F.silu(z)).to(x.dtype)
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/__init__.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d766b29ac28a388a7d77b22aa2cb1eda733c0f4
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py
@@ -0,0 +1,377 @@
+# Copyright (c) 2023, Albert Gu, Tri Dao.
+import gc
+import time
+from collections import namedtuple
+from dataclasses import dataclass, field
+from functools import partial
+from typing import Callable, Optional, Sequence, Union
+
+import torch
+import torch.nn.functional as F
+from einops import rearrange, repeat
+from torch import Tensor
+from torch.profiler import ProfilerActivity, profile, record_function
+from transformers.generation import GreedySearchDecoderOnlyOutput, SampleDecoderOnlyOutput
+
+
@dataclass
class InferenceParams:
    """Decoding-time context (cache capacities and current offsets) that is
    threaded through the model so it can efficiently compute and store state
    during incremental inference."""

    max_seqlen: int
    max_batch_size: int
    seqlen_offset: int = 0
    batch_size_offset: int = 0
    key_value_memory_dict: dict = field(default_factory=dict)
    lengths_per_sample: Optional[Tensor] = None

    def reset(self, max_seqlen, max_batch_size):
        """Re-arm this object for a fresh generation with new capacity limits."""
        self.max_seqlen = max_seqlen
        self.max_batch_size = max_batch_size
        self.seqlen_offset = 0
        lengths = self.lengths_per_sample
        if lengths is not None:
            lengths.zero_()
+
+
+# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py
+# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L231
def modify_logits_for_top_k_filtering(logits, top_k):
    """Mask out, in place, every logit that is not among the top-k per row (set to -inf)."""
    # Threshold is the k-th largest value in each row.
    kth_value = torch.topk(logits, top_k)[0][..., -1, None]
    logits.masked_fill_(logits < kth_value, float("-Inf"))
+
+
+# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py
+# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L170
def modify_logits_for_top_p_filtering(logits, top_p):
    """Mask out, in place, logits outside the top-p nucleus (set to -inf).

    A no-op unless 0 < top_p < 1.
    """
    if not 0.0 < top_p < 1.0:
        return
    # Sort ascending so the cumulative sum identifies the low-probability tail.
    sorted_logits, sorted_indices = torch.sort(logits, descending=False)
    cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
    # Tokens whose cumulative probability stays within the discarded (1 - top_p)
    # mass are removed; the highest-probability token always survives.
    tail_mask = cumulative_probs <= (1 - top_p)
    # Map the mask from sorted order back to the original token positions.
    indices_to_remove = tail_mask.scatter(1, sorted_indices, tail_mask)
    logits.masked_fill_(indices_to_remove, float("-inf"))
+
+
def sample(logits, top_k=1, top_p=0.0, temperature=1.0):
    """Sample one token id per row.

    Arguments:
        logits: Tensor of shape (batch_size, vocab_size)
    top_k == 1 is greedy decoding. Otherwise top-k filtering (if top_k > 0)
    and/or top-p filtering are applied before multinomial sampling at the
    given temperature.
    """
    if top_k == 1:  # Short-circuit for greedy decoding
        return logits.argmax(dim=-1)
    if top_p > 0.0:
        assert top_p <= 1.0, "top-p should be in (0, 1]."
    if top_k > 0:
        # Restrict to the k most likely tokens, then (optionally) apply top-p.
        top_k = min(top_k, logits.size(-1))  # Safety check
        logits_top, indices = torch.topk(logits, top_k, dim=-1)
        if temperature != 1.0:
            logits_top /= temperature
        modify_logits_for_top_p_filtering(logits_top, top_p)
        sampled = torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1)
        return indices[torch.arange(indices.shape[0], device=indices.device), sampled]
    # top_k == 0: pure sampling over the whole vocabulary (after top-p filtering).
    # Clone so the in-place top-p filtering doesn't change the caller's logits.
    logits_top = logits / temperature if temperature != 1.0 else logits.clone()
    modify_logits_for_top_p_filtering(logits_top, top_p)
    return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1)
+
+
@torch.inference_mode()
def decode(
    input_ids,
    model,
    max_length,
    top_k=1,
    top_p=0.0,
    temperature=1.0,
    eos_token_id=None,
    teacher_outputs=None,
    vocab_size=None,
    tensor_parallel=1,
    cg=False,
    enable_timing=False,
):
    """Decoding, either greedy or with top-k or top-p sampling.
    If top-k = 0, don't limit the number of candidates (pure sampling).
    Top-k and top-p can be used together. If top_k > 0 and top_p > 0, then top-k is applied first,
    then top-p.
    We assume that all sequences in the same batch have the same length.

    Arguments:
        input_ids: (batch, seq_len)
        max_length: int
        eos_token_id (optional): stop once every sequence has emitted this token.
        teacher_outputs (optional): (batch, seq_len). If provided, instead of sampling from the
            logits, the next token is taken from the teacher_outputs. Useful for testing.
        vocab_size (optional): if set, logits are truncated to the first `vocab_size` entries.
        tensor_parallel: number of tensor-parallel ranks (used here only for timing barriers
            and for sizing the CUDA-graph cache).
        cg: if True, use (and lazily build) a CUDA-graph cache for the per-token decode step.
        enable_timing: if True, print prompt-processing + decoding wall time via CUDA events.
    Returns: GreedySearchDecoderOnlyOutput or SampleDecoderOnlyOutput, with the following fields:
        sequences: (batch, max_length)
        scores: tuples of (batch, vocab_size)
    """
    batch_size, seqlen_og = input_ids.shape
    teacher_output_len = teacher_outputs.shape[1] if teacher_outputs is not None else 0
    if cg:
        # CUDA-graph path: reuse (or rebuild) the per-model decoding cache.
        if not hasattr(model, "_decoding_cache"):
            model._decoding_cache = None
        model._decoding_cache = update_graph_cache(
            model,
            model._decoding_cache,
            batch_size,
            seqlen_og,
            max_length,
            tensor_parallel=tensor_parallel,
        )
        inference_params = model._decoding_cache.inference_params
        inference_params.reset(max_length, batch_size)
    else:
        inference_params = InferenceParams(max_seqlen=max_length, max_batch_size=batch_size)

    def get_logits(input_ids, inference_params):
        # Returns the last-position logits, shape (batch, vocab).
        decoding = inference_params.seqlen_offset > 0
        if decoding:
            # Single-token step: every sequence is at the same offset.
            position_ids = torch.full(
                (batch_size, 1),
                inference_params.seqlen_offset,
                dtype=torch.long,
                device=input_ids.device,
            )
        else:
            position_ids = None
        if not cg or not decoding:
            # Prompt processing, or non-CUDA-graph decoding: run the model directly.
            logits = model(
                input_ids,
                position_ids=position_ids,
                inference_params=inference_params,
                num_last_tokens=1,
            ).logits.squeeze(dim=1)
        else:
            # CUDA-graph decoding step: replay the captured graph.
            logits = model._decoding_cache.run(
                input_ids, position_ids, inference_params.seqlen_offset
            ).squeeze(dim=1)
        return logits[..., :vocab_size] if vocab_size is not None else logits

    def sample_tokens(logits, inference_params):
        # Sample the next token, or take it from teacher_outputs while available.
        if teacher_outputs is None or teacher_output_len <= inference_params.seqlen_offset:
            token = sample(logits, top_k=top_k, top_p=top_p, temperature=temperature)
        else:
            token = teacher_outputs[:, inference_params.seqlen_offset]
        # return rearrange(token, "b -> b 1")
        return token.unsqueeze(1)

    def should_stop(current_token, inference_params):
        # Never stop before the first sampling step.
        if inference_params.seqlen_offset == 0:
            return False
        if eos_token_id is not None and (current_token == eos_token_id).all():
            return True
        if inference_params.seqlen_offset >= max_length - 1:
            return True
        return False

    # NOTE(review): CUDA events are created even when enable_timing is False;
    # this assumes a CUDA-enabled runtime — confirm before CPU-only use.
    start = torch.cuda.Event(enable_timing=enable_timing)
    end = torch.cuda.Event(enable_timing=enable_timing)

    if enable_timing:
        if tensor_parallel > 1:
            torch.distributed.barrier()
        start.record()
    # Main loop: scores[i] holds the logits used to sample sequences[i + 1].
    scores, sequences = [], [input_ids]
    while not should_stop(sequences[-1], inference_params):
        scores.append(get_logits(sequences[-1], inference_params))
        inference_params.seqlen_offset += sequences[-1].shape[1]
        sequences.append(sample_tokens(scores[-1], inference_params))
    if enable_timing:
        end.record()
        if tensor_parallel > 1:
            torch.distributed.barrier()
        torch.cuda.synchronize()
        print(f"Prompt processing + decoding time: {(start.elapsed_time(end)):.0f}ms")
    output_cls = GreedySearchDecoderOnlyOutput if top_k == 1 else SampleDecoderOnlyOutput
    return output_cls(sequences=torch.cat(sequences, dim=1), scores=tuple(scores))
+
+
class GenerationMixin:
    """Adds a HuggingFace-style `generate` API on top of the `decode` loop."""

    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
        # Subclasses must supply their own inference-cache allocation.
        raise NotImplementedError

    def generate(
        self,
        input_ids,
        max_length,
        top_k=1,
        top_p=0.0,
        temperature=1.0,
        return_dict_in_generate=False,
        output_scores=False,
        **kwargs,
    ):
        """Generate up to `max_length` tokens starting from `input_ids`.

        Returns the full output object when `return_dict_in_generate` is True,
        otherwise just the generated `sequences` tensor. Scores are dropped
        unless `output_scores` is True.
        """
        result = decode(
            input_ids, self, max_length, top_k=top_k, top_p=top_p, temperature=temperature, **kwargs
        )
        if not output_scores:
            result.scores = None
        if return_dict_in_generate:
            return result
        return result.sequences
+
+
def allocate_inference_cache(
    max_batch_size,
    max_seqlen,
    nheads,
    headdim,
    layers: Union[int, Sequence],
    device,
    dtype=torch.float16,
):
    """Pre-allocate one empty KV-cache tensor per layer.

    Each tensor has shape (max_batch_size, max_seqlen, 2, nheads, headdim);
    `layers` may be a layer count or an explicit sequence of layer indices.
    Returns a dict mapping layer index -> tensor.
    """
    assert dtype in [torch.float16, torch.bfloat16, torch.float32]
    layer_indices = range(layers) if isinstance(layers, int) else layers
    shape = (max_batch_size, max_seqlen, 2, nheads, headdim)
    return {idx: torch.empty(shape, device=device, dtype=dtype) for idx in layer_indices}
+
+
@dataclass
class DecodingCGCache:
    """Cache of captured CUDA-graph callables (plus the shared inference state
    they run against) used to speed up the per-token decoding step."""

    max_batch_size: int = 0
    max_seqlen: int = 0
    # NOTE(review): `device`, `dtype` and `mempool` carry no type annotation,
    # so they are class attributes rather than dataclass fields (they do not
    # appear in __init__); update_graph_cache assigns them per instance.
    device = None
    dtype = None
    # Maps (batch_size, decoding_seqlen) -> captured-graph callable.
    callables: dict = field(default_factory=dict)
    mempool = None
    inference_params: Optional[InferenceParams] = None
    # Dispatcher set by update_graph_cache: run(input_ids, position_ids, seqlen).
    run: Optional[Callable] = None
+
+
@torch.inference_mode()
def update_graph_cache(
    model,
    cache,
    batch_size,
    seqlen_og,
    max_seqlen,
    decoding_seqlens=(1,),
    tensor_parallel=1,
    dtype=None,
    n_warmups=2,
):
    """Create or refresh a DecodingCGCache for `model`, capturing one CUDA
    graph per requested decoding sequence length.

    The cache is invalidated and rebuilt when the model's device/dtype differ
    from the cached ones, or when the requested batch size / max sequence
    length exceed the cached capacities. Returns the (possibly new) cache
    with `.run` set to a dispatcher keyed by (batch_size, decoding_seqlen).
    """
    if cache is None:
        cache = DecodingCGCache()
    # Infer device/dtype from an arbitrary model parameter.
    param_example = next(iter(model.parameters()))
    device = param_example.device
    if dtype is None:
        dtype = param_example.dtype
    if (
        (device, dtype) != (cache.device, cache.dtype)
        or batch_size > cache.max_batch_size
        or max_seqlen > cache.max_seqlen
    ):  # Invalidate the cache
        cache.callables = {}
        cache.mempool = None
        cache.inference_params = None
        gc.collect()
        cache.device, cache.dtype = device, dtype
        cache.max_batch_size, cache.max_seqlen = batch_size, max_seqlen
        if hasattr(model, "allocate_inference_cache"):
            # Model-specific inference cache (e.g. Mamba conv/SSM states).
            inf_cache = model.allocate_inference_cache(batch_size, max_seqlen, dtype)
        else:
            # Fall back to a standard attention KV cache sized from the config.
            headdim = getattr(
                model.config,
                "head_dim",
                model.config.hidden_size // model.config.num_attention_heads,
            )
            inf_cache = allocate_inference_cache(
                batch_size,
                max_seqlen,
                model.config.num_attention_heads // tensor_parallel,
                headdim,
                model.config.num_hidden_layers,
                device,
                dtype,
            )
        lengths_per_sample = torch.full((batch_size,), seqlen_og, dtype=torch.int32, device=device)
        cache.inference_params = InferenceParams(
            max_seqlen=max_seqlen,
            max_batch_size=batch_size,
            seqlen_offset=seqlen_og,
            key_value_memory_dict=inf_cache,
            lengths_per_sample=lengths_per_sample,
        )
        # Shared memory pool so all captured graphs reuse the same allocations.
        cache.mempool = torch.cuda.graphs.graph_pool_handle()
    # Capture any (batch_size, decoding_seqlen) combinations not yet cached.
    for decoding_seqlen in decoding_seqlens:
        if (batch_size, decoding_seqlen) not in cache.callables:
            cache.callables[batch_size, decoding_seqlen] = capture_graph(
                model,
                cache.inference_params,
                batch_size,
                max_seqlen,
                decoding_seqlen=decoding_seqlen,
                mempool=cache.mempool,
                n_warmups=n_warmups,
            )

    def dispatch(input_ids, position_ids, seqlen):
        # Route to the graph captured for this (batch, decoding length).
        batch_size, decoding_seqlen = input_ids.shape[:2]
        return cache.callables[batch_size, decoding_seqlen](input_ids, position_ids, seqlen)

    cache.run = dispatch
    cache.inference_params.seqlen_offset = 0  # Reset so it's not confusing
    return cache
+
+
def capture_graph(
    model, inference_params, batch_size, max_seqlen, decoding_seqlen=1, mempool=None, n_warmups=2
):
    """Capture one CUDA graph for a decoding step of `decoding_seqlen` tokens
    and return a `run(input_ids, position_ids, seqlen)` callable that replays it.

    The callable copies its arguments into the graph's static input buffers,
    replays the graph, and returns a clone of the static logits output.
    `inference_params` is mutated during capture and its seqlen_offset is
    restored before returning.
    """
    device = next(iter(model.parameters())).device
    # Static input buffers: every replay reads from these same tensors.
    input_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device)
    position_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device)
    seqlen_offset_og = inference_params.seqlen_offset
    # Capture at the largest valid offset so the captured step is shape-stable.
    inference_params.seqlen_offset = max_seqlen - decoding_seqlen
    inference_params.lengths_per_sample[:] = inference_params.seqlen_offset

    # Warmup before capture
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        for _ in range(n_warmups):
            logits = model(
                input_ids,
                position_ids=position_ids,
                inference_params=inference_params,
                num_last_tokens=decoding_seqlen,
            ).logits
        s.synchronize()
        # This might be needed for correctness if we run with NCCL_GRAPH_MIXING_SUPPORT=0,
        # which requires that graph launch and non-captured launch to not overlap (I think,
        # that's how I interpret the documentation). I'm not sure if this is required.
        if torch.distributed.is_initialized():
            torch.distributed.barrier()
    torch.cuda.current_stream().wait_stream(s)
    # Captures the graph
    # To allow capture, automatically sets a side stream as the current stream in the context
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph, pool=mempool):
        logits = model(
            input_ids,
            position_ids=position_ids,
            inference_params=inference_params,
            num_last_tokens=decoding_seqlen,
        ).logits

    def run(new_input_ids, new_position_ids, seqlen):
        # Refresh static inputs, replay, and return a copy of the static output
        # (the clone decouples the result from the graph's output buffer).
        inference_params.lengths_per_sample[:] = seqlen
        input_ids.copy_(new_input_ids)
        position_ids.copy_(new_position_ids)
        graph.replay()
        return logits.clone()

    inference_params.seqlen_offset = seqlen_offset_og
    return run
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d7555acddbd260636d1d14d5bd6324f6af0056a
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py
@@ -0,0 +1,23 @@
+import json
+
+import torch
+
+from transformers.utils import WEIGHTS_NAME, CONFIG_NAME
+from transformers.utils.hub import cached_file
+
+
def load_config_hf(model_name):
    """Fetch (from the HF hub or local cache) and parse a model's config JSON.

    Returns the parsed config as a dict.
    """
    resolved_archive_file = cached_file(model_name, CONFIG_NAME, _raise_exceptions_for_missing_entries=False)
    # Use a context manager so the file handle is closed promptly
    # (the original `json.load(open(...))` left closing to the GC).
    with open(resolved_archive_file) as f:
        return json.load(f)
+
+
def load_state_dict_hf(model_name, device=None, dtype=None):
    """Fetch (from the HF hub or local cache) a model's weights and return the state dict.

    When a non-fp32 `dtype` is requested the weights are loaded to CPU first,
    converted, and only then moved to `device`, to limit peak GPU memory.
    """
    # If not fp32, then we don't want to load directly to the GPU
    mapped_device = "cpu" if dtype not in [torch.float32, None] else device
    resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False)
    # Bug fix: the original returned here, leaving the dtype/device conversion
    # below unreachable and silently ignoring those parameters.
    state_dict = torch.load(resolved_archive_file, map_location=mapped_device)
    # Convert dtype before moving to GPU to save memory
    if dtype is not None:
        state_dict = {k: v.to(dtype=dtype) for k, v in state_dict.items()}
    state_dict = {k: v.to(device=device) for k, v in state_dict.items()}
    return state_dict
diff --git a/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..614433170ff0d70d84e91250d258b1596a92d08d
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de497e1e8bf755807db1be7e7a915b2711899de71e3c20f26bf9ed388d485b39
+size 61424368
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps
new file mode 100644
index 0000000000000000000000000000000000000000..868105a23d6da24b20cd9b9b649e563c81cbfaa8
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4831c2f877d7c143edf32ef11a91a00533610c0df16673d58e5d734eb5fde2a
+size 679492
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_log b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_log
new file mode 100644
index 0000000000000000000000000000000000000000..ba4797cd6d493c56ebea07adb348cc082b09d0dc
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_log
@@ -0,0 +1,11 @@
+# ninja log v7
+1 19805 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o cb5c8304b521546b
+2 41314 1766954697431967553 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o 292bedf00520f73c
+19845 60355 1766954717272866994 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o f16354525b1b0ec9
+2 69379 1766954697431967553 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o 722bc0a6c32ace27
+1 71204 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o d3fa7d01cfc27105
+1 71283 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o 7e325ee323faedd
+41320 80944 1766954738749840606 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o d1292e78dbee8802
+2 99285 1766954697431967553 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o 7d1d4df97ab3b9ab
+1 100796 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o d85521417cf6cf13
+1 101411 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o 9e39e25fb3232edb
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja
new file mode 100644
index 0000000000000000000000000000000000000000..b4ae4e0af9f7546bbb7ab04ce54801a4a6c41a7c
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja
@@ -0,0 +1,46 @@
+ninja_required_version = 1.3
+cxx = c++
+nvcc = /usr/local/cuda/bin/nvcc
+
+cflags = -pthread -B /home/zeus/miniconda3/envs/cloudspace/compiler_compat -fno-strict-overflow -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /home/zeus/miniconda3/envs/cloudspace/include -fPIC -O2 -isystem /home/zeus/miniconda3/envs/cloudspace/include -fPIC -I/teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan -I/home/zeus/miniconda3/envs/cloudspace/lib/python3.12/site-packages/torch/include -I/home/zeus/miniconda3/envs/cloudspace/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/home/zeus/miniconda3/envs/cloudspace/include/python3.12 -c
+post_cflags = -O3 -std=c++17 -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1018"' -DTORCH_EXTENSION_NAME=selective_scan_cuda
+cuda_cflags = -I/teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan -I/home/zeus/miniconda3/envs/cloudspace/lib/python3.12/site-packages/torch/include -I/home/zeus/miniconda3/envs/cloudspace/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/home/zeus/miniconda3/envs/cloudspace/include/python3.12 -c
+cuda_post_cflags = -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options ''"'"'-fPIC'"'"'' -O3 -std=c++17 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_BFLOAT16_OPERATORS__ -U__CUDA_NO_BFLOAT16_CONVERSIONS__ -U__CUDA_NO_BFLOAT162_OPERATORS__ -U__CUDA_NO_BFLOAT162_CONVERSIONS__ --expt-relaxed-constexpr --expt-extended-lambda --use_fast_math --ptxas-options=-v -lineinfo -gencode arch=compute_89,code=sm_89 --threads 4 -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1018"' -DTORCH_EXTENSION_NAME=selective_scan_cuda
+cuda_dlink_post_cflags =
+sycl_dlink_post_cflags =
+ldflags =
+
+rule compile
+ command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags
+ depfile = $out.d
+ deps = gcc
+
+rule cuda_compile
+ depfile = $out.d
+ deps = gcc
+ command = $nvcc --generate-dependencies-with-compile --dependency-output $out.d $cuda_cflags -c $in -o $out $cuda_post_cflags
+
+
+
+
+
+
+
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o: compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu
+build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu
+
+
+
+
+
+
+
+
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o
new file mode 100644
index 0000000000000000000000000000000000000000..fb9ff29ded6a84809186f6ddea678c193f4d891d
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30642e36142e5b060973723e4e887b2d59880c30a0f91c192fb857b1bef0af49
+size 397560
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o
new file mode 100644
index 0000000000000000000000000000000000000000..c0ae6a699b02be35fcd7d1d42b27aca2065b57bf
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25fa5c83cf2ee35e8070b8c8c52c70c4bf3dcf21029f5e9f4590eb13f7a3cc42
+size 10478608
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o
new file mode 100644
index 0000000000000000000000000000000000000000..59262b6694f6011d87882b7d550fc93d136ac10e
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a579cb649353677789e91906a42ba466016e6d0a8e46b03a422aa3c8f9a36feb
+size 6449680
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o
new file mode 100644
index 0000000000000000000000000000000000000000..1e064c242a793e46960c376507fec59748539cab
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:402053a21751a06229a38b35bdfbb6baa63d4c4435f82e628a8a3999a5d2beb2
+size 10455272
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o
new file mode 100644
index 0000000000000000000000000000000000000000..8edecf7112671e10e6784edf3a0fd52999c908f6
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:357d3d897d7697774261cbd88f27fc5e4fb2f01caa8a5428ace0c66f22de7794
+size 6435864
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o
new file mode 100644
index 0000000000000000000000000000000000000000..effaf88981097ce66e6997dc06f90ec03aef90be
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00817b11aa97238772fbee510dc5da41411df480d0258a5344126c72deb983f3
+size 10070104
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o
new file mode 100644
index 0000000000000000000000000000000000000000..8929c84dd2f0f0b28315e5a0385c67a17c512935
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88cc42d0fa66b0cbac41a4445a1f8c204c4f0f9a6cce18b9f94086213bfba7f1
+size 6057784
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o
new file mode 100644
index 0000000000000000000000000000000000000000..be04c8f9a8828eb50c56537cbf8c7fec81fce683
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5d08cbe64bf5265c8f7245f0440b337fe5fb7701c5715e8206eab1d88e475d2
+size 4116912
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o
new file mode 100644
index 0000000000000000000000000000000000000000..2bfb5d5fa14170e3fcf2841a3abfb10fe4cca094
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38293bd3de57cfff70fe33ad77437e7ffcfe7378675092f38c348efe066ecef0
+size 4098472
diff --git a/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o
new file mode 100644
index 0000000000000000000000000000000000000000..2e0efb63f6a4f491f4b35b47661001f3d067f846
--- /dev/null
+++ b/PRISM/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8a5320d5733c4b86ff7fc1e42d6a3f24f4ec935999fa7a9c4242990ea422530
+size 3936016
diff --git a/PRISM/SegMamba/mamba/csrc/selective_scan/reverse_scan.cuh b/PRISM/SegMamba/mamba/csrc/selective_scan/reverse_scan.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..d7e93174bb391d45271e6c77669a5e52d6c9cc78
--- /dev/null
+++ b/PRISM/SegMamba/mamba/csrc/selective_scan/reverse_scan.cuh
@@ -0,0 +1,401 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+#pragma once
+
+#include
+
+#include
+#include