Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- testbed/Project-MONAI__MONAI/.gitattributes +1 -0
- testbed/Project-MONAI__MONAI/.gitignore +126 -0
- testbed/Project-MONAI__MONAI/.readthedocs.yml +28 -0
- testbed/Project-MONAI__MONAI/CHANGELOG.md +61 -0
- testbed/Project-MONAI__MONAI/CONTRIBUTING.md +212 -0
- testbed/Project-MONAI__MONAI/Dockerfile +30 -0
- testbed/Project-MONAI__MONAI/LICENSE +201 -0
- testbed/Project-MONAI__MONAI/MANIFEST.in +2 -0
- testbed/Project-MONAI__MONAI/README.md +68 -0
- testbed/Project-MONAI__MONAI/examples/README.md +36 -0
- testbed/Project-MONAI__MONAI/examples/classification_3d/densenet_evaluation_array.py +77 -0
- testbed/Project-MONAI__MONAI/examples/classification_3d/densenet_evaluation_dict.py +85 -0
- testbed/Project-MONAI__MONAI/examples/classification_3d/densenet_training_array.py +139 -0
- testbed/Project-MONAI__MONAI/examples/classification_3d/densenet_training_dict.py +155 -0
- testbed/Project-MONAI__MONAI/examples/classification_3d_ignite/densenet_evaluation_array.py +94 -0
- testbed/Project-MONAI__MONAI/examples/classification_3d_ignite/densenet_evaluation_dict.py +102 -0
- testbed/Project-MONAI__MONAI/examples/classification_3d_ignite/densenet_training_array.py +144 -0
- testbed/Project-MONAI__MONAI/examples/classification_3d_ignite/densenet_training_dict.py +166 -0
- testbed/Project-MONAI__MONAI/examples/distributed_training/unet_evaluation_ddp.py +166 -0
- testbed/Project-MONAI__MONAI/examples/distributed_training/unet_evaluation_horovod.py +165 -0
- testbed/Project-MONAI__MONAI/examples/distributed_training/unet_evaluation_workflows.py +203 -0
- testbed/Project-MONAI__MONAI/examples/distributed_training/unet_training_ddp.py +193 -0
- testbed/Project-MONAI__MONAI/examples/distributed_training/unet_training_horovod.py +193 -0
- testbed/Project-MONAI__MONAI/examples/distributed_training/unet_training_workflows.py +206 -0
- testbed/Project-MONAI__MONAI/examples/segmentation_3d/unet_evaluation_array.py +89 -0
- testbed/Project-MONAI__MONAI/examples/segmentation_3d/unet_evaluation_dict.py +103 -0
- testbed/Project-MONAI__MONAI/examples/segmentation_3d/unet_training_array.py +167 -0
- testbed/Project-MONAI__MONAI/examples/segmentation_3d/unet_training_dict.py +187 -0
- testbed/Project-MONAI__MONAI/examples/segmentation_3d_ignite/unet_evaluation_array.py +113 -0
- testbed/Project-MONAI__MONAI/examples/segmentation_3d_ignite/unet_evaluation_dict.py +119 -0
- testbed/Project-MONAI__MONAI/examples/segmentation_3d_ignite/unet_training_array.py +160 -0
- testbed/Project-MONAI__MONAI/examples/segmentation_3d_ignite/unet_training_dict.py +200 -0
- testbed/Project-MONAI__MONAI/examples/synthesis/gan_evaluation.py +67 -0
- testbed/Project-MONAI__MONAI/examples/synthesis/gan_training.py +203 -0
- testbed/Project-MONAI__MONAI/examples/workflows/unet_evaluation_dict.py +121 -0
- testbed/Project-MONAI__MONAI/examples/workflows/unet_training_dict.py +179 -0
- testbed/Project-MONAI__MONAI/monai/README.md +26 -0
- testbed/Project-MONAI__MONAI/monai/__init__.py +33 -0
- testbed/Project-MONAI__MONAI/monai/_version.py +519 -0
- testbed/Project-MONAI__MONAI/monai/apps/__init__.py +13 -0
- testbed/Project-MONAI__MONAI/monai/apps/datasets.py +265 -0
- testbed/Project-MONAI__MONAI/monai/apps/utils.py +186 -0
- testbed/Project-MONAI__MONAI/monai/engines/__init__.py +14 -0
- testbed/Project-MONAI__MONAI/monai/engines/evaluator.py +280 -0
- testbed/Project-MONAI__MONAI/monai/engines/multi_gpu_supervised_trainer.py +136 -0
- testbed/Project-MONAI__MONAI/monai/engines/trainer.py +297 -0
- testbed/Project-MONAI__MONAI/monai/engines/utils.py +90 -0
- testbed/Project-MONAI__MONAI/monai/engines/workflow.py +172 -0
- testbed/Project-MONAI__MONAI/monai/inferers/__init__.py +13 -0
- testbed/Project-MONAI__MONAI/monai/inferers/inferer.py +110 -0
testbed/Project-MONAI__MONAI/.gitattributes
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
monai/_version.py export-subst
|
testbed/Project-MONAI__MONAI/.gitignore
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
.installed.cfg
|
| 25 |
+
*.egg
|
| 26 |
+
MANIFEST
|
| 27 |
+
|
| 28 |
+
# PyInstaller
|
| 29 |
+
# Usually these files are written by a python script from a template
|
| 30 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 31 |
+
*.manifest
|
| 32 |
+
*.spec
|
| 33 |
+
|
| 34 |
+
# Installer logs
|
| 35 |
+
pip-log.txt
|
| 36 |
+
pip-delete-this-directory.txt
|
| 37 |
+
|
| 38 |
+
# Unit test / coverage reports
|
| 39 |
+
htmlcov/
|
| 40 |
+
.tox/
|
| 41 |
+
.coverage
|
| 42 |
+
.coverage.*
|
| 43 |
+
.cache
|
| 44 |
+
nosetests.xml
|
| 45 |
+
coverage.xml
|
| 46 |
+
*.cover
|
| 47 |
+
.hypothesis/
|
| 48 |
+
.pytest_cache/
|
| 49 |
+
|
| 50 |
+
# Translations
|
| 51 |
+
*.mo
|
| 52 |
+
*.pot
|
| 53 |
+
|
| 54 |
+
# Django stuff:
|
| 55 |
+
*.log
|
| 56 |
+
local_settings.py
|
| 57 |
+
db.sqlite3
|
| 58 |
+
|
| 59 |
+
# Flask stuff:
|
| 60 |
+
instance/
|
| 61 |
+
.webassets-cache
|
| 62 |
+
|
| 63 |
+
# Scrapy stuff:
|
| 64 |
+
.scrapy
|
| 65 |
+
|
| 66 |
+
# Sphinx documentation
|
| 67 |
+
docs/_build/
|
| 68 |
+
|
| 69 |
+
# PyBuilder
|
| 70 |
+
target/
|
| 71 |
+
|
| 72 |
+
# Jupyter Notebook
|
| 73 |
+
.ipynb_checkpoints
|
| 74 |
+
|
| 75 |
+
# pyenv
|
| 76 |
+
.python-version
|
| 77 |
+
|
| 78 |
+
# celery beat schedule file
|
| 79 |
+
celerybeat-schedule
|
| 80 |
+
|
| 81 |
+
# SageMath parsed files
|
| 82 |
+
*.sage.py
|
| 83 |
+
|
| 84 |
+
# Environments
|
| 85 |
+
.env
|
| 86 |
+
.venv
|
| 87 |
+
env/
|
| 88 |
+
venv/
|
| 89 |
+
ENV/
|
| 90 |
+
env.bak/
|
| 91 |
+
venv.bak/
|
| 92 |
+
|
| 93 |
+
# Spyder project settings
|
| 94 |
+
.spyderproject
|
| 95 |
+
.spyproject
|
| 96 |
+
|
| 97 |
+
# Rope project settings
|
| 98 |
+
.ropeproject
|
| 99 |
+
|
| 100 |
+
# mkdocs documentation
|
| 101 |
+
/site
|
| 102 |
+
|
| 103 |
+
# pytype cache
|
| 104 |
+
.pytype/
|
| 105 |
+
|
| 106 |
+
# mypy
|
| 107 |
+
.mypy_cache/
|
| 108 |
+
examples/scd_lvsegs.npz
|
| 109 |
+
temp/
|
| 110 |
+
.idea/
|
| 111 |
+
|
| 112 |
+
*~
|
| 113 |
+
|
| 114 |
+
# Remove .pyre temporary config files
|
| 115 |
+
.pyre
|
| 116 |
+
.pyre_configuration
|
| 117 |
+
|
| 118 |
+
# temporary editor files that should not be in git
|
| 119 |
+
*.orig
|
| 120 |
+
*.bak
|
| 121 |
+
*.swp
|
| 122 |
+
.DS_Store
|
| 123 |
+
|
| 124 |
+
# temporary testing data MedNIST
|
| 125 |
+
tests/testing_data/MedNIST*
|
| 126 |
+
tests/testing_data/*Hippocampus*
|
testbed/Project-MONAI__MONAI/.readthedocs.yml
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# .readthedocs.yml
|
| 2 |
+
# Read the Docs configuration file
|
| 3 |
+
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
| 4 |
+
|
| 5 |
+
# Required
|
| 6 |
+
version: 2
|
| 7 |
+
|
| 8 |
+
# Build documentation in the docs/ directory with Sphinx
|
| 9 |
+
sphinx:
|
| 10 |
+
configuration: docs/source/conf.py
|
| 11 |
+
|
| 12 |
+
# Build documentation with MkDocs
|
| 13 |
+
#mkdocs:
|
| 14 |
+
# configuration: mkdocs.yml
|
| 15 |
+
|
| 16 |
+
# Optionally build your docs in additional formats such as PDF and ePub
|
| 17 |
+
# formats: all
|
| 18 |
+
|
| 19 |
+
# Optionally set the version of Python and requirements required to build your docs
|
| 20 |
+
python:
|
| 21 |
+
version: 3.7
|
| 22 |
+
install:
|
| 23 |
+
- requirements: docs/requirements.txt
|
| 24 |
+
# system_packages: true
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
build:
|
| 28 |
+
image: stable
|
testbed/Project-MONAI__MONAI/CHANGELOG.md
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Changelog
|
| 2 |
+
All notable changes to MONAI are documented in this file.
|
| 3 |
+
|
| 4 |
+
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
|
| 5 |
+
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
|
| 6 |
+
|
| 7 |
+
## [Unreleased]
|
| 8 |
+
## [0.2.0] - 2020-07-02
|
| 9 |
+
### Added
|
| 10 |
+
* Overview document for [feature highlights in v0.2.0](https://github.com/Project-MONAI/MONAI/blob/master/docs/source/highlights.md)
|
| 11 |
+
* Type hints and static type analysis support
|
| 12 |
+
* `MONAI/research` folder
|
| 13 |
+
* `monai.engine.workflow` APIs for supervised training
|
| 14 |
+
* `monai.inferers` APIs for validation and inference
|
| 15 |
+
* 7 new tutorials and examples
|
| 16 |
+
* 3 new loss functions
|
| 17 |
+
* 4 new event handlers
|
| 18 |
+
* 8 new layers, blocks, and networks
|
| 19 |
+
* 12 new transforms, including post-processing transforms
|
| 20 |
+
* `monai.apps.datasets` APIs, including `MedNISTDataset` and `DecathlonDataset`
|
| 21 |
+
* Persistent caching, `ZipDataset`, and `ArrayDataset` in `monai.data`
|
| 22 |
+
* Cross-platform CI tests supporting multiple Python versions
|
| 23 |
+
* Optional import mechanism
|
| 24 |
+
* Experimental features for third-party transforms integration
|
| 25 |
+
### Changed
|
| 26 |
+
> For more details please visit [the project wiki](https://github.com/Project-MONAI/MONAI/wiki/Notable-changes-between-0.1.0-and-0.2.0)
|
| 27 |
+
* Core modules now require numpy >= 1.17
|
| 28 |
+
* Categorized `monai.transforms` modules into crop and pad, intensity, IO, post-processing, spatial, and utility.
|
| 29 |
+
* Most transforms are now implemented with PyTorch native APIs
|
| 30 |
+
* Code style enforcement and automated formatting workflows now use autopep8 and black
|
| 31 |
+
* Base Docker image upgraded to `nvcr.io/nvidia/pytorch:20.03-py3` from `nvcr.io/nvidia/pytorch:19.10-py3`
|
| 32 |
+
* Enhanced local testing tools
|
| 33 |
+
* Documentation website domain changed to https://docs.monai.io
|
| 34 |
+
### Removed
|
| 35 |
+
* Support of Python < 3.6
|
| 36 |
+
* Automatic installation of optional dependencies including pytorch-ignite, nibabel, tensorboard, pillow, scipy, scikit-image
|
| 37 |
+
### Fixed
|
| 38 |
+
* Various issues in type and argument names consistency
|
| 39 |
+
* Various issues in docstring and documentation site
|
| 40 |
+
* Various issues in unit and integration tests
|
| 41 |
+
* Various issues in examples and notebooks
|
| 42 |
+
|
| 43 |
+
## [0.1.0] - 2020-04-17
|
| 44 |
+
### Added
|
| 45 |
+
* Public alpha source code release under the Apache 2.0 license ([highlights](https://github.com/Project-MONAI/MONAI/blob/0.1.0/docs/source/highlights.md))
|
| 46 |
+
* Various tutorials and examples
|
| 47 |
+
- Medical image classification and segmentation workflows
|
| 48 |
+
- Spacing/orientation-aware preprocessing with CPU/GPU and caching
|
| 49 |
+
- Flexible workflows with PyTorch Ignite and Lightning
|
| 50 |
+
* Various GitHub Actions
|
| 51 |
+
- CI/CD pipelines via self-hosted runners
|
| 52 |
+
- Documentation publishing via readthedocs.org
|
| 53 |
+
- PyPI package publishing
|
| 54 |
+
* Contributing guidelines
|
| 55 |
+
* A project logo and badges
|
| 56 |
+
|
| 57 |
+
[highlights]: https://github.com/Project-MONAI/MONAI/blob/master/docs/source/highlights.md
|
| 58 |
+
|
| 59 |
+
[Unreleased]: https://github.com/Project-MONAI/MONAI/compare/0.2.0...HEAD
|
| 60 |
+
[0.2.0]: https://github.com/Project-MONAI/MONAI/compare/0.1.0...0.2.0
|
| 61 |
+
[0.1.0]: https://github.com/Project-MONAI/MONAI/commits/0.1.0
|
testbed/Project-MONAI__MONAI/CONTRIBUTING.md
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- [Introduction](#introduction)
|
| 2 |
+
- [The contribution process](#the-contribution-process)
|
| 3 |
+
* [Submitting pull requests](#submitting-pull-requests)
|
| 4 |
+
* [Ensuring code quality](#ensuring-code-quality)
|
| 5 |
+
1. [Coding style](#coding-style)
|
| 6 |
+
1. [Code analysis and unit testing](#code-analysis-and-unit-testing)
|
| 7 |
+
1. [Building the documentation](#building-the-documentation)
|
| 8 |
+
1. [Automatic code formatting](#automatic-code-formatting)
|
| 9 |
+
1. [Utility functions](#utility-functions)
|
| 10 |
+
- [The code reviewing process (for the maintainers)](#the-code-reviewing-process)
|
| 11 |
+
* [Reviewing pull requests](#reviewing-pull-requests)
|
| 12 |
+
- [Admin tasks (for the maintainers)](#admin-tasks)
|
| 13 |
+
* [Releasing a new version](#release-a-new-version)
|
| 14 |
+
|
| 15 |
+
## Introduction
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
This documentation is intended for individuals and institutions interested in contributing to MONAI. MONAI is an open-source project and, as such, its success relies on its community of contributors willing to keep improving it. Your contribution will be a valued addition to the code base; we simply ask that you read this page and understand our contribution process, whether you are a seasoned open-source contributor or whether you are a first-time contributor.
|
| 19 |
+
|
| 20 |
+
### Communicate with us
|
| 21 |
+
|
| 22 |
+
We are happy to talk with you about your needs for MONAI and your ideas for contributing to the project. One way to do this is to create an issue discussing your thoughts. It might be that a very similar feature is under development or already exists, so an issue is a great starting point.
|
| 23 |
+
|
| 24 |
+
### Does it belong in PyTorch instead of MONAI?
|
| 25 |
+
|
| 26 |
+
MONAI is based on the PyTorch and Numpy libraries. These libraries implement what we consider to be best practice for general scientific computing and deep learning functionality. MONAI builds on these with a strong focus on medical applications. As such, it is a good idea to consider whether your functionality is medical-application specific or not. General deep learning functionality may be better off in PyTorch; you can find their contribution guidelines [here](https://pytorch.org/docs/stable/community/contribution_guide.html).
|
| 27 |
+
|
| 28 |
+
## The contribution process
|
| 29 |
+
|
| 30 |
+
_Pull request early_
|
| 31 |
+
|
| 32 |
+
We encourage you to create pull requests early. It helps us track the contributions under development, whether they are ready to be merged or not. Change your pull request's title to begin with `[WIP]` until it is ready for formal review.
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
### Submitting pull requests
|
| 36 |
+
All code changes to the master branch must be done via [pull requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/proposing-changes-to-your-work-with-pull-requests).
|
| 37 |
+
1. Create a new ticket or take a known ticket from [the issue list][monai issue list].
|
| 38 |
+
1. Check if there's already a branch dedicated to the task.
|
| 39 |
+
1. If the task has not been taken, [create a new branch in your fork](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)
|
| 40 |
+
of the codebase named `[ticket_id]-[task_name]`.
|
| 41 |
+
For example, branch name `19-ci-pipeline-setup` corresponds to [issue #19](https://github.com/Project-MONAI/MONAI/issues/19).
|
| 42 |
+
Ideally, the new branch should be based on the latest `master` branch.
|
| 43 |
+
1. Make changes to the branch ([use detailed commit messages if possible](https://chris.beams.io/posts/git-commit/)).
|
| 44 |
+
1. Make sure that new tests cover the changes and the changed codebase [passes all tests locally](#ensuring-code-quality).
|
| 45 |
+
1. [Create a new pull request](https://help.github.com/en/desktop/contributing-to-projects/creating-a-pull-request) from the task branch to the master branch, with detailed descriptions of the purpose of this pull request.
|
| 46 |
+
1. Check [the CI/CD status of the pull request][github ci], make sure all CI/CD tests passed.
|
| 47 |
+
1. Wait for reviews; if there are reviews, make point-to-point responses, make further code changes if needed.
|
| 48 |
+
1. If there're conflicts between the pull request branch and the master branch, pull the changes from the master and resolve the conflicts locally.
|
| 49 |
+
1. Reviewer and contributor may have discussions back and forth until all comments addressed.
|
| 50 |
+
1. Wait for the pull request to be merged.
|
| 51 |
+
|
| 52 |
+
### Ensuring code quality
|
| 53 |
+
To ensure the code quality, MONAI relies on several linting tools ([flake8 and its plugins](https://gitlab.com/pycqa/flake8), [black](https://github.com/psf/black), [isort](https://github.com/timothycrosley/isort)),
|
| 54 |
+
static type analysis tools ([mypy](https://github.com/python/mypy), [pytype](https://github.com/google/pytype)), as well as a set of unit/integration tests.
|
| 55 |
+
|
| 56 |
+
This section highlights all the necessary steps required before sending a pull request.
|
| 57 |
+
To collaborate efficiently, please read through this section and follow them.
|
| 58 |
+
|
| 59 |
+
* [Coding style](#coding-style)
|
| 60 |
+
* [Code analysis and unit testing](#code-analysis-and-unit-testing)
|
| 61 |
+
* [Building documentation](#building-the-documentation)
|
| 62 |
+
|
| 63 |
+
#### Coding style
|
| 64 |
+
Coding style is checked and enforced by flake8, black, and isort, using [a flake8 configuration](./setup.cfg) similar to [PyTorch's](https://github.com/pytorch/pytorch/blob/master/.flake8).
|
| 65 |
+
The next section provides [a few commands to run the relevant tools](#code-analysis-and-unit-testing).
|
| 66 |
+
|
| 67 |
+
For string definition, [f-string](https://www.python.org/dev/peps/pep-0498/) is recommended to use over `%-print` and `format-print` from python 3.6. So please try to use `f-string` if you need to define any string object.
|
| 68 |
+
|
| 69 |
+
License information: all source code files should start with this paragraph:
|
| 70 |
+
```
|
| 71 |
+
# Copyright 2020 MONAI Consortium
|
| 72 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 73 |
+
# you may not use this file except in compliance with the License.
|
| 74 |
+
# You may obtain a copy of the License at
|
| 75 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 76 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 77 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 78 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 79 |
+
# See the License for the specific language governing permissions and
|
| 80 |
+
# limitations under the License.
|
| 81 |
+
|
| 82 |
+
```
|
| 83 |
+
|
| 84 |
+
#### Code analysis and unit testing
|
| 85 |
+
MONAI tests are located under `tests/`.
|
| 86 |
+
|
| 87 |
+
- The unit test's file name follows `test_[module_name].py`.
|
| 88 |
+
- The integration test's file name follows `test_integration_[workflow_name].py`.
|
| 89 |
+
|
| 90 |
+
A bash script (`runtests.sh`) is provided to run all tests locally
|
| 91 |
+
Please run ``./runtests.sh -h`` to see all options.
|
| 92 |
+
|
| 93 |
+
To run a particular test, for example `tests/test_dice_loss.py`:
|
| 94 |
+
```
|
| 95 |
+
python -m tests.test_dice_loss
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
Before submitting a pull request, we recommend that all linting and unit tests
|
| 99 |
+
should pass, by running the following command locally:
|
| 100 |
+
|
| 101 |
+
```bash
|
| 102 |
+
./runtests.sh --codeformat --coverage
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
It is recommended that the new test `test_[module_name].py` is constructed by using only
|
| 106 |
+
python 3.6+ build-in functions, `torch`, `numpy`, and `parameterized` packages.
|
| 107 |
+
If it requires any other external packages, please make sure:
|
| 108 |
+
- the packages are listed in [`requirements-dev.txt`](requirements-dev.txt)
|
| 109 |
+
- the new test `test_[module_name].py` is added to the `exclude_cases` in [`./tests/min_tests.py`](./tests/min_tests.py) so that
|
| 110 |
+
the minimal CI runner will not execute it.
|
| 111 |
+
|
| 112 |
+
_If it's not tested, it's broken_
|
| 113 |
+
|
| 114 |
+
All new functionality should be accompanied by an appropriate set of tests.
|
| 115 |
+
MONAI functionality has plenty of unit tests from which you can draw inspiration,
|
| 116 |
+
and you can reach out to us if you are unsure of how to proceed with testing.
|
| 117 |
+
|
| 118 |
+
MONAI's code coverage report is available at [CodeCov](https://codecov.io/gh/Project-MONAI/MONAI).
|
| 119 |
+
|
| 120 |
+
#### Building the documentation
|
| 121 |
+
MONAI's documentation is located at `docs/`.
|
| 122 |
+
|
| 123 |
+
```bash
|
| 124 |
+
# install the doc-related dependencies
|
| 125 |
+
pip install --upgrade pip
|
| 126 |
+
pip install -r docs/requirements.txt
|
| 127 |
+
|
| 128 |
+
# build the docs
|
| 129 |
+
cd docs/
|
| 130 |
+
make html
|
| 131 |
+
```
|
| 132 |
+
The above commands build html documentation, they are used to automatically generate [https://docs.monai.io](https://docs.monai.io).
|
| 133 |
+
|
| 134 |
+
Before submitting a pull request, it is recommended to:
|
| 135 |
+
- edit the relevant `.rst` files in [`docs/source`](./docs/source) accordingly.
|
| 136 |
+
- build html documentation locally
|
| 137 |
+
- check the auto-generated documentation (by browsing `./docs/build/html/index.html` with a web browser)
|
| 138 |
+
- type `make clean` in `docs/` folder to remove the current build files.
|
| 139 |
+
|
| 140 |
+
Please type `make help` for all supported format options.
|
| 141 |
+
|
| 142 |
+
#### Automatic code formatting
|
| 143 |
+
MONAI provides support of automatic Python code formatting via [a customised GitHub action](https://github.com/Project-MONAI/monai-code-formatter).
|
| 144 |
+
This makes the project's Python coding style consistent and reduces maintenance burdens.
|
| 145 |
+
Commenting a pull request with `/black` triggers the formatting action based on [`psf/Black`](https://github.com/psf/black) (this is implemented with [`slash command dispatch`](https://github.com/marketplace/actions/slash-command-dispatch)).
|
| 146 |
+
|
| 147 |
+
Steps for the formatting process:
|
| 148 |
+
- After submitting a pull request or push to an existing pull request,
|
| 149 |
+
make a comment to the pull request to trigger the formatting action.
|
| 150 |
+
The first line of the comment must be `/black` so that it will be interpreted by [the comment parser](https://github.com/marketplace/actions/slash-command-dispatch#how-are-comments-parsed-for-slash-commands).
|
| 151 |
+
- [Auto] The GitHub action tries to format all Python files (using [`psf/Black`](https://github.com/psf/black)) in the branch and makes a commit under the name "MONAI bot" if there's code change. The actual formatting action is deployed at [project-monai/monai-code-formatter](https://github.com/Project-MONAI/monai-code-formatter).
|
| 152 |
+
- [Auto] After the formatting commit, the GitHub action adds an emoji to the comment that triggered the process.
|
| 153 |
+
- Repeat the above steps if necessary.
|
| 154 |
+
|
| 155 |
+
#### Utility functions
|
| 156 |
+
MONAI provides a set of generic utility functions and frequently used routines.
|
| 157 |
+
These are located in [``monai/utils``](./monai/utils/) and in the module folders such as [``networks/utils.py``](./monai/networks/).
|
| 158 |
+
Users are encouraged to use these common routines to improve code readability and reduce the code maintenance burdens.
|
| 159 |
+
|
| 160 |
+
Notably,
|
| 161 |
+
- ``monai.module.export`` decorator can make the module name shorter when importing,
|
| 162 |
+
for example, ``import monai.transforms.Spacing`` is the equivalent of ``monai.transforms.spatial.array.Spacing`` if
|
| 163 |
+
``class Spacing`` defined in file `monai/transforms/spatial/array.py` is decorated with ``@export("monai.transforms")``.
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
## The code reviewing process
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
### Reviewing pull requests
|
| 170 |
+
All code review comments should be specific, constructive, and actionable.
|
| 171 |
+
1. Check [the CI/CD status of the pull request][github ci], make sure all CI/CD tests passed before reviewing (contact the branch owner if needed).
|
| 172 |
+
1. Read carefully the descriptions of the pull request and the files changed, write comments if needed.
|
| 173 |
+
1. Make in-line comments to specific code segments, [request for changes](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-request-reviews) if needed.
|
| 174 |
+
1. Review any further code changes until all comments addressed by the contributors.
|
| 175 |
+
1. Merge the pull request to the master branch.
|
| 176 |
+
1. Close the corresponding task ticket on [the issue list][monai issue list].
|
| 177 |
+
|
| 178 |
+
[github ci]: https://github.com/Project-MONAI/MONAI/actions
|
| 179 |
+
[monai issue list]: https://github.com/Project-MONAI/MONAI/issues
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
## Admin tasks
|
| 183 |
+
|
| 184 |
+
### Release a new version
|
| 185 |
+
- Prepare [a release note](https://github.com/Project-MONAI/MONAI/releases).
|
| 186 |
+
- Checkout a new branch `releases/[version number]` locally from the master branch and push to the codebase.
|
| 187 |
+
- Create a tag, for example `git tag -a 0.1a -m "version 0.1a"`.
|
| 188 |
+
- Push the tag to the codebase, for example `git push origin 0.1a`.
|
| 189 |
+
This step will trigger package building and testing.
|
| 190 |
+
The resultant packages are automatically uploaded to
|
| 191 |
+
[TestPyPI](https://test.pypi.org/project/monai/). The packages are also available for downloading as
|
| 192 |
+
repository's artifacts (e.g. the file at https://github.com/Project-MONAI/MONAI/actions/runs/66570977).
|
| 193 |
+
- Check the release test at [TestPyPI](https://test.pypi.org/project/monai/), download the artifacts when the CI finishes.
|
| 194 |
+
- Upload the packages to [PyPI](https://pypi.org/project/monai/).
|
| 195 |
+
This could be done manually by ``twine upload dist/*``, given the artifacts are unzipped to the folder ``dist/``.
|
| 196 |
+
- Publish the release note.
|
| 197 |
+
|
| 198 |
+
Note that the release should be tagged with a [PEP440](https://www.python.org/dev/peps/pep-0440/) compliant
|
| 199 |
+
[semantic versioning](https://semver.org/spec/v2.0.0.html) number.
|
| 200 |
+
|
| 201 |
+
If any error occurs during the release process, first checkout a new branch from the master, make PRs to the master
|
| 202 |
+
to fix the bugs via the regular contribution procedure.
|
| 203 |
+
Then rollback the release branch and tag:
|
| 204 |
+
- remove any artifacts (website UI) and tag (`git tag -d` and `git push origin -d`).
|
| 205 |
+
- reset the `releases/[version number]` branch to the latest master:
|
| 206 |
+
```bash
|
| 207 |
+
git checkout master
|
| 208 |
+
git pull origin master
|
| 209 |
+
git checkout releases/[version number]
|
| 210 |
+
git reset --hard master
|
| 211 |
+
```
|
| 212 |
+
Finally, repeat the tagging and TestPyPI uploading process.
|
testbed/Project-MONAI__MONAI/Dockerfile
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:20.08-py3
|
| 13 |
+
|
| 14 |
+
FROM ${PYTORCH_IMAGE} as base
|
| 15 |
+
|
| 16 |
+
WORKDIR /opt/monai
|
| 17 |
+
COPY . .
|
| 18 |
+
|
| 19 |
+
ENV PYTHONPATH=$PYTHONPATH:/opt/monai
|
| 20 |
+
ENV PATH=/opt/tools:$PATH
|
| 21 |
+
|
| 22 |
+
RUN python -m pip install --no-cache-dir -U pip wheel \
|
| 23 |
+
&& python -m pip install --no-cache-dir -r requirements-dev.txt
|
| 24 |
+
|
| 25 |
+
# NGC Client
|
| 26 |
+
WORKDIR /opt/tools
|
| 27 |
+
RUN wget -q https://ngc.nvidia.com/downloads/ngccli_cat_linux.zip && \
|
| 28 |
+
unzip ngccli_cat_linux.zip && chmod u+x ngc && \
|
| 29 |
+
rm -rf ngccli_cat_linux.zip ngc.md5
|
| 30 |
+
WORKDIR /opt/monai
|
testbed/Project-MONAI__MONAI/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
testbed/Project-MONAI__MONAI/MANIFEST.in
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include versioneer.py
|
| 2 |
+
include monai/_version.py
|
testbed/Project-MONAI__MONAI/README.md
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<p align="center">
|
| 2 |
+
<img src="https://github.com/Project-MONAI/MONAI/raw/master/docs/images/MONAI-logo-color.png" width="50%" alt='project-monai'>
|
| 3 |
+
</p>
|
| 4 |
+
|
| 5 |
+
**M**edical **O**pen **N**etwork for **AI**
|
| 6 |
+
|
| 7 |
+
[](https://opensource.org/licenses/Apache-2.0)
|
| 8 |
+
[](https://github.com/Project-MONAI/MONAI/commits/master)
|
| 9 |
+
[](https://docs.monai.io/en/latest/?badge=latest)
|
| 10 |
+
[](https://codecov.io/gh/Project-MONAI/MONAI)
|
| 11 |
+
[](https://badge.fury.io/py/monai)
|
| 12 |
+
|
| 13 |
+
MONAI is a [PyTorch](https://pytorch.org/)-based, [open-source](https://github.com/Project-MONAI/MONAI/blob/master/LICENSE) framework for deep learning in healthcare imaging, part of [PyTorch Ecosystem](https://pytorch.org/ecosystem/).
|
| 14 |
+
Its ambitions are:
|
| 15 |
+
- developing a community of academic, industrial and clinical researchers collaborating on a common foundation;
|
| 16 |
+
- creating state-of-the-art, end-to-end training workflows for healthcare imaging;
|
| 17 |
+
- providing researchers with an optimized and standardized way to create and evaluate deep learning models.
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
## Features
|
| 21 |
+
> _The codebase is currently under active development._
|
| 22 |
+
> _Please see [the technical highlights](https://docs.monai.io/en/latest/highlights.html) of the current milestone release._
|
| 23 |
+
|
| 24 |
+
- flexible pre-processing for multi-dimensional medical imaging data;
|
| 25 |
+
- compositional & portable APIs for ease of integration in existing workflows;
|
| 26 |
+
- domain-specific implementations for networks, losses, evaluation metrics and more;
|
| 27 |
+
- customizable design for varying user expertise;
|
| 28 |
+
- multi-GPU data parallelism support.
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
## Installation
|
| 32 |
+
To install [the current release](https://pypi.org/project/monai/):
|
| 33 |
+
```bash
|
| 34 |
+
pip install monai
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
To install from the source code repository:
|
| 38 |
+
```bash
|
| 39 |
+
pip install git+https://github.com/Project-MONAI/MONAI#egg=MONAI
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
Alternatively, pre-built Docker image is available via [DockerHub](https://hub.docker.com/r/projectmonai/monai):
|
| 43 |
+
```bash
|
| 44 |
+
# with docker v19.03+
|
| 45 |
+
docker run --gpus all --rm -ti --ipc=host projectmonai/monai:latest
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
For more details, please refer to [the installation guide](https://docs.monai.io/en/latest/installation.html).
|
| 49 |
+
|
| 50 |
+
## Getting Started
|
| 51 |
+
|
| 52 |
+
[MedNIST demo](https://colab.research.google.com/drive/1wy8XUSnNWlhDNazFdvGBHLfdkGvOHBKe) and [MONAI for PyTorch Users](https://colab.research.google.com/drive/1boqy7ENpKrqaJoxFlbHIBnIODAs1Ih1T) are available on Colab.
|
| 53 |
+
|
| 54 |
+
Tutorials & examples are located at [monai/examples](https://github.com/Project-MONAI/MONAI/tree/master/examples).
|
| 55 |
+
|
| 56 |
+
Technical documentation is available at [docs.monai.io](https://docs.monai.io).
|
| 57 |
+
|
| 58 |
+
## Contributing
|
| 59 |
+
For guidance on making a contribution to MONAI, see the [contributing guidelines](https://github.com/Project-MONAI/MONAI/blob/master/CONTRIBUTING.md).
|
| 60 |
+
|
| 61 |
+
## Links
|
| 62 |
+
- Website: https://monai.io/
|
| 63 |
+
- API documentation: https://docs.monai.io
|
| 64 |
+
- Code: https://github.com/Project-MONAI/MONAI
|
| 65 |
+
- Project tracker: https://github.com/Project-MONAI/MONAI/projects
|
| 66 |
+
- Issue tracker: https://github.com/Project-MONAI/MONAI/issues
|
| 67 |
+
- Wiki: https://github.com/Project-MONAI/MONAI/wiki
|
| 68 |
+
- Test status: https://github.com/Project-MONAI/MONAI/actions
|
testbed/Project-MONAI__MONAI/examples/README.md
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
### 1. Requirements
|
| 2 |
+
Some of the examples may require optional dependencies. In case of any optional import errors,
|
| 3 |
+
please install the relevant packages according to the error message.
|
| 4 |
+
Or install all optional requirements by:
|
| 5 |
+
```
|
| 6 |
+
pip install -r https://raw.githubusercontent.com/Project-MONAI/MONAI/master/requirements-dev.txt
|
| 7 |
+
```
|
| 8 |
+
|
| 9 |
+
### 2. List of examples
|
| 10 |
+
#### [classification_3d](./classification_3d)
|
| 11 |
+
Training and evaluation examples of 3D classification based on DenseNet3D and [IXI dataset](https://brain-development.org/ixi-dataset).
|
| 12 |
+
The examples are standard PyTorch programs and have both dictionary-based and array-based transformation versions.
|
| 13 |
+
#### [classification_3d_ignite](./classification_3d_ignite)
|
| 14 |
+
Training and evaluation examples of 3D classification based on DenseNet3D and [IXI dataset](https://brain-development.org/ixi-dataset).
|
| 15 |
+
The examples are PyTorch Ignite programs and have both dictionary-based and array-based transformation versions.
|
| 16 |
+
#### [distributed_training](./distributed_training)
|
| 17 |
+
The examples show how to execute distributed training and evaluation based on 3 different frameworks:
|
| 18 |
+
- PyTorch native `DistributedDataParallel` module with `torch.distributed.launch`.
|
| 19 |
+
- Horovod APIs with `horovodrun`.
|
| 20 |
+
- PyTorch ignite and MONAI workflows.
|
| 21 |
+
|
| 22 |
+
They can run on several distributed nodes with multiple GPU devices on every node.
|
| 23 |
+
#### [segmentation_3d](./segmentation_3d)
|
| 24 |
+
Training and evaluation examples of 3D segmentation based on UNet3D and synthetic dataset.
|
| 25 |
+
The examples are standard PyTorch programs and have both dictionary-based and array-based versions.
|
| 26 |
+
#### [segmentation_3d_ignite](./segmentation_3d_ignite)
|
| 27 |
+
Training and evaluation examples of 3D segmentation based on UNet3D and synthetic dataset.
|
| 28 |
+
The examples are PyTorch Ignite programs and have both dictionary-based and array-based transformations.
|
| 29 |
+
#### [workflows](./workflows)
|
| 30 |
+
Training and evaluation examples of 3D segmentation based on UNet3D and synthetic dataset.
|
| 31 |
+
The examples are built with MONAI workflows, mainly contain: trainer/evaluator, handlers, post_transforms, etc.
|
| 32 |
+
#### [synthesis](./synthesis)
|
| 33 |
+
A GAN training and evaluation example for a medical image generative adversarial network. Easy run training script uses `GanTrainer` to train a 2D CT scan reconstruction network. Evaluation script generates random samples from a trained network.
|
| 34 |
+
|
| 35 |
+
### 3. List of tutorials
|
| 36 |
+
Please check out https://github.com/Project-MONAI/Tutorials
|
testbed/Project-MONAI__MONAI/examples/classification_3d/densenet_evaluation_array.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
import torch
|
| 18 |
+
from torch.utils.data import DataLoader
|
| 19 |
+
|
| 20 |
+
import monai
|
| 21 |
+
from monai.data import CSVSaver, NiftiDataset
|
| 22 |
+
from monai.transforms import AddChannel, Compose, Resize, ScaleIntensity, ToTensor
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def main():
    """Evaluate a 3D DenseNet121 gender classifier on a small IXI sample.

    Loads ten Nifti volumes with array-based MONAI transforms, runs the
    trained model over them, prints the overall accuracy and writes the
    per-volume predictions to ``./output`` through ``CSVSaver``.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    data_dir = ["workspace", "data", "medical", "ixi", "IXI-T1"]
    filenames = [
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join(data_dir + [name]) for name in filenames]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)

    # Array-based preprocessing: rescale intensities, add a channel axis,
    # resize to a fixed spatial shape, then convert to a torch tensor.
    val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])

    # image_only=False so each batch also carries the metadata CSVSaver needs.
    val_ds = NiftiDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # Create DenseNet121 on GPU when available, otherwise CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)

    model.load_state_dict(torch.load("best_metric_model_classification3d_array.pth"))
    model.eval()
    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        for batch in val_loader:
            inputs, targets = batch[0].to(device), batch[1].to(device)
            predictions = model(inputs).argmax(dim=1)
            matches = torch.eq(predictions, targets)
            metric_count += len(matches)
            num_correct += matches.sum().item()
            # batch[2] holds per-image metadata used to name the CSV rows.
            saver.save_batch(predictions, batch[2])
        print("evaluation metric:", num_correct / metric_count)
        saver.finalize()


if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/classification_3d/densenet_evaluation_dict.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
import torch
|
| 18 |
+
from torch.utils.data import DataLoader
|
| 19 |
+
|
| 20 |
+
import monai
|
| 21 |
+
from monai.data import CSVSaver
|
| 22 |
+
from monai.transforms import AddChanneld, Compose, LoadNiftid, Resized, ScaleIntensityd, ToTensord
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def main():
    """Evaluate a 3D DenseNet121 gender classifier with dictionary transforms.

    Builds a list of ``{"img", "label"}`` records for ten IXI volumes,
    preprocesses them through dictionary-based MONAI transforms, runs the
    trained model, prints the overall accuracy and writes per-volume
    predictions to ``./output`` through ``CSVSaver``.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    data_dir = ["workspace", "data", "medical", "ixi", "IXI-T1"]
    filenames = [
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join(data_dir + [name]) for name in filenames]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
    val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # Dictionary-based preprocessing applied to the "img" key only.
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # Create DenseNet121 on GPU when available, otherwise CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)

    model.load_state_dict(torch.load("best_metric_model_classification3d_dict.pth"))
    model.eval()
    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        for batch in val_loader:
            inputs, targets = batch["img"].to(device), batch["label"].to(device)
            predictions = model(inputs).argmax(dim=1)
            matches = torch.eq(predictions, targets)
            metric_count += len(matches)
            num_correct += matches.sum().item()
            # "img_meta_dict" holds per-image metadata used to name CSV rows.
            saver.save_batch(predictions, batch["img_meta_dict"])
        print("evaluation metric:", num_correct / metric_count)
        saver.finalize()


if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/classification_3d/densenet_training_array.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys

import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

import monai
from monai.data import NiftiDataset
from monai.transforms import AddChannel, Compose, RandRotate90, Resize, ScaleIntensity, ToTensor


def main():
    """3D classification training demo (array data format).

    Trains DenseNet121 on a 20-volume IXI subset for binary gender
    classification: 10 volumes for training, 10 for validation, validating
    every second epoch, checkpointing the best-accuracy model and logging
    scalars to TensorBoard.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    data_dir = ["workspace", "data", "medical", "ixi", "IXI-T1"]
    filenames = [
        "IXI314-IOP-0889-T1.nii.gz",
        "IXI249-Guys-1072-T1.nii.gz",
        "IXI609-HH-2600-T1.nii.gz",
        "IXI173-HH-1590-T1.nii.gz",
        "IXI020-Guys-0700-T1.nii.gz",
        "IXI342-Guys-0909-T1.nii.gz",
        "IXI134-Guys-0780-T1.nii.gz",
        "IXI577-HH-2661-T1.nii.gz",
        "IXI066-Guys-0731-T1.nii.gz",
        "IXI130-HH-1528-T1.nii.gz",
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join(data_dir + [name]) for name in filenames]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)

    # transforms: scale intensity, add a channel axis, resize, (train-only) random 90-degree rotation
    train_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), RandRotate90(), ToTensor()])
    val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])

    # sanity-check one batch before training
    check_ds = NiftiDataset(image_files=images, labels=labels, transform=train_transforms)
    check_loader = DataLoader(check_ds, batch_size=2, num_workers=2, pin_memory=torch.cuda.is_available())
    im, label = monai.utils.misc.first(check_loader)
    print(type(im), im.shape, label)

    # training data loader over the first half of the subset
    train_ds = NiftiDataset(image_files=images[:10], labels=labels[:10], transform=train_transforms)
    train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=2, pin_memory=torch.cuda.is_available())

    # validation data loader over the second half of the subset
    val_ds = NiftiDataset(image_files=images[-10:], labels=labels[-10:], transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=2, pin_memory=torch.cuda.is_available())

    # model, loss and optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)

    # typical PyTorch training loop
    val_interval = 2  # validate every 2 epochs
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                num_correct = 0.0
                metric_count = 0
                for val_data in val_loader:
                    val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
                    val_outputs = model(val_images)
                    # accuracy: compare argmax of logits against integer labels
                    value = torch.eq(val_outputs.argmax(dim=1), val_labels)
                    metric_count += len(value)
                    num_correct += value.sum().item()
                metric = num_correct / metric_count
                metric_values.append(metric)
                if metric > best_metric:
                    best_metric = metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model_classification3d_array.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current accuracy: {:.4f} best accuracy: {:.4f} at epoch {}".format(
                        epoch + 1, metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_accuracy", metric, epoch + 1)
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()


if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/classification_3d/densenet_training_dict.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys

import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

import monai
from monai.metrics import compute_roc_auc
from monai.transforms import AddChanneld, Compose, LoadNiftid, RandRotate90d, Resized, ScaleIntensityd, ToTensord


def main():
    """3D classification training demo (dictionary data format).

    Trains DenseNet121 on a 20-volume IXI subset for binary gender
    classification using dictionary-based transforms, validating every
    second epoch with accuracy and ROC AUC, checkpointing the
    best-accuracy model and logging scalars to TensorBoard.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    data_dir = ["workspace", "data", "medical", "ixi", "IXI-T1"]
    filenames = [
        "IXI314-IOP-0889-T1.nii.gz",
        "IXI249-Guys-1072-T1.nii.gz",
        "IXI609-HH-2600-T1.nii.gz",
        "IXI173-HH-1590-T1.nii.gz",
        "IXI020-Guys-0700-T1.nii.gz",
        "IXI342-Guys-0909-T1.nii.gz",
        "IXI134-Guys-0780-T1.nii.gz",
        "IXI577-HH-2661-T1.nii.gz",
        "IXI066-Guys-0731-T1.nii.gz",
        "IXI130-HH-1528-T1.nii.gz",
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join(data_dir + [name]) for name in filenames]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
    train_files = [{"img": img, "label": label} for img, label in zip(images[:10], labels[:10])]
    val_files = [{"img": img, "label": label} for img, label in zip(images[-10:], labels[-10:])]

    # dictionary-based transforms for the image key
    train_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            RandRotate90d(keys=["img"], prob=0.8, spatial_axes=[0, 2]),
            ToTensord(keys=["img"]),
        ]
    )
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # sanity-check one batch before training
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["label"])

    # training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4, pin_memory=torch.cuda.is_available())

    # validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # model, loss and optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)

    # typical PyTorch training loop
    val_interval = 2  # validate every 2 epochs
    best_metric = -1
    best_metric_epoch = -1
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(device), batch_data["label"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                # accumulate all validation logits and labels for accuracy and AUC
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)

                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                auc_metric = compute_roc_auc(y_pred, y, to_onehot_y=True, softmax=True)
                if acc_metric > best_metric:
                    best_metric = acc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model_classification3d_dict.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current accuracy: {:.4f} current AUC: {:.4f} best accuracy: {:.4f} at epoch {}".format(
                        epoch + 1, acc_metric, auc_metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_accuracy", acc_metric, epoch + 1)
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()


if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/classification_3d_ignite/densenet_evaluation_array.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys

import numpy as np
import torch
from ignite.engine import _prepare_batch, create_supervised_evaluator
from ignite.metrics import Accuracy
from torch.utils.data import DataLoader

import monai
from monai.data import NiftiDataset
from monai.handlers import CheckpointLoader, ClassificationSaver, StatsHandler
from monai.transforms import AddChannel, Compose, Resize, ScaleIntensity, ToTensor


def main():
    """Ignite-based evaluation demo (array data format).

    Restores a DenseNet121 checkpoint produced by the
    "densenet_training_array" example and evaluates classification
    accuracy on a 10-volume IXI subset, saving per-sample predictions.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    data_dir = ["workspace", "data", "medical", "ixi", "IXI-T1"]
    filenames = [
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join(data_dir + [name]) for name in filenames]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)

    # transforms for the image array
    val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])
    # nifti dataset; image_only=False keeps the meta data alongside each sample
    val_ds = NiftiDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False)
    # create DenseNet121
    net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    metric_name = "Accuracy"
    # evaluation metric attached to the evaluator engine
    val_metrics = {metric_name: Accuracy()}

    def prepare_batch(batch, device=None, non_blocking=False):
        # batch is (img, label, meta); feed only (img, label) to the engine
        return _prepare_batch((batch[0], batch[1]), device, non_blocking)

    # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

    # stats event handler prints validation stats via the evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
    )
    val_stats_handler.attach(evaluator)

    # for the array data format, assume the 3rd item of batch data is the meta_data
    prediction_saver = ClassificationSaver(
        output_dir="tempdir",
        batch_transform=lambda batch: batch[2],
        output_transform=lambda output: output[0].argmax(1),
    )
    prediction_saver.attach(evaluator)

    # the model was trained by "densenet_training_array" example
    CheckpointLoader(load_path="./runs_array/net_checkpoint_20.pth", load_dict={"net": net}).attach(evaluator)

    # validation data loader
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    state = evaluator.run(val_loader)
    print(state)


if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/classification_3d_ignite/densenet_evaluation_dict.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys

import numpy as np
import torch
from ignite.engine import _prepare_batch, create_supervised_evaluator
from ignite.metrics import Accuracy
from torch.utils.data import DataLoader

import monai
from monai.handlers import CheckpointLoader, ClassificationSaver, StatsHandler
from monai.transforms import AddChanneld, Compose, LoadNiftid, Resized, ScaleIntensityd, ToTensord


def main():
    """Ignite-based evaluation demo (dictionary data format).

    Restores a DenseNet121 checkpoint produced by the
    "densenet_training_dict" example and evaluates classification
    accuracy on a 10-volume IXI subset, saving per-sample predictions.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    data_dir = ["workspace", "data", "medical", "ixi", "IXI-T1"]
    filenames = [
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join(data_dir + [name]) for name in filenames]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
    val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # dictionary-based transforms for the image key
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # create DenseNet121
    net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def prepare_batch(batch, device=None, non_blocking=False):
        # pull the (img, label) pair out of the batch dictionary for the engine
        return _prepare_batch((batch["img"], batch["label"]), device, non_blocking)

    metric_name = "Accuracy"
    # evaluation metric attached to the evaluator engine
    val_metrics = {metric_name: Accuracy()}
    # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

    # stats event handler prints validation stats via the evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
    )
    val_stats_handler.attach(evaluator)

    # for the dictionary data format, the meta data is stored under the "img_meta_dict" key
    prediction_saver = ClassificationSaver(
        output_dir="tempdir",
        name="evaluator",
        batch_transform=lambda batch: batch["img_meta_dict"],
        output_transform=lambda output: output[0].argmax(1),
    )
    prediction_saver.attach(evaluator)

    # the model was trained by "densenet_training_dict" example
    CheckpointLoader(load_path="./runs_dict/net_checkpoint_20.pth", load_dict={"net": net}).attach(evaluator)

    # validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    state = evaluator.run(val_loader)
    print(state)


if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/classification_3d_ignite/densenet_training_array.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys

import numpy as np
import torch
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import EarlyStopping, ModelCheckpoint
from ignite.metrics import Accuracy
from torch.utils.data import DataLoader

import monai
from monai.data import NiftiDataset
from monai.handlers import StatsHandler, TensorBoardStatsHandler, stopping_fn_from_metric
from monai.transforms import AddChannel, Compose, RandRotate90, Resize, ScaleIntensity, ToTensor


def main():
    """Train a 3D DenseNet121 gender classifier on IXI T1 volumes with Ignite.

    End-to-end example: builds array-style transforms, wraps the images in a
    ``NiftiDataset``, trains with ``create_supervised_trainer`` and validates
    every epoch with ``create_supervised_evaluator``, logging to stdout and
    TensorBoard and checkpointing after each epoch.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    data_root = os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1"])
    image_names = [
        "IXI314-IOP-0889-T1.nii.gz",
        "IXI249-Guys-1072-T1.nii.gz",
        "IXI609-HH-2600-T1.nii.gz",
        "IXI173-HH-1590-T1.nii.gz",
        "IXI020-Guys-0700-T1.nii.gz",
        "IXI342-Guys-0909-T1.nii.gz",
        "IXI134-Guys-0780-T1.nii.gz",
        "IXI577-HH-2661-T1.nii.gz",
        "IXI066-Guys-0731-T1.nii.gz",
        "IXI130-HH-1528-T1.nii.gz",
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join([data_root, name]) for name in image_names]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)

    # define transforms (random rotation is training-time augmentation only)
    train_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), RandRotate90(), ToTensor()])
    val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])

    # define nifti dataset, data loader -- peek at one batch to sanity-check shapes
    check_ds = NiftiDataset(image_files=images, labels=labels, transform=train_transforms)
    check_loader = DataLoader(check_ds, batch_size=2, num_workers=2, pin_memory=torch.cuda.is_available())
    im, label = monai.utils.misc.first(check_loader)
    print(type(im), im.shape, label)

    # create DenseNet121, CrossEntropyLoss and Adam optimizer
    net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2)
    loss = torch.nn.CrossEntropyLoss()
    lr = 1e-5
    opt = torch.optim.Adam(net.parameters(), lr)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Ignite trainer expects batch=(img, label) and returns output=loss at every iteration,
    # user can add output_transform to return other values, like: y_pred, y, etc.
    trainer = create_supervised_trainer(net, opt, loss, device, False)

    # adding checkpoint handler to save models (network params and optimizer stats) during training
    checkpoint_handler = ModelCheckpoint("./runs_array/", "net", n_saved=10, require_empty=False)
    trainer.add_event_handler(
        event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"net": net, "opt": opt}
    )

    # StatsHandler prints loss at every iteration and print metrics at every epoch,
    # we don't set metrics for trainer here, so just print loss, user can also customize print functions
    # and can use output_transform to convert engine.state.output if it's not loss value
    train_stats_handler = StatsHandler(name="trainer")
    train_stats_handler.attach(trainer)

    # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler
    train_tensorboard_stats_handler = TensorBoardStatsHandler()
    train_tensorboard_stats_handler.attach(trainer)

    # set parameters for validation
    validation_every_n_epochs = 1

    metric_name = "Accuracy"
    # add evaluation metric to the evaluator engine
    val_metrics = {metric_name: Accuracy()}
    # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,
    )  # fetch global epoch number from trainer
    val_stats_handler.attach(evaluator)

    # add handler to record metrics to TensorBoard at every epoch
    val_tensorboard_stats_handler = TensorBoardStatsHandler(
        output_transform=lambda x: None,  # no need to plot loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,
    )  # fetch global epoch number from trainer
    val_tensorboard_stats_handler.attach(evaluator)

    # add early stopping handler to evaluator: stop if Accuracy stalls for 4 validations
    early_stopper = EarlyStopping(patience=4, score_function=stopping_fn_from_metric(metric_name), trainer=trainer)
    evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper)

    # create a validation data loader (last 10 subjects)
    val_ds = NiftiDataset(image_files=images[-10:], labels=labels[-10:], transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=2, pin_memory=torch.cuda.is_available())

    @trainer.on(Events.EPOCH_COMPLETED(every=validation_every_n_epochs))
    def run_validation(engine):
        # `engine` is required by the Ignite handler signature, even if unused
        evaluator.run(val_loader)

    # create a training data loader (first 10 subjects)
    train_ds = NiftiDataset(image_files=images[:10], labels=labels[:10], transform=train_transforms)
    train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=2, pin_memory=torch.cuda.is_available())

    train_epochs = 30
    state = trainer.run(train_loader, train_epochs)
    print(state)


if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/classification_3d_ignite/densenet_training_dict.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys

import numpy as np
import torch
from ignite.engine import Events, _prepare_batch, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import EarlyStopping, ModelCheckpoint
from ignite.metrics import Accuracy
from torch.utils.data import DataLoader

import monai
from monai.handlers import ROCAUC, StatsHandler, TensorBoardStatsHandler, stopping_fn_from_metric
from monai.transforms import AddChanneld, Compose, LoadNiftid, RandRotate90d, Resized, ScaleIntensityd, ToTensord


def main():
    """Train a 3D DenseNet121 gender classifier on IXI T1 volumes (dict-style pipeline).

    Same task as the array-style example, but the data flows as dictionaries
    (``{"img": ..., "label": ...}``) through the ``*d`` dictionary transforms;
    a custom ``prepare_batch`` adapts the dict batches to the (input, target)
    tuples Ignite's supervised engines expect. Tracks Accuracy and ROC AUC.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    data_root = os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1"])
    image_names = [
        "IXI314-IOP-0889-T1.nii.gz",
        "IXI249-Guys-1072-T1.nii.gz",
        "IXI609-HH-2600-T1.nii.gz",
        "IXI173-HH-1590-T1.nii.gz",
        "IXI020-Guys-0700-T1.nii.gz",
        "IXI342-Guys-0909-T1.nii.gz",
        "IXI134-Guys-0780-T1.nii.gz",
        "IXI577-HH-2661-T1.nii.gz",
        "IXI066-Guys-0731-T1.nii.gz",
        "IXI130-HH-1528-T1.nii.gz",
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join([data_root, name]) for name in image_names]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
    train_files = [{"img": img, "label": label} for img, label in zip(images[:10], labels[:10])]
    val_files = [{"img": img, "label": label} for img, label in zip(images[-10:], labels[-10:])]

    # define transforms for image (random 90-degree rotation is training-only augmentation)
    train_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            RandRotate90d(keys=["img"], prob=0.8, spatial_axes=[0, 2]),
            ToTensord(keys=["img"]),
        ]
    )
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # define dataset, data loader -- peek at one batch to sanity-check shapes
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["label"])

    # create DenseNet121, CrossEntropyLoss and Adam optimizer
    net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2)
    loss = torch.nn.CrossEntropyLoss()
    lr = 1e-5
    opt = torch.optim.Adam(net.parameters(), lr)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Ignite trainer expects batch=(img, label) and returns output=loss at every iteration,
    # user can add output_transform to return other values, like: y_pred, y, etc.
    def prepare_batch(batch, device=None, non_blocking=False):
        # unpack the dict batch into the (input, target) tuple Ignite expects
        return _prepare_batch((batch["img"], batch["label"]), device, non_blocking)

    trainer = create_supervised_trainer(net, opt, loss, device, False, prepare_batch=prepare_batch)

    # adding checkpoint handler to save models (network params and optimizer stats) during training
    checkpoint_handler = ModelCheckpoint("./runs_dict/", "net", n_saved=10, require_empty=False)
    trainer.add_event_handler(
        event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"net": net, "opt": opt}
    )

    # StatsHandler prints loss at every iteration and print metrics at every epoch,
    # we don't set metrics for trainer here, so just print loss, user can also customize print functions
    # and can use output_transform to convert engine.state.output if it's not loss value
    train_stats_handler = StatsHandler(name="trainer")
    train_stats_handler.attach(trainer)

    # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler
    train_tensorboard_stats_handler = TensorBoardStatsHandler()
    train_tensorboard_stats_handler.attach(trainer)

    # set parameters for validation
    validation_every_n_epochs = 1

    metric_name = "Accuracy"
    # add evaluation metric to the evaluator engine
    val_metrics = {metric_name: Accuracy(), "AUC": ROCAUC(to_onehot_y=True, softmax=True)}
    # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,
    )  # fetch global epoch number from trainer
    val_stats_handler.attach(evaluator)

    # add handler to record metrics to TensorBoard at every epoch
    val_tensorboard_stats_handler = TensorBoardStatsHandler(
        output_transform=lambda x: None,  # no need to plot loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,
    )  # fetch global epoch number from trainer
    val_tensorboard_stats_handler.attach(evaluator)

    # add early stopping handler to evaluator: stop if Accuracy stalls for 4 validations
    early_stopper = EarlyStopping(patience=4, score_function=stopping_fn_from_metric(metric_name), trainer=trainer)
    evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper)

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    @trainer.on(Events.EPOCH_COMPLETED(every=validation_every_n_epochs))
    def run_validation(engine):
        # `engine` is required by the Ignite handler signature, even if unused
        evaluator.run(val_loader)

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4, pin_memory=torch.cuda.is_available())

    train_epochs = 30
    state = trainer.run(train_loader, train_epochs)
    print(state)


if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/distributed_training/unet_evaluation_ddp.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This example shows how to execute distributed evaluation based on PyTorch native `DistributedDataParallel` module.
It can run on several nodes with multiple GPU devices on every node.
Main steps to set up the distributed evaluation:

- Execute `torch.distributed.launch` to create processes on every node for every GPU.
  It receives parameters as below:
  `--nproc_per_node=NUM_GPUS_PER_NODE`
  `--nnodes=NUM_NODES`
  `--node_rank=INDEX_CURRENT_NODE`
  `--master_addr="192.168.1.1"`
  `--master_port=1234`
  For more details, refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py.
  Alternatively, we can also use `torch.multiprocessing.spawn` to start program, but in that case, need to handle
  all the above parameters and compute `rank` manually, then set to `init_process_group`, etc.
  `torch.distributed.launch` is even more efficient than `torch.multiprocessing.spawn`.
- Use `init_process_group` to initialize every process, every GPU runs in a separate process with unique rank.
  Here we use `NVIDIA NCCL` as the backend and must set `init_method="env://"` if use `torch.distributed.launch`.
- Wrap the model with `DistributedDataParallel` after moving to expected device.
- Put model file on every node, then load and map to expected GPU device in every process.
- Wrap Dataset with `DistributedSampler`, disable the `shuffle` in sampler and DataLoader.
- Compute `Dice Metric` on every process, reduce the results after synchronization.

Note:
    `torch.distributed.launch` will launch `nnodes * nproc_per_node = world_size` processes in total.
    Suggest setting exactly the same software environment for every node, especially `PyTorch`, `nccl`, etc.
    A good practice is to use the same MONAI docker image for all nodes directly.
    Example script to execute this program on every node:
    python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
           --nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
           --master_addr="192.168.1.1" --master_port=1234
           unet_evaluation_ddp.py -d DIR_OF_TESTDATA

This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3].

Referring to: https://pytorch.org/tutorials/intermediate/ddp_tutorial.html

"""

import argparse
import os
from glob import glob

import nibabel as nib
import numpy as np
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler

import monai
from monai.data import DataLoader, Dataset, create_test_image_3d
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.transforms import AsChannelFirstd, Compose, LoadNiftid, ScaleIntensityd, ToTensord


def evaluate(args):
    """Run distributed sliding-window evaluation of a saved UNet on synthetic data.

    Args:
        args: parsed CLI namespace with ``dir`` (test-data directory) and
            ``local_rank`` (GPU index on this node, injected by DDP launch).

    Side effects: generates synthetic NIfTI data on first run, loads
    ``final_model.pth``, and prints the reduced Dice metric on rank 0.
    """
    if args.local_rank == 0 and not os.path.exists(args.dir):
        # create 16 random image, mask pairs for evaluation
        print(f"generating synthetic data to {args.dir} (this may take a while)")
        os.makedirs(args.dir)
        # set random seed to generate same random data for every node
        np.random.seed(seed=0)
        for i in range(16):
            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
            n = nib.Nifti1Image(seg, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))

    # initialize the distributed evaluation process, every GPU runs in a process;
    # the rendezvous here also ensures rank 0 finished writing data before others read it
    dist.init_process_group(backend="nccl", init_method="env://")

    images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )

    # create an evaluation data loader
    val_ds = Dataset(data=val_files, transform=val_transforms)
    # create an evaluation data sampler so every process sees a distinct shard
    val_sampler = DistributedSampler(val_ds, shuffle=False)
    # sliding window inference need to input 1 image in every iteration
    val_loader = DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=2, pin_memory=True, sampler=val_sampler)
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

    # create UNet and move it to this process's GPU
    device = torch.device(f"cuda:{args.local_rank}")
    model = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    # wrap the model with DistributedDataParallel module
    model = DistributedDataParallel(model, device_ids=[args.local_rank])
    # config mapping to expected GPU device
    map_location = {"cuda:0": f"cuda:{args.local_rank}"}
    # load model parameters to GPU device
    model.load_state_dict(torch.load("final_model.pth", map_location=map_location))

    model.eval()
    with torch.no_grad():
        # define PyTorch Tensor to record metrics result at each GPU
        # the first value is `sum` of all dice metric, the second value is `count` of not_nan items
        metric = torch.zeros(2, dtype=torch.float, device=device)
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
            # define sliding window size and batch size for windows inference
            roi_size = (96, 96, 96)
            sw_batch_size = 4
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            value = dice_metric(y_pred=val_outputs, y=val_labels).squeeze()
            # accumulate a weighted sum so the cross-process mean is exact
            metric[0] += value * dice_metric.not_nans
            metric[1] += dice_metric.not_nans
        # synchronizes all processes and reduce results
        dist.barrier()
        dist.all_reduce(metric, op=torch.distributed.ReduceOp.SUM)
        metric = metric.tolist()
        if dist.get_rank() == 0:
            print("evaluation metric:", metric[0] / metric[1])
        dist.destroy_process_group()


def main():
    """Parse CLI arguments and launch the distributed evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--dir", default="./testdata", type=str, help="directory to create random data")
    # must parse the command-line argument: ``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by DDP
    parser.add_argument("--local_rank", type=int)
    args = parser.parse_args()

    evaluate(args=args)


# usage example(refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py):

# python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
#        --nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
#        --master_addr="192.168.1.1" --master_port=1234
#        unet_evaluation_ddp.py -d DIR_OF_TESTDATA

if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/distributed_training/unet_evaluation_horovod.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
"""
|
| 13 |
+
This example shows how to execute distributed evaluation based on Horovod APIs.
|
| 14 |
+
It can run on several nodes with multiple GPU devices on every node.
|
| 15 |
+
Main steps to set up the distributed evaluation:
|
| 16 |
+
|
| 17 |
+
- Install Horovod referring to the guide: https://github.com/horovod/horovod/blob/master/docs/gpus.rst
|
| 18 |
+
If using MONAI docker, which already has NCCL and MPI, can quickly install Horovod with command:
|
| 19 |
+
`HOROVOD_NCCL_INCLUDE=/usr/include HOROVOD_NCCL_LIB=/usr/lib/x86_64-linux-gnu HOROVOD_GPU_OPERATIONS=NCCL \
|
| 20 |
+
pip install --no-cache-dir horovod`
|
| 21 |
+
- Set SSH permissions for root login without password at all nodes except master, referring to:
|
| 22 |
+
http://www.linuxproblem.org/art_9.html
|
| 23 |
+
- Run `hvd.init()` to initialize Horovod.
|
| 24 |
+
- Pin each GPU to a single process to avoid resource contention, use `hvd.local_rank()` to get GPU index.
|
| 25 |
+
And use `hvd.rank()` to get the overall rank index.
|
| 26 |
+
- Wrap Dataset with `DistributedSampler`, disable `shuffle` for sampler and DataLoader.
|
| 27 |
+
- Broadcast the model parameters from rank 0 to all other processes.
|
| 28 |
+
|
| 29 |
+
Note:
|
| 30 |
+
Suggest setting exactly the same software environment for every node, especially `mpi`, `nccl`, etc.
|
| 31 |
+
A good practice is to use the same MONAI docker image for all nodes directly, if using docker, need
|
| 32 |
+
to set SSH permissions both at the node and in docker, referring to Horovod guide for more details:
|
| 33 |
+
https://github.com/horovod/horovod/blob/master/docs/docker.rst
|
| 34 |
+
|
| 35 |
+
Example script to execute this program, only need to run on the master node:
|
| 36 |
+
`horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_evaluation_horovod.py -d "./testdata"`
|
| 37 |
+
|
| 38 |
+
This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3], [horovod 0.19.5].
|
| 39 |
+
|
| 40 |
+
Referring to: https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py
|
| 41 |
+
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
import argparse
|
| 45 |
+
import os
|
| 46 |
+
from glob import glob
|
| 47 |
+
|
| 48 |
+
import horovod.torch as hvd
|
| 49 |
+
import nibabel as nib
|
| 50 |
+
import numpy as np
|
| 51 |
+
import torch
|
| 52 |
+
import torch.multiprocessing as mp
|
| 53 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 54 |
+
|
| 55 |
+
import monai
|
| 56 |
+
from monai.data import DataLoader, Dataset, create_test_image_3d
|
| 57 |
+
from monai.inferers import sliding_window_inference
|
| 58 |
+
from monai.metrics import DiceMetric
|
| 59 |
+
from monai.transforms import AsChannelFirstd, Compose, LoadNiftid, ScaleIntensityd, ToTensord
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def evaluate(args):
    """Run distributed sliding-window evaluation of a 3D UNet with Horovod.

    Each Horovod process evaluates its own shard of the dataset on its pinned
    GPU; the per-rank (sum, count) Dice statistics are then averaged across
    all ranks with ``hvd.allreduce``.

    Args:
        args: parsed command-line namespace; only ``args.dir`` (the data
            directory) is read here.
    """
    # initialize Horovod library; must precede any other hvd.* call
    hvd.init()
    # Horovod limits CPU threads to be used per worker
    torch.set_num_threads(1)

    # only the first process on each node generates the synthetic dataset
    if hvd.local_rank() == 0 and not os.path.exists(args.dir):
        # create 16 random image, mask pairs for evaluation
        print(f"generating synthetic data to {args.dir} (this may take a while)")
        os.makedirs(args.dir)
        # set random seed to generate same random data for every node
        np.random.seed(seed=0)
        for i in range(16):
            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
            n = nib.Nifti1Image(seg, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))
    # NOTE(review): processes with local_rank != 0 may glob an empty/partial folder
    # if they reach the lines below before rank 0 finishes writing — there is no
    # barrier here; confirm this is acceptable for the example.

    images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )

    # create an evaluation dataset
    val_ds = Dataset(data=val_files, transform=val_transforms)
    # create an evaluation data sampler: every rank reads a disjoint shard, in order
    val_sampler = DistributedSampler(val_ds, shuffle=False, num_replicas=hvd.size(), rank=hvd.rank())
    # when supported, use "forkserver" to spawn dataloader workers instead of "fork" to prevent
    # issues with Infiniband implementations that are not fork-safe
    multiprocessing_context = None
    if hasattr(mp, "_supports_context") and mp._supports_context and "forkserver" in mp.get_all_start_methods():
        multiprocessing_context = "forkserver"
    # sliding window inference need to input 1 image in every iteration
    val_loader = DataLoader(
        val_ds,
        batch_size=1,
        shuffle=False,
        num_workers=2,
        pin_memory=True,
        sampler=val_sampler,
        multiprocessing_context=multiprocessing_context,
    )
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

    # create UNet and move it to this process's pinned GPU
    device = torch.device(f"cuda:{hvd.local_rank()}")
    model = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    if hvd.rank() == 0:
        # load model parameters for evaluation on the global rank 0 only
        model.load_state_dict(torch.load("final_model.pth"))
    # Horovod broadcasts parameters from rank 0 to all other processes; this is a
    # collective call, so it must run on every rank (outside the `if` above)
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)

    model.eval()
    with torch.no_grad():
        # define PyTorch Tensor to record metrics result at each GPU
        # the first value is `sum` of all dice metric, the second value is `count` of not_nan items
        metric = torch.zeros(2, dtype=torch.float, device=device)
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
            # define sliding window size and batch size for windows inference
            roi_size = (96, 96, 96)
            sw_batch_size = 4
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            value = dice_metric(y_pred=val_outputs, y=val_labels).squeeze()
            # accumulate a weighted sum and a count so means can be combined exactly across ranks
            metric[0] += value * dice_metric.not_nans
            metric[1] += dice_metric.not_nans
        # synchronizes all processes and reduce results
        print(f"metric in rank {hvd.rank()}: sum={metric[0].item()}, count={metric[1].item()}")
        avg_metric = hvd.allreduce(metric, name="mean_dice")
        if hvd.rank() == 0:
            print(f"average metric: sum={avg_metric[0].item()}, count={avg_metric[1].item()}")
            print("evaluation metric:", (avg_metric[0] / avg_metric[1]).item())
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def main():
    """Parse the command-line options and launch the Horovod evaluation."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-d", "--dir", default="./testdata", type=str, help="directory to create random data")
    parsed = arg_parser.parse_args()
    evaluate(args=parsed)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
# Example script to execute this program only on the master node:
# horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_evaluation_horovod.py -d "./testdata"
# entry-point guard: lets the module be imported without starting an evaluation
if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/distributed_training/unet_evaluation_workflows.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
"""
|
| 13 |
+
This example shows how to execute distributed evaluation based on PyTorch native `DistributedDataParallel` module
|
| 14 |
+
and MONAI workflows. It can run on several nodes with multiple GPU devices on every node.
|
| 15 |
+
Main steps to set up the distributed evaluation:
|
| 16 |
+
|
| 17 |
+
- Execute `torch.distributed.launch` to create processes on every node for every GPU.
|
| 18 |
+
It receives parameters as below:
|
| 19 |
+
`--nproc_per_node=NUM_GPUS_PER_NODE`
|
| 20 |
+
`--nnodes=NUM_NODES`
|
| 21 |
+
`--node_rank=INDEX_CURRENT_NODE`
|
| 22 |
+
`--master_addr="192.168.1.1"`
|
| 23 |
+
`--master_port=1234`
|
| 24 |
+
For more details, refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py.
|
| 25 |
+
Alternatively, we can also use `torch.multiprocessing.spawn` to start the program, but in that case, we need to handle
|
| 26 |
+
all the above parameters and compute `rank` manually, then set to `init_process_group`, etc.
|
| 27 |
+
`torch.distributed.launch` is even more efficient than `torch.multiprocessing.spawn`.
|
| 28 |
+
- Use `init_process_group` to initialize every process, every GPU runs in a separate process with unique rank.
|
| 29 |
+
Here we use `NVIDIA NCCL` as the backend and must set `init_method="env://"` if use `torch.distributed.launch`.
|
| 30 |
+
- Wrap the model with `DistributedDataParallel` after moving to expected device.
|
| 31 |
+
- Put model file on every node, then load and map to expected GPU device in every process.
|
| 32 |
+
- Wrap Dataset with `DistributedSampler`, disable the `shuffle` in sampler and DataLoader.
|
| 33 |
+
- Add `StatsHandler` and `SegmentationSaver` to the master process which is `dist.get_rank() == 0`.
|
| 34 |
+
- ignite can automatically reduce metrics for distributed evaluation, refer to:
|
| 35 |
+
https://github.com/pytorch/ignite/blob/v0.3.0/ignite/metrics/metric.py#L85
|
| 36 |
+
|
| 37 |
+
Note:
|
| 38 |
+
`torch.distributed.launch` will launch `nnodes * nproc_per_node = world_size` processes in total.
|
| 39 |
+
Suggest setting exactly the same software environment for every node, especially `PyTorch`, `nccl`, etc.
|
| 40 |
+
A good practice is to use the same MONAI docker image for all nodes directly.
|
| 41 |
+
Example script to execute this program on every node:
|
| 42 |
+
python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
|
| 43 |
+
--nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
|
| 44 |
+
--master_addr="192.168.1.1" --master_port=1234
|
| 45 |
+
unet_evaluation_workflows.py -d DIR_OF_TESTDATA
|
| 46 |
+
|
| 47 |
+
This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3].
|
| 48 |
+
|
| 49 |
+
Referring to: https://pytorch.org/tutorials/intermediate/ddp_tutorial.html
|
| 50 |
+
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
import argparse
|
| 54 |
+
import logging
|
| 55 |
+
import os
|
| 56 |
+
import sys
|
| 57 |
+
from glob import glob
|
| 58 |
+
|
| 59 |
+
import nibabel as nib
|
| 60 |
+
import numpy as np
|
| 61 |
+
import torch
|
| 62 |
+
import torch.distributed as dist
|
| 63 |
+
from ignite.metrics import Accuracy
|
| 64 |
+
from torch.nn.parallel import DistributedDataParallel
|
| 65 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 66 |
+
|
| 67 |
+
import monai
|
| 68 |
+
from monai.data import DataLoader, Dataset, create_test_image_3d
|
| 69 |
+
from monai.engines import SupervisedEvaluator
|
| 70 |
+
from monai.handlers import CheckpointLoader, MeanDice, SegmentationSaver, StatsHandler
|
| 71 |
+
from monai.inferers import SlidingWindowInferer
|
| 72 |
+
from monai.transforms import (
|
| 73 |
+
Activationsd,
|
| 74 |
+
AsChannelFirstd,
|
| 75 |
+
AsDiscreted,
|
| 76 |
+
Compose,
|
| 77 |
+
KeepLargestConnectedComponentd,
|
| 78 |
+
LoadNiftid,
|
| 79 |
+
ScaleIntensityd,
|
| 80 |
+
ToTensord,
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def evaluate(args):
    """Run distributed evaluation with PyTorch native DistributedDataParallel and MONAI workflows.

    Expected to be launched once per GPU via ``torch.distributed.launch``.

    Args:
        args: parsed command-line namespace; reads ``args.local_rank`` (this
            process's GPU index on the node, injected by the launcher) and
            ``args.dir`` (the data directory).
    """
    # only local rank 0 generates the synthetic dataset; this runs before
    # init_process_group, so no dist barrier is available here
    if args.local_rank == 0 and not os.path.exists(args.dir):
        # create 16 random image, mask pairs for evaluation
        print(f"generating synthetic data to {args.dir} (this may take a while)")
        os.makedirs(args.dir)
        # set random seed to generate same random data for every node
        np.random.seed(seed=0)
        for i in range(16):
            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
            n = nib.Nifti1Image(seg, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))

    # initialize the distributed evaluation process, every GPU runs in a process
    dist.init_process_group(backend="nccl", init_method="env://")

    images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
    val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create an evaluation dataset
    val_ds = Dataset(data=val_files, transform=val_transforms)
    # create an evaluation data sampler: each rank reads a disjoint shard, in order
    val_sampler = DistributedSampler(val_ds, shuffle=False)
    # sliding window inference need to input 1 image in every iteration
    val_loader = DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=2, pin_memory=True, sampler=val_sampler)

    # create UNet on this process's GPU
    device = torch.device(f"cuda:{args.local_rank}")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    # wrap the model with DistributedDataParallel module
    net = DistributedDataParallel(net, device_ids=[args.local_rank])

    # post transforms convert raw logits into a binary mask prediction
    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    # every rank loads the checkpoint (the model file must exist on every node)
    val_handlers = [
        CheckpointLoader(
            load_path="./runs/checkpoint_epoch=4.pth",
            load_dict={"net": net},
            # config mapping to expected GPU device
            map_location={"cuda:0": f"cuda:{args.local_rank}"},
        ),
    ]
    # logging and output saving are attached to the master process only
    if dist.get_rank() == 0:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        val_handlers.extend(
            [
                StatsHandler(output_transform=lambda x: None),
                SegmentationSaver(
                    output_dir="./runs/",
                    batch_transform=lambda batch: batch["image_meta_dict"],
                    output_transform=lambda output: output["pred"],
                ),
            ]
        )

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        # ignite reduces this metric across ranks automatically in distributed mode
        key_val_metric={
            "val_mean_dice": MeanDice(
                include_background=True,
                output_transform=lambda x: (x["pred"], x["label"]),
                device=device,
            )
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]), device=device)},
        val_handlers=val_handlers,
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
        amp=True if monai.config.get_torch_version_tuple() >= (1, 6) else False,
    )
    evaluator.run()
    dist.destroy_process_group()
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def main():
    """Parse the command-line options and launch the distributed evaluation."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-d", "--dir", default="./testdata", type=str, help="directory to create random data")
    # the launcher (torch.distributed.launch) injects ``--local_rank=LOCAL_PROCESS_RANK``,
    # so this option must be accepted here
    arg_parser.add_argument("--local_rank", type=int)
    parsed = arg_parser.parse_args()
    evaluate(args=parsed)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# usage example (refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py):

# python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
#        --nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
#        --master_addr="192.168.1.1" --master_port=1234
#        unet_evaluation_workflows.py -d DIR_OF_TESTDATA

# entry-point guard: lets the module be imported without starting an evaluation
if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/distributed_training/unet_training_ddp.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
"""
|
| 13 |
+
This example shows how to execute distributed training based on PyTorch native `DistributedDataParallel` module.
|
| 14 |
+
It can run on several nodes with multiple GPU devices on every node.
|
| 15 |
+
Main steps to set up the distributed training:
|
| 16 |
+
|
| 17 |
+
- Execute `torch.distributed.launch` to create processes on every node for every GPU.
|
| 18 |
+
It receives parameters as below:
|
| 19 |
+
`--nproc_per_node=NUM_GPUS_PER_NODE`
|
| 20 |
+
`--nnodes=NUM_NODES`
|
| 21 |
+
`--node_rank=INDEX_CURRENT_NODE`
|
| 22 |
+
`--master_addr="192.168.1.1"`
|
| 23 |
+
`--master_port=1234`
|
| 24 |
+
For more details, refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py.
|
| 25 |
+
Alternatively, we can also use `torch.multiprocessing.spawn` to start the program, but in that case, we need to handle
|
| 26 |
+
all the above parameters and compute `rank` manually, then set to `init_process_group`, etc.
|
| 27 |
+
`torch.distributed.launch` is even more efficient than `torch.multiprocessing.spawn` during training.
|
| 28 |
+
- Use `init_process_group` to initialize every process, every GPU runs in a separate process with unique rank.
|
| 29 |
+
Here we use `NVIDIA NCCL` as the backend and must set `init_method="env://"` if use `torch.distributed.launch`.
|
| 30 |
+
- Wrap the model with `DistributedDataParallel` after moving to expected device.
|
| 31 |
+
- Wrap Dataset with `DistributedSampler`, and disable the `shuffle` in DataLoader.
|
| 32 |
+
Instead, shuffle data by `train_sampler.set_epoch(epoch)` before every epoch.
|
| 33 |
+
|
| 34 |
+
Note:
|
| 35 |
+
`torch.distributed.launch` will launch `nnodes * nproc_per_node = world_size` processes in total.
|
| 36 |
+
Suggest setting exactly the same software environment for every node, especially `PyTorch`, `nccl`, etc.
|
| 37 |
+
A good practice is to use the same MONAI docker image for all nodes directly.
|
| 38 |
+
Example script to execute this program on every node:
|
| 39 |
+
python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
|
| 40 |
+
--nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
|
| 41 |
+
--master_addr="192.168.1.1" --master_port=1234
|
| 42 |
+
unet_training_ddp.py -d DIR_OF_TESTDATA
|
| 43 |
+
|
| 44 |
+
This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3].
|
| 45 |
+
|
| 46 |
+
Referring to: https://pytorch.org/tutorials/intermediate/ddp_tutorial.html
|
| 47 |
+
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
import argparse
|
| 51 |
+
import os
|
| 52 |
+
import sys
|
| 53 |
+
from glob import glob
|
| 54 |
+
|
| 55 |
+
import nibabel as nib
|
| 56 |
+
import numpy as np
|
| 57 |
+
import torch
|
| 58 |
+
import torch.distributed as dist
|
| 59 |
+
from torch.nn.parallel import DistributedDataParallel
|
| 60 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 61 |
+
|
| 62 |
+
import monai
|
| 63 |
+
from monai.data import DataLoader, Dataset, create_test_image_3d
|
| 64 |
+
from monai.transforms import (
|
| 65 |
+
AsChannelFirstd,
|
| 66 |
+
Compose,
|
| 67 |
+
LoadNiftid,
|
| 68 |
+
RandCropByPosNegLabeld,
|
| 69 |
+
RandRotate90d,
|
| 70 |
+
ScaleIntensityd,
|
| 71 |
+
ToTensord,
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def train(args):
    """Train a 3D UNet on synthetic data with PyTorch native DistributedDataParallel.

    Expected to be launched once per GPU via ``torch.distributed.launch``.

    Args:
        args: parsed command-line namespace; reads ``args.local_rank`` (this
            process's GPU index on the node, injected by the launcher) and
            ``args.dir`` (the data directory).
    """
    # disable logging for processes except 0 on every node
    if args.local_rank != 0:
        f = open(os.devnull, "w")
        sys.stdout = sys.stderr = f
    elif not os.path.exists(args.dir):
        # create 40 random image, mask pairs for training
        print(f"generating synthetic data to {args.dir} (this may take a while)")
        os.makedirs(args.dir)
        # set random seed to generate same random data for every node
        np.random.seed(seed=0)
        for i in range(40):
            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
            n = nib.Nifti1Image(seg, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))

    # initialize the distributed training process, every GPU runs in a process
    dist.init_process_group(backend="nccl", init_method="env://")

    images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
    train_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            RandCropByPosNegLabeld(
                keys=["img", "seg"], label_key="seg", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["img", "seg"]),
        ]
    )

    # create a training dataset
    train_ds = Dataset(data=train_files, transform=train_transforms)
    # create a training data sampler (shuffles by default; re-seeded per epoch below)
    train_sampler = DistributedSampler(train_ds)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=False,
        num_workers=2,
        pin_memory=True,
        sampler=train_sampler,
    )

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device(f"cuda:{args.local_rank}")
    model = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss_function = monai.losses.DiceLoss(sigmoid=True).to(device)
    optimizer = torch.optim.Adam(model.parameters(), 1e-3)
    # wrap the model with DistributedDataParallel module (gradients are averaged
    # across ranks during backward)
    model = DistributedDataParallel(model, device_ids=[args.local_rank])

    # start a typical PyTorch training
    epoch_loss_values = list()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        # reshuffle each rank's shard differently every epoch
        train_sampler.set_epoch(epoch)
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            # NOTE(review): this uses the full dataset length, not this rank's shard,
            # so the printed total steps per epoch looks world_size times too large —
            # confirm whether that is intended for the example output.
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
    print(f"train completed, epoch losses: {epoch_loss_values}")
    if dist.get_rank() == 0:
        # all processes should see same parameters as they all start from same
        # random parameters and gradients are synchronized in backward passes,
        # therefore, saving it in one process is sufficient
        # NOTE(review): `model` is DDP-wrapped here, so the saved state dict has
        # "module."-prefixed keys; consumers of "final_model.pth" must handle the
        # prefix — verify against the evaluation scripts.
        torch.save(model.state_dict(), "final_model.pth")
    dist.destroy_process_group()
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def main():
    """Parse the command-line options and launch the distributed training."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-d", "--dir", default="./testdata", type=str, help="directory to create random data")
    # the launcher (torch.distributed.launch) injects ``--local_rank=LOCAL_PROCESS_RANK``,
    # so this option must be accepted here
    arg_parser.add_argument("--local_rank", type=int)
    parsed = arg_parser.parse_args()
    train(args=parsed)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
# usage example (refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py):

# python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
#        --nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
#        --master_addr="192.168.1.1" --master_port=1234
#        unet_training_ddp.py -d DIR_OF_TESTDATA

# entry-point guard: lets the module be imported without starting a training run
if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/distributed_training/unet_training_horovod.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
"""
|
| 13 |
+
This example shows how to execute distributed training based on Horovod APIs.
|
| 14 |
+
It can run on several nodes with multiple GPU devices on every node.
|
| 15 |
+
Main steps to set up the distributed training:
|
| 16 |
+
|
| 17 |
+
- Install Horovod referring to the guide: https://github.com/horovod/horovod/blob/master/docs/gpus.rst
|
| 18 |
+
If using MONAI docker, which already has NCCL and MPI, can quickly install Horovod with command:
|
| 19 |
+
`HOROVOD_NCCL_INCLUDE=/usr/include HOROVOD_NCCL_LIB=/usr/lib/x86_64-linux-gnu HOROVOD_GPU_OPERATIONS=NCCL \
|
| 20 |
+
pip install --no-cache-dir horovod`
|
| 21 |
+
- Set SSH permissions for root login without password at all nodes except master, referring to:
|
| 22 |
+
http://www.linuxproblem.org/art_9.html
|
| 23 |
+
- Run `hvd.init()` to initialize Horovod.
|
| 24 |
+
- Pin each GPU to a single process to avoid resource contention, use `hvd.local_rank()` to get GPU index.
|
| 25 |
+
And use `hvd.rank()` to get the overall rank index.
|
| 26 |
+
- Wrap Dataset with `DistributedSampler`, and disable the `shuffle` in DataLoader.
|
| 27 |
+
Instead, shuffle data by `train_sampler.set_epoch(epoch)` before every epoch.
|
| 28 |
+
- Wrap the optimizer in hvd.DistributedOptimizer. The distributed optimizer delegates gradient
|
| 29 |
+
computation to the original optimizer, averages gradients using allreduce or allgather,
|
| 30 |
+
and then applies those averaged gradients.
|
| 31 |
+
- Broadcast the initial variable states from rank 0 to all other processes.
|
| 32 |
+
|
| 33 |
+
Note:
|
| 34 |
+
Suggest setting exactly the same software environment for every node, especially `mpi`, `nccl`, etc.
|
| 35 |
+
A good practice is to use the same MONAI docker image for all nodes directly, if using docker, need
|
| 36 |
+
to set SSH permissions both at the node and in docker, referring to Horovod guide for more details:
|
| 37 |
+
https://github.com/horovod/horovod/blob/master/docs/docker.rst
|
| 38 |
+
|
| 39 |
+
Example script to execute this program, only need to run on the master node:
|
| 40 |
+
`horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_training_horovod.py -d "./testdata"`
|
| 41 |
+
|
| 42 |
+
This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3], [horovod 0.19.5].
|
| 43 |
+
|
| 44 |
+
Referring to: https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py
|
| 45 |
+
|
| 46 |
+
"""
|
| 47 |
+
|
| 48 |
+
import argparse
|
| 49 |
+
import os
|
| 50 |
+
import sys
|
| 51 |
+
from glob import glob
|
| 52 |
+
|
| 53 |
+
import horovod.torch as hvd
|
| 54 |
+
import nibabel as nib
|
| 55 |
+
import numpy as np
|
| 56 |
+
import torch
|
| 57 |
+
import torch.multiprocessing as mp
|
| 58 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 59 |
+
|
| 60 |
+
import monai
|
| 61 |
+
from monai.data import DataLoader, Dataset, create_test_image_3d
|
| 62 |
+
from monai.transforms import (
|
| 63 |
+
AsChannelFirstd,
|
| 64 |
+
Compose,
|
| 65 |
+
LoadNiftid,
|
| 66 |
+
RandCropByPosNegLabeld,
|
| 67 |
+
RandRotate90d,
|
| 68 |
+
ScaleIntensityd,
|
| 69 |
+
ToTensord,
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def train(args):
    """Run distributed 3D UNet training on synthetic data using Horovod.

    Args:
        args: parsed command-line arguments; ``args.dir`` is the directory
            that holds (or receives) the synthetic image/segmentation pairs.
    """
    # initialize the Horovod library; every GPU runs in a separate process
    hvd.init()
    # Horovod limits CPU threads to be used per worker
    torch.set_num_threads(1)
    # disable logging for processes except 0 on every node
    if hvd.local_rank() != 0:
        f = open(os.devnull, "w")
        sys.stdout = sys.stderr = f
    elif not os.path.exists(args.dir):
        # create 40 random image/mask pairs on the master node for training
        print(f"generating synthetic data to {args.dir} (this may take a while)")
        os.makedirs(args.dir)
        # set random seed to generate the same random data for every node
        np.random.seed(seed=0)
        for i in range(40):
            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
            n = nib.Nifti1Image(seg, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))

    images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
    train_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            RandCropByPosNegLabeld(
                keys=["img", "seg"], label_key="seg", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["img", "seg"]),
        ]
    )

    # create a training data loader
    train_ds = Dataset(data=train_files, transform=train_transforms)
    # create a training data sampler so every rank sees a distinct data shard;
    # shuffling is driven by train_sampler.set_epoch() below, not the loader
    train_sampler = DistributedSampler(train_ds, num_replicas=hvd.size(), rank=hvd.rank())
    # when supported, use "forkserver" to spawn dataloader workers instead of "fork" to prevent
    # issues with Infiniband implementations that are not fork-safe
    multiprocessing_context = None
    if hasattr(mp, "_supports_context") and mp._supports_context and "forkserver" in mp.get_all_start_methods():
        multiprocessing_context = "forkserver"
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=False,
        num_workers=2,
        pin_memory=True,
        sampler=train_sampler,
        multiprocessing_context=multiprocessing_context,
    )

    # create UNet, DiceLoss and Adam optimizer; pin each process to one GPU
    device = torch.device(f"cuda:{hvd.local_rank()}")
    model = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss_function = monai.losses.DiceLoss(sigmoid=True).to(device)
    optimizer = torch.optim.Adam(model.parameters(), 1e-3)
    # Horovod broadcasts parameters & optimizer state from rank 0 so that
    # every worker starts from identical weights
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)
    # Horovod wraps the optimizer with DistributedOptimizer to average gradients
    optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())

    # start a typical PyTorch training
    epoch_loss_values = []
    # the progress denominator is loop-invariant, so compute it once;
    # NOTE(review): it counts batches over the whole dataset, while each rank
    # only iterates its own shard — confirm the intended progress semantics
    epoch_len = len(train_ds) // train_loader.batch_size
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        # reshuffle the per-rank shards deterministically for this epoch
        train_sampler.set_epoch(epoch)
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
    print(f"train completed, epoch losses: {epoch_loss_values}")
    if hvd.rank() == 0:
        # all processes should see same parameters as they all start from same
        # random parameters and gradients are synchronized in backward passes,
        # therefore, saving it in one process is sufficient
        torch.save(model.state_dict(), "final_model.pth")
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def main():
    """Parse command-line options and launch Horovod-based training."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-d", "--dir", default="./testdata", type=str, help="directory to create random data"
    )
    parsed = arg_parser.parse_args()

    train(args=parsed)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
# Example script to execute this program only on the master node:
|
| 191 |
+
# horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_training_horovod.py -d "./testdata"
|
| 192 |
+
# script entry point: run training only when executed directly, not on import
if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/distributed_training/unet_training_workflows.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
"""
|
| 13 |
+
This example shows how to execute distributed training based on PyTorch native `DistributedDataParallel` module
|
| 14 |
+
and MONAI workflows. It can run on several nodes with multiple GPU devices on every node.
|
| 15 |
+
Main steps to set up the distributed training:
|
| 16 |
+
|
| 17 |
+
- Execute `torch.distributed.launch` to create processes on every node for every GPU.
|
| 18 |
+
It receives parameters as below:
|
| 19 |
+
`--nproc_per_node=NUM_GPUS_PER_NODE`
|
| 20 |
+
`--nnodes=NUM_NODES`
|
| 21 |
+
`--node_rank=INDEX_CURRENT_NODE`
|
| 22 |
+
`--master_addr="192.168.1.1"`
|
| 23 |
+
`--master_port=1234`
|
| 24 |
+
For more details, refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py.
|
| 25 |
+
Alternatively, we can also use `torch.multiprocessing.spawn` to start the program, but in that case, we need to handle
|
| 26 |
+
all the above parameters and compute `rank` manually, then set to `init_process_group`, etc.
|
| 27 |
+
`torch.distributed.launch` is even more efficient than `torch.multiprocessing.spawn` during training.
|
| 28 |
+
- Use `init_process_group` to initialize every process, every GPU runs in a separate process with unique rank.
|
| 29 |
+
Here we use `NVIDIA NCCL` as the backend and must set `init_method="env://"` if use `torch.distributed.launch`.
|
| 30 |
+
- Wrap the model with `DistributedDataParallel` after moving to expected device.
|
| 31 |
+
- Wrap Dataset with `DistributedSampler`, and disable the `shuffle` in DataLoader.
|
| 32 |
+
Instead, `SupervisedTrainer` shuffles data by `train_sampler.set_epoch(epoch)` before every epoch.
|
| 33 |
+
- Add `StatsHandler` and `CheckpointHandler` to the master process which is `dist.get_rank() == 0`.
|
| 34 |
+
- ignite can automatically reduce metrics for distributed training, refer to:
|
| 35 |
+
https://github.com/pytorch/ignite/blob/v0.3.0/ignite/metrics/metric.py#L85
|
| 36 |
+
|
| 37 |
+
Note:
|
| 38 |
+
`torch.distributed.launch` will launch `nnodes * nproc_per_node = world_size` processes in total.
|
| 39 |
+
Suggest setting exactly the same software environment for every node, especially `PyTorch`, `nccl`, etc.
|
| 40 |
+
A good practice is to use the same MONAI docker image for all nodes directly.
|
| 41 |
+
Example script to execute this program on every node:
|
| 42 |
+
python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
|
| 43 |
+
--nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
|
| 44 |
+
--master_addr="192.168.1.1" --master_port=1234
|
| 45 |
+
unet_training_workflows.py -d DIR_OF_TESTDATA
|
| 46 |
+
|
| 47 |
+
This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3].
|
| 48 |
+
|
| 49 |
+
Referring to: https://pytorch.org/tutorials/intermediate/ddp_tutorial.html
|
| 50 |
+
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
import argparse
|
| 54 |
+
import logging
|
| 55 |
+
import os
|
| 56 |
+
import sys
|
| 57 |
+
from glob import glob
|
| 58 |
+
|
| 59 |
+
import nibabel as nib
|
| 60 |
+
import numpy as np
|
| 61 |
+
import torch
|
| 62 |
+
import torch.distributed as dist
|
| 63 |
+
from ignite.metrics import Accuracy
|
| 64 |
+
from torch.nn.parallel import DistributedDataParallel
|
| 65 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 66 |
+
|
| 67 |
+
import monai
|
| 68 |
+
from monai.data import DataLoader, Dataset, create_test_image_3d
|
| 69 |
+
from monai.engines import SupervisedTrainer
|
| 70 |
+
from monai.handlers import CheckpointSaver, LrScheduleHandler, StatsHandler
|
| 71 |
+
from monai.inferers import SimpleInferer
|
| 72 |
+
from monai.transforms import (
|
| 73 |
+
Activationsd,
|
| 74 |
+
AsChannelFirstd,
|
| 75 |
+
AsDiscreted,
|
| 76 |
+
Compose,
|
| 77 |
+
KeepLargestConnectedComponentd,
|
| 78 |
+
LoadNiftid,
|
| 79 |
+
RandCropByPosNegLabeld,
|
| 80 |
+
RandRotate90d,
|
| 81 |
+
ScaleIntensityd,
|
| 82 |
+
ToTensord,
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def train(args):
    """Run distributed UNet training with MONAI workflows and PyTorch DDP.

    Args:
        args: parsed command-line arguments; ``args.dir`` is the data directory
            and ``args.local_rank`` is the per-node process rank supplied by
            ``torch.distributed.launch``.
    """
    if args.local_rank == 0 and not os.path.exists(args.dir):
        # create 40 random image/mask pairs for training
        # NOTE(review): data is generated before the process group exists, so
        # there is no barrier keeping other local ranks from globbing an
        # incomplete directory — acceptable for a demo; pre-generate data for
        # real multi-process runs.
        print(f"generating synthetic data to {args.dir} (this may take a while)")
        os.makedirs(args.dir)
        # set random seed to generate same random data for every node
        np.random.seed(seed=0)
        for i in range(40):
            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
            n = nib.Nifti1Image(seg, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))

    # initialize the distributed training process, every GPU runs in a process
    dist.init_process_group(backend="nccl", init_method="env://")

    images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
    train_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            RandCropByPosNegLabeld(
                keys=["image", "label"], label_key="label", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a training data loader
    train_ds = Dataset(data=train_files, transform=train_transforms)
    # create a training data sampler; shuffle is disabled in the loader because
    # SupervisedTrainer drives shuffling via train_sampler.set_epoch()
    train_sampler = DistributedSampler(train_ds)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=False,
        num_workers=2,
        pin_memory=True,
        sampler=train_sampler,
    )

    # create UNet, DiceLoss and Adam optimizer; pin this process to one GPU
    device = torch.device(f"cuda:{args.local_rank}")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss = monai.losses.DiceLoss(sigmoid=True).to(device)
    opt = torch.optim.Adam(net.parameters(), 1e-3)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)
    # wrap the model with DistributedDataParallel module
    net = DistributedDataParallel(net, device_ids=[args.local_rank])

    # post transforms turn raw logits into a binary largest-component mask
    train_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    train_handlers = [
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
    ]
    # attach logging and checkpointing to the master process only
    if dist.get_rank() == 0:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        train_handlers.extend(
            [
                StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
                CheckpointSaver(save_dir="./runs/", save_dict={"net": net, "opt": opt}, save_interval=2),
            ]
        )

    trainer = SupervisedTrainer(
        device=device,
        max_epochs=5,
        train_data_loader=train_loader,
        network=net,
        optimizer=opt,
        loss_function=loss,
        inferer=SimpleInferer(),
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP training
        amp=monai.config.get_torch_version_tuple() >= (1, 6),
        post_transform=train_post_transforms,
        key_train_metric={"train_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]), device=device)},
        train_handlers=train_handlers,
    )
    trainer.run()
    dist.destroy_process_group()
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def main():
    """Parse CLI options (including the DDP-provided local rank) and train."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "-d", "--dir", default="./testdata", type=str, help="directory to create random data"
    )
    # must parse the command-line argument: ``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by DDP
    cli.add_argument("--local_rank", type=int)
    parsed = cli.parse_args()

    train(args=parsed)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
# usage example(refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py):
|
| 199 |
+
|
| 200 |
+
# python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
|
| 201 |
+
# --nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
|
| 202 |
+
# --master_addr="192.168.1.1" --master_port=1234
|
| 203 |
+
# unet_training_workflows.py -d DIR_OF_TESTDATA
|
| 204 |
+
|
| 205 |
+
# script entry point: run training only when executed directly, not on import
if __name__ == "__main__":
    main()
|
testbed/Project-MONAI__MONAI/examples/segmentation_3d/unet_evaluation_array.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from torch.utils.data import DataLoader
|
| 22 |
+
|
| 23 |
+
from monai import config
|
| 24 |
+
from monai.data import NiftiDataset, NiftiSaver, create_test_image_3d
|
| 25 |
+
from monai.inferers import sliding_window_inference
|
| 26 |
+
from monai.metrics import DiceMetric
|
| 27 |
+
from monai.networks.nets import UNet
|
| 28 |
+
from monai.transforms import AddChannel, Compose, ScaleIntensity, ToTensor
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def main(tempdir):
    """Evaluate a pre-trained 3D UNet on synthetic data and save predictions.

    Args:
        tempdir: working directory that receives the generated NIfTI volumes.
    """
    config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # write five synthetic image/segmentation pairs into the temp directory
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for idx in range(5):
        image, label = create_test_image_3d(128, 128, 128, num_seg_classes=1)
        nib.save(nib.Nifti1Image(image, np.eye(4)), os.path.join(tempdir, f"im{idx:d}.nii.gz"))
        nib.save(nib.Nifti1Image(label, np.eye(4)), os.path.join(tempdir, f"seg{idx:d}.nii.gz"))

    image_paths = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    label_paths = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))

    # transforms applied to images and segmentations respectively
    image_transform = Compose([ScaleIntensity(), AddChannel(), ToTensor()])
    label_transform = Compose([AddChannel(), ToTensor()])
    val_ds = NiftiDataset(
        image_paths, label_paths, transform=image_transform, seg_transform=label_transform, image_only=False
    )
    # sliding window inference for one image at every iteration
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available())
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    model.load_state_dict(torch.load("best_metric_model_segmentation3d_array.pth"))
    model.eval()
    with torch.no_grad():
        dice_total = 0.0
        sample_total = 0
        saver = NiftiSaver(output_dir="./output")
        # sliding window size and batch size for windows inference
        roi_size = (96, 96, 96)
        sw_batch_size = 4
        for batch in val_loader:
            val_images, val_labels = batch[0].to(device), batch[1].to(device)
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            score = dice_metric(y_pred=val_outputs, y=val_labels)
            sample_total += len(score)
            dice_total += score.item() * len(score)
            # binarize logits before saving the prediction volumes
            val_outputs = (val_outputs.sigmoid() >= 0.5).float()
            saver.save_batch(val_outputs, batch[2])
        metric = dice_total / sample_total
    print("evaluation metric:", metric)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
if __name__ == "__main__":
    # run the demo inside a temporary directory that is removed afterwards
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/segmentation_3d/unet_evaluation_dict.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from torch.utils.data import DataLoader
|
| 22 |
+
|
| 23 |
+
import monai
|
| 24 |
+
from monai.data import NiftiSaver, create_test_image_3d, list_data_collate
|
| 25 |
+
from monai.engines import get_devices_spec
|
| 26 |
+
from monai.inferers import sliding_window_inference
|
| 27 |
+
from monai.metrics import DiceMetric
|
| 28 |
+
from monai.networks.nets import UNet
|
| 29 |
+
from monai.transforms import AsChannelFirstd, Compose, LoadNiftid, ScaleIntensityd, ToTensord
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def main(tempdir):
    """Evaluate a pre-trained 3D UNet with dictionary-style transforms.

    Generates synthetic validation data, runs sliding-window inference (on all
    available GPUs when present), reports the mean Dice metric, and saves the
    binarized predictions as NIfTI files under ``./output``.

    Args:
        tempdir: working directory that receives the generated NIfTI volumes.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create five synthetic image/segmentation pairs in the temp directory
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)

        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    # dictionary-style data items, consumed by the "d"-suffixed transforms below
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    # sliding window inference need to input 1 image in every iteration
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

    # try to use all the available GPUs
    devices = get_devices_spec(None)
    model = UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(devices[0])

    # assumes a checkpoint produced by the matching training example exists
    # in the current working directory — TODO confirm before running
    model.load_state_dict(torch.load("best_metric_model_segmentation3d_dict.pth"))

    # if we have multiple GPUs, set data parallel to execute sliding window inference
    if len(devices) > 1:
        model = torch.nn.DataParallel(model, device_ids=devices)

    model.eval()
    with torch.no_grad():
        # accumulate a sample-count-weighted sum of Dice values across batches
        metric_sum = 0.0
        metric_count = 0
        saver = NiftiSaver(output_dir="./output")
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(devices[0]), val_data["seg"].to(devices[0])
            # define sliding window size and batch size for windows inference
            roi_size = (96, 96, 96)
            sw_batch_size = 4
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            value = dice_metric(y_pred=val_outputs, y=val_labels)
            metric_count += len(value)
            metric_sum += value.item() * len(value)
            # binarize logits before saving; metadata keys come from LoadNiftid
            val_outputs = (val_outputs.sigmoid() >= 0.5).float()
            saver.save_batch(val_outputs, val_data["img_meta_dict"])
        metric = metric_sum / metric_count
    print("evaluation metric:", metric)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
if __name__ == "__main__":
    # run the demo inside a temporary directory that is removed afterwards
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/segmentation_3d/unet_training_array.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from torch.utils.data import DataLoader
|
| 22 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 23 |
+
|
| 24 |
+
import monai
|
| 25 |
+
from monai.data import NiftiDataset, create_test_image_3d
|
| 26 |
+
from monai.inferers import sliding_window_inference
|
| 27 |
+
from monai.metrics import DiceMetric
|
| 28 |
+
from monai.transforms import AddChannel, Compose, RandRotate90, RandSpatialCrop, ScaleIntensity, ToTensor
|
| 29 |
+
from monai.visualize import plot_2d_or_3d_image
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def main(tempdir: str) -> None:
    """Train a 3D UNet on synthetic NIfTI image/segmentation pairs (array-style data).

    Generates 40 random volumes into ``tempdir``, trains for 5 epochs with Dice loss,
    validates every ``val_interval`` epochs with sliding-window inference, saves the
    best model weights, and logs curves/images to TensorBoard.

    Args:
        tempdir: directory used to write the synthetic ``im*.nii.gz`` / ``seg*.nii.gz``
            files (a ``tempfile.TemporaryDirectory`` path when run as a script).
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)

        # identity affine: synthetic data has no real-world orientation/spacing
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    # sorted() keeps image/segmentation file lists aligned index-by-index
    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))

    # define transforms for image and segmentation
    # NOTE: image and seg use separate (non-dict) pipelines; the random crop/rotate
    # transforms appear in both so paired spatial augmentation stays in sync.
    train_imtrans = Compose(
        [
            ScaleIntensity(),
            AddChannel(),
            RandSpatialCrop((96, 96, 96), random_size=False),
            RandRotate90(prob=0.5, spatial_axes=(0, 2)),
            ToTensor(),
        ]
    )
    train_segtrans = Compose(
        [
            AddChannel(),
            RandSpatialCrop((96, 96, 96), random_size=False),
            RandRotate90(prob=0.5, spatial_axes=(0, 2)),
            ToTensor(),
        ]
    )
    # validation uses full volumes: no random cropping/rotation
    val_imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()])
    val_segtrans = Compose([AddChannel(), ToTensor()])

    # define nifti dataset, data loader (sanity-check one batch's shapes)
    check_ds = NiftiDataset(images, segs, transform=train_imtrans, seg_transform=train_segtrans)
    check_loader = DataLoader(check_ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())
    im, seg = monai.utils.misc.first(check_loader)
    print(im.shape, seg.shape)

    # create a training data loader (first 20 pairs)
    train_ds = NiftiDataset(images[:20], segs[:20], transform=train_imtrans, seg_transform=train_segtrans)
    train_loader = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=8, pin_memory=torch.cuda.is_available())
    # create a validation data loader (last 20 pairs)
    val_ds = NiftiDataset(images[-20:], segs[-20:], transform=val_imtrans, seg_transform=val_segtrans)
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, pin_memory=torch.cuda.is_available())
    # sigmoid=True: the single-channel UNet output is raw logits
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss_function = monai.losses.DiceLoss(sigmoid=True)
    optimizer = torch.optim.Adam(model.parameters(), 1e-3)

    # start a typical PyTorch training
    val_interval = 2  # run validation every 2 epochs
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            # global step = epoch_len * epoch + step keeps the x-axis monotonic across epochs
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                metric_sum = 0.0
                metric_count = 0
                # keep the last validation batch around for the TensorBoard image plots below
                val_images = None
                val_labels = None
                val_outputs = None
                for val_data in val_loader:
                    val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
                    # full volumes are 128^3 but the net was trained on 96^3 crops,
                    # so tile inference over 96^3 windows
                    roi_size = (96, 96, 96)
                    sw_batch_size = 4
                    val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
                    value = dice_metric(y_pred=val_outputs, y=val_labels)
                    # weighted running mean over batches (len(value) = batch size)
                    metric_count += len(value)
                    metric_sum += value.item() * len(value)
                metric = metric_sum / metric_count
                metric_values.append(metric)
                if metric > best_metric:
                    best_metric = metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model_segmentation3d_array.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format(
                        epoch + 1, metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_mean_dice", metric, epoch + 1)
                # plot the last model output as GIF image in TensorBoard with the corresponding image and label
                plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image")
                plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label")
                plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output")

    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()


if __name__ == "__main__":
    # synthetic data is disposable: generate into a temp dir that is removed on exit
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/segmentation_3d/unet_training_dict.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from torch.utils.data import DataLoader
|
| 22 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 23 |
+
|
| 24 |
+
import monai
|
| 25 |
+
from monai.data import create_test_image_3d, list_data_collate
|
| 26 |
+
from monai.inferers import sliding_window_inference
|
| 27 |
+
from monai.metrics import DiceMetric
|
| 28 |
+
from monai.transforms import (
|
| 29 |
+
AsChannelFirstd,
|
| 30 |
+
Compose,
|
| 31 |
+
LoadNiftid,
|
| 32 |
+
RandCropByPosNegLabeld,
|
| 33 |
+
RandRotate90d,
|
| 34 |
+
ScaleIntensityd,
|
| 35 |
+
ToTensord,
|
| 36 |
+
)
|
| 37 |
+
from monai.visualize import plot_2d_or_3d_image
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def main(tempdir: str) -> None:
    """Train a 3D UNet on synthetic NIfTI data using dictionary-style transforms.

    Same workflow as the array-based example, but the data pipeline is driven by
    ``{"img": ..., "seg": ...}`` dicts so paired image/label transforms share one
    ``Compose`` pipeline. Trains for 5 epochs, validates every ``val_interval``
    epochs via sliding-window inference, checkpoints the best model, and logs to
    TensorBoard.

    Args:
        tempdir: directory used to write the synthetic ``img*.nii.gz`` /
            ``seg*.nii.gz`` files (a ``tempfile.TemporaryDirectory`` path when
            run as a script).
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        # channel_dim=-1: volumes are written channel-last, AsChannelFirstd fixes it at load time
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)

        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    # sorted() keeps image/segmentation lists aligned before zipping into dicts
    images = sorted(glob(os.path.join(tempdir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])]
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            # intensity scaling applies to the image only, not the label
            ScaleIntensityd(keys="img"),
            # pos=1, neg=1: balanced sampling of foreground/background-centred crops
            RandCropByPosNegLabeld(
                keys=["img", "seg"], label_key="seg", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["img", "seg"]),
        ]
    )
    # validation keeps full volumes: no random crop/rotation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )

    # define dataset, data loader (sanity-check one batch's shapes)
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate)
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["seg"].shape)

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    # list_data_collate flattens the per-item list of crops into one batch
    train_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=True,
        num_workers=4,
        collate_fn=list_data_collate,
        pin_memory=torch.cuda.is_available(),
    )
    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)
    # sigmoid=True: the single-channel UNet output is raw logits
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss_function = monai.losses.DiceLoss(sigmoid=True)
    optimizer = torch.optim.Adam(model.parameters(), 1e-3)

    # start a typical PyTorch training
    val_interval = 2  # run validation every 2 epochs
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            # global step = epoch_len * epoch + step keeps the x-axis monotonic across epochs
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                metric_sum = 0.0
                metric_count = 0
                # keep the last validation batch around for the TensorBoard image plots below
                val_images = None
                val_labels = None
                val_outputs = None
                for val_data in val_loader:
                    val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
                    # full volumes are 128^3 but the net was trained on 96^3 crops,
                    # so tile inference over 96^3 windows
                    roi_size = (96, 96, 96)
                    sw_batch_size = 4
                    val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
                    value = dice_metric(y_pred=val_outputs, y=val_labels)
                    # weighted running mean over batches (len(value) = batch size)
                    metric_count += len(value)
                    metric_sum += value.item() * len(value)
                metric = metric_sum / metric_count
                metric_values.append(metric)
                if metric > best_metric:
                    best_metric = metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model_segmentation3d_dict.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format(
                        epoch + 1, metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_mean_dice", metric, epoch + 1)
                # plot the last model output as GIF image in TensorBoard with the corresponding image and label
                plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image")
                plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label")
                plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output")

    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()


if __name__ == "__main__":
    # synthetic data is disposable: generate into a temp dir that is removed on exit
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/segmentation_3d_ignite/unet_evaluation_array.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from ignite.engine import Engine
|
| 22 |
+
from torch.utils.data import DataLoader
|
| 23 |
+
|
| 24 |
+
from monai import config
|
| 25 |
+
from monai.data import NiftiDataset, create_test_image_3d
|
| 26 |
+
from monai.handlers import CheckpointLoader, MeanDice, SegmentationSaver, StatsHandler
|
| 27 |
+
from monai.inferers import sliding_window_inference
|
| 28 |
+
from monai.networks import predict_segmentation
|
| 29 |
+
from monai.networks.nets import UNet
|
| 30 |
+
from monai.transforms import AddChannel, Compose, ScaleIntensity, ToTensor
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def main(tempdir: str) -> None:
    """Evaluate a trained 3D UNet with pytorch-ignite (array-style data).

    Generates 5 synthetic NIfTI image/segmentation pairs in ``tempdir``, builds an
    ignite ``Engine`` whose step runs sliding-window inference, attaches a MeanDice
    metric, stats logging, a NIfTI segmentation writer, and a checkpoint loader for
    weights produced by the "unet_training_array" example, then runs one pass over
    the data.

    Args:
        tempdir: directory used to write the synthetic ``im*.nii.gz`` /
            ``seg*.nii.gz`` files (a ``tempfile.TemporaryDirectory`` path when
            run as a script).
    """
    config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)

        # identity affine: synthetic data has no real-world orientation/spacing
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    # sorted() keeps image/segmentation file lists aligned index-by-index
    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))

    # define transforms for image and segmentation
    imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()])
    segtrans = Compose([AddChannel(), ToTensor()])
    # image_only=False so each batch carries metadata (used by SegmentationSaver below)
    ds = NiftiDataset(images, segs, transform=imtrans, seg_transform=segtrans, image_only=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # must match the architecture the checkpoint was trained with
    net = UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    )
    net.to(device)

    # define sliding window size and batch size for windows inference
    roi_size = (96, 96, 96)
    sw_batch_size = 4

    def _sliding_window_processor(engine, batch):
        # ignite process_function: returns (predictions, labels) for attached metrics
        net.eval()
        with torch.no_grad():
            val_images, val_labels = batch[0].to(device), batch[1].to(device)
            seg_probs = sliding_window_inference(val_images, roi_size, sw_batch_size, net)
            return seg_probs, val_labels

    evaluator = Engine(_sliding_window_processor)

    # add evaluation metric to the evaluator engine
    # sigmoid=True: the single-channel UNet output is raw logits
    MeanDice(sigmoid=True, to_onehot_y=False).attach(evaluator, "Mean_Dice")

    # StatsHandler prints loss at every iteration and print metrics at every epoch,
    # we don't need to print loss for evaluator, so just print metrics, user can also customize print functions
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
    )
    val_stats_handler.attach(evaluator)

    # for the array data format, assume the 3rd item of batch data is the meta_data
    file_saver = SegmentationSaver(
        output_dir="tempdir",
        output_ext=".nii.gz",
        output_postfix="seg",
        name="evaluator",
        batch_transform=lambda x: x[2],
        # threshold logits to a binary mask before writing
        output_transform=lambda output: predict_segmentation(output[0]),
    )
    file_saver.attach(evaluator)

    # the model was trained by "unet_training_array" example
    ckpt_saver = CheckpointLoader(load_path="./runs_array/net_checkpoint_100.pth", load_dict={"net": net})
    ckpt_saver.attach(evaluator)

    # sliding window inference for one image at every iteration
    loader = DataLoader(ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available())
    state = evaluator.run(loader)
    print(state)


if __name__ == "__main__":
    # synthetic data is disposable: generate into a temp dir that is removed on exit
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/segmentation_3d_ignite/unet_evaluation_dict.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from ignite.engine import Engine
|
| 22 |
+
from torch.utils.data import DataLoader
|
| 23 |
+
|
| 24 |
+
import monai
|
| 25 |
+
from monai.data import create_test_image_3d, list_data_collate
|
| 26 |
+
from monai.handlers import CheckpointLoader, MeanDice, SegmentationSaver, StatsHandler
|
| 27 |
+
from monai.inferers import sliding_window_inference
|
| 28 |
+
from monai.networks import predict_segmentation
|
| 29 |
+
from monai.networks.nets import UNet
|
| 30 |
+
from monai.transforms import AsChannelFirstd, Compose, LoadNiftid, ScaleIntensityd, ToTensord
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def main(tempdir: str) -> None:
    """Evaluate a trained 3D UNet with pytorch-ignite using dictionary-style data.

    Generates 5 synthetic NIfTI image/segmentation pairs in ``tempdir``, builds an
    ignite ``Engine`` whose step runs sliding-window inference over ``{"img", "seg"}``
    dict batches, attaches a MeanDice metric, stats logging, a NIfTI segmentation
    writer, and a checkpoint loader for weights produced by the
    "unet_training_dict" example, then runs one pass over the data.

    Args:
        tempdir: directory used to write the synthetic ``im*.nii.gz`` /
            ``seg*.nii.gz`` files (a ``tempfile.TemporaryDirectory`` path when
            run as a script).
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        # channel_dim=-1: volumes are written channel-last, AsChannelFirstd fixes it at load time
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)

        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    # sorted() keeps image/segmentation lists aligned before zipping into dicts
    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            # intensity scaling applies to the image only, not the label
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # must match the architecture the checkpoint was trained with
    net = UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    )
    net.to(device)

    # define sliding window size and batch size for windows inference
    roi_size = (96, 96, 96)
    sw_batch_size = 4

    def _sliding_window_processor(engine, batch):
        # ignite process_function: returns (predictions, labels) for attached metrics
        net.eval()
        with torch.no_grad():
            val_images, val_labels = batch["img"].to(device), batch["seg"].to(device)
            seg_probs = sliding_window_inference(val_images, roi_size, sw_batch_size, net)
            return seg_probs, val_labels

    evaluator = Engine(_sliding_window_processor)

    # add evaluation metric to the evaluator engine
    # sigmoid=True: the single-channel UNet output is raw logits
    MeanDice(sigmoid=True, to_onehot_y=False).attach(evaluator, "Mean_Dice")

    # StatsHandler prints loss at every iteration and print metrics at every epoch,
    # we don't need to print loss for evaluator, so just print metrics, user can also customize print functions
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
    )
    val_stats_handler.attach(evaluator)

    # convert the necessary metadata from batch data
    SegmentationSaver(
        output_dir="tempdir",
        output_ext=".nii.gz",
        output_postfix="seg",
        name="evaluator",
        batch_transform=lambda batch: batch["img_meta_dict"],
        # threshold logits to a binary mask before writing
        output_transform=lambda output: predict_segmentation(output[0]),
    ).attach(evaluator)
    # the model was trained by "unet_training_dict" example
    CheckpointLoader(load_path="./runs_dict/net_checkpoint_50.pth", load_dict={"net": net}).attach(evaluator)

    # sliding window inference for one image at every iteration
    val_loader = DataLoader(
        val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available()
    )
    state = evaluator.run(val_loader)
    print(state)


if __name__ == "__main__":
    # synthetic data is disposable: generate into a temp dir that is removed on exit
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/segmentation_3d_ignite/unet_training_array.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
|
| 22 |
+
from ignite.handlers import EarlyStopping, ModelCheckpoint
|
| 23 |
+
from torch.utils.data import DataLoader
|
| 24 |
+
|
| 25 |
+
import monai
|
| 26 |
+
from monai.data import NiftiDataset, create_test_image_3d
|
| 27 |
+
from monai.handlers import (
|
| 28 |
+
MeanDice,
|
| 29 |
+
StatsHandler,
|
| 30 |
+
TensorBoardImageHandler,
|
| 31 |
+
TensorBoardStatsHandler,
|
| 32 |
+
stopping_fn_from_metric,
|
| 33 |
+
)
|
| 34 |
+
from monai.networks import predict_segmentation
|
| 35 |
+
from monai.transforms import AddChannel, Compose, RandSpatialCrop, Resize, ScaleIntensity, ToTensor
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def main(tempdir):
    """Train a 3D UNet for binary segmentation on synthetic NIfTI data using ignite.

    Workflow: generate 40 synthetic image/mask pairs in ``tempdir``, build
    array-style datasets/loaders, then wire an ignite trainer/evaluator with
    checkpointing, console + TensorBoard stats, per-epoch validation,
    TensorBoard image snapshots and early stopping.

    Args:
        tempdir: writable directory used for the generated ``im*.nii.gz`` /
            ``seg*.nii.gz`` files (caller owns cleanup).
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)

        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    # sorted() keeps image/segmentation file lists aligned index-by-index
    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))

    # define transforms for image and segmentation
    train_imtrans = Compose(
        [ScaleIntensity(), AddChannel(), RandSpatialCrop((96, 96, 96), random_size=False), ToTensor()]
    )
    train_segtrans = Compose([AddChannel(), RandSpatialCrop((96, 96, 96), random_size=False), ToTensor()])
    val_imtrans = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])
    val_segtrans = Compose([AddChannel(), Resize((96, 96, 96)), ToTensor()])

    # define nifti dataset, data loader
    check_ds = NiftiDataset(images, segs, transform=train_imtrans, seg_transform=train_segtrans)
    check_loader = DataLoader(check_ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())
    # sanity check: pull one batch and print its shapes before training starts
    im, seg = monai.utils.misc.first(check_loader)
    print(im.shape, seg.shape)

    # create a training data loader (first 20 pairs train, last 20 validate)
    train_ds = NiftiDataset(images[:20], segs[:20], transform=train_imtrans, seg_transform=train_segtrans)
    train_loader = DataLoader(train_ds, batch_size=5, shuffle=True, num_workers=8, pin_memory=torch.cuda.is_available())
    # create a validation data loader
    val_ds = NiftiDataset(images[-20:], segs[-20:], transform=val_imtrans, seg_transform=val_segtrans)
    val_loader = DataLoader(val_ds, batch_size=5, num_workers=8, pin_memory=torch.cuda.is_available())

    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    )
    loss = monai.losses.DiceLoss(sigmoid=True)
    lr = 1e-3
    opt = torch.optim.Adam(net.parameters(), lr)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Ignite trainer expects batch=(img, seg) and returns output=loss at every iteration,
    # user can add output_transform to return other values, like: y_pred, y, etc.
    trainer = create_supervised_trainer(net, opt, loss, device, False)

    # adding checkpoint handler to save models (network params and optimizer stats) during training
    checkpoint_handler = ModelCheckpoint("./runs_array/", "net", n_saved=10, require_empty=False)
    trainer.add_event_handler(
        event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"net": net, "opt": opt}
    )

    # StatsHandler prints loss at every iteration and print metrics at every epoch,
    # we don't set metrics for trainer here, so just print loss, user can also customize print functions
    # and can use output_transform to convert engine.state.output if it's not a loss value
    train_stats_handler = StatsHandler(name="trainer")
    train_stats_handler.attach(trainer)

    # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler
    train_tensorboard_stats_handler = TensorBoardStatsHandler()
    train_tensorboard_stats_handler.attach(trainer)

    validation_every_n_epochs = 1
    # Set parameters for validation
    metric_name = "Mean_Dice"
    # add evaluation metric to the evaluator engine
    val_metrics = {metric_name: MeanDice(sigmoid=True, to_onehot_y=False)}

    # Ignite evaluator expects batch=(img, seg) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True)

    @trainer.on(Events.EPOCH_COMPLETED(every=validation_every_n_epochs))
    def run_validation(engine):
        evaluator.run(val_loader)

    # add early stopping handler to evaluator
    early_stopper = EarlyStopping(patience=4, score_function=stopping_fn_from_metric(metric_name), trainer=trainer)
    evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,
    )  # fetch global epoch number from trainer
    val_stats_handler.attach(evaluator)

    # add handler to record metrics to TensorBoard at every validation epoch
    val_tensorboard_stats_handler = TensorBoardStatsHandler(
        output_transform=lambda x: None,  # no need to plot loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,
    )  # fetch global epoch number from trainer
    val_tensorboard_stats_handler.attach(evaluator)

    # add handler to draw the first image and the corresponding label and model output in the last batch
    # here we draw the 3D output as GIF format along Depth axis, at every validation epoch
    val_tensorboard_image_handler = TensorBoardImageHandler(
        batch_transform=lambda batch: (batch[0], batch[1]),
        output_transform=lambda output: predict_segmentation(output[0]),
        global_iter_transform=lambda x: trainer.state.epoch,
    )
    evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=val_tensorboard_image_handler)

    train_epochs = 30
    state = trainer.run(train_loader, train_epochs)
    print(state)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
if __name__ == "__main__":
    # TemporaryDirectory removes the generated synthetic dataset automatically on exit
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/segmentation_3d_ignite/unet_training_dict.py
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from ignite.engine import Events, _prepare_batch, create_supervised_evaluator, create_supervised_trainer
|
| 22 |
+
from ignite.handlers import EarlyStopping, ModelCheckpoint
|
| 23 |
+
from torch.utils.data import DataLoader
|
| 24 |
+
|
| 25 |
+
import monai
|
| 26 |
+
from monai.data import create_test_image_3d, list_data_collate
|
| 27 |
+
from monai.handlers import (
|
| 28 |
+
MeanDice,
|
| 29 |
+
StatsHandler,
|
| 30 |
+
TensorBoardImageHandler,
|
| 31 |
+
TensorBoardStatsHandler,
|
| 32 |
+
stopping_fn_from_metric,
|
| 33 |
+
)
|
| 34 |
+
from monai.networks import predict_segmentation
|
| 35 |
+
from monai.transforms import (
|
| 36 |
+
AsChannelFirstd,
|
| 37 |
+
Compose,
|
| 38 |
+
LoadNiftid,
|
| 39 |
+
RandCropByPosNegLabeld,
|
| 40 |
+
RandRotate90d,
|
| 41 |
+
ScaleIntensityd,
|
| 42 |
+
ToTensord,
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def main(tempdir):
    """Train a 3D UNet on synthetic NIfTI data using the dictionary-transform workflow.

    Same overall pipeline as the array-based example, but data items are dicts
    (``{"img": ..., "seg": ...}``) processed by dictionary transforms, and
    validation runs every few iterations instead of every epoch.

    Args:
        tempdir: writable directory used for the generated ``img*.nii.gz`` /
            ``seg*.nii.gz`` files (caller owns cleanup).
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)

        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    # sorted() keeps image/segmentation lists aligned; first 20 train, last 20 validate
    images = sorted(glob(os.path.join(tempdir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])]
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            RandCropByPosNegLabeld(
                keys=["img", "seg"], label_key="seg", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["img", "seg"]),
        ]
    )
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )

    # define dataset, data loader
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    check_loader = DataLoader(
        check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available()
    )
    # sanity check: pull one batch and print its shapes before training starts
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["seg"].shape)

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=True,
        num_workers=4,
        collate_fn=list_data_collate,
        pin_memory=torch.cuda.is_available(),
    )
    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(
        val_ds, batch_size=5, num_workers=8, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available()
    )

    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    )
    loss = monai.losses.DiceLoss(sigmoid=True)
    lr = 1e-3
    opt = torch.optim.Adam(net.parameters(), lr)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Ignite trainer expects batch=(img, seg) and returns output=loss at every iteration,
    # user can add output_transform to return other values, like: y_pred, y, etc.
    def prepare_batch(batch, device=None, non_blocking=False):
        # adapt the dict-style batch to the (input, target) tuple ignite expects
        return _prepare_batch((batch["img"], batch["seg"]), device, non_blocking)

    trainer = create_supervised_trainer(net, opt, loss, device, False, prepare_batch=prepare_batch)

    # adding checkpoint handler to save models (network params and optimizer stats) during training
    checkpoint_handler = ModelCheckpoint("./runs_dict/", "net", n_saved=10, require_empty=False)
    trainer.add_event_handler(
        event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"net": net, "opt": opt}
    )

    # StatsHandler prints loss at every iteration and print metrics at every epoch,
    # we don't set metrics for trainer here, so just print loss, user can also customize print functions
    # and can use output_transform to convert engine.state.output if it's not loss value
    train_stats_handler = StatsHandler(name="trainer")
    train_stats_handler.attach(trainer)

    # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler
    train_tensorboard_stats_handler = TensorBoardStatsHandler()
    train_tensorboard_stats_handler.attach(trainer)

    validation_every_n_iters = 5
    # set parameters for validation
    metric_name = "Mean_Dice"
    # add evaluation metric to the evaluator engine
    val_metrics = {metric_name: MeanDice(sigmoid=True, to_onehot_y=False)}

    # Ignite evaluator expects batch=(img, seg) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

    @trainer.on(Events.ITERATION_COMPLETED(every=validation_every_n_iters))
    def run_validation(engine):
        evaluator.run(val_loader)

    # add early stopping handler to evaluator
    early_stopper = EarlyStopping(patience=4, score_function=stopping_fn_from_metric(metric_name), trainer=trainer)
    evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,
    )  # fetch global epoch number from trainer
    val_stats_handler.attach(evaluator)

    # add handler to record metrics to TensorBoard at every validation epoch
    val_tensorboard_stats_handler = TensorBoardStatsHandler(
        output_transform=lambda x: None,  # no need to plot loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.iteration,
    )  # fetch global iteration number from trainer
    val_tensorboard_stats_handler.attach(evaluator)

    # add handler to draw the first image and the corresponding label and model output in the last batch
    # here we draw the 3D output as GIF format along the depth axis, every 2 validation iterations.
    val_tensorboard_image_handler = TensorBoardImageHandler(
        batch_transform=lambda batch: (batch["img"], batch["seg"]),
        output_transform=lambda output: predict_segmentation(output[0]),
        global_iter_transform=lambda x: trainer.state.epoch,
    )
    evaluator.add_event_handler(event_name=Events.ITERATION_COMPLETED(every=2), handler=val_tensorboard_image_handler)

    train_epochs = 5
    state = trainer.run(train_loader, train_epochs)
    print(state)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
if __name__ == "__main__":
    # TemporaryDirectory removes the generated synthetic dataset automatically on exit
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/synthesis/gan_evaluation.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
"""
|
| 12 |
+
MONAI GAN Evaluation Example
|
| 13 |
+
Generate fake images from trained generator file.
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import logging
|
| 18 |
+
import os
|
| 19 |
+
import sys
|
| 20 |
+
from glob import glob
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
|
| 24 |
+
import monai
|
| 25 |
+
from monai.data import png_writer
|
| 26 |
+
from monai.engines.utils import default_make_latent as make_latent
|
| 27 |
+
from monai.networks.nets import Generator
|
| 28 |
+
from monai.utils.misc import set_determinism
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def save_generator_fakes(run_folder, g_output_tensor):
    """Write each generated image in ``g_output_tensor`` to ``run_folder`` as a PNG.

    Files are named ``gen-fake-<index>.png``; channel 0 of each tensor is
    converted to a numpy array and scaled to 8-bit on write.
    """
    for idx, fake in enumerate(g_output_tensor):
        target_path = os.path.join(run_folder, "gen-fake-%d.png" % (idx))
        pixel_array = fake[0].cpu().data.numpy()
        png_writer.write_png(pixel_array, target_path, scale=255)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def main():
    """Load a trained GAN generator checkpoint and save a batch of synthesized PNGs.

    Expects a ``*.pth`` checkpoint (with a ``"g_net"`` state dict) produced by the
    companion training script in ``./model_out``; writes 10 fake images to
    ``./generated_images``.

    Raises:
        FileNotFoundError: if no checkpoint is present in ``./model_out``.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    set_determinism(12345)  # fixed seed so the sampled latents (and fakes) are reproducible
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load generator
    checkpoints = glob("./model_out/*.pth")
    if not checkpoints:
        # fail with a clear message instead of an opaque IndexError
        raise FileNotFoundError("no trained generator checkpoint (*.pth) found in ./model_out")
    network_filepath = checkpoints[0]
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine
    data = torch.load(network_filepath, map_location=device)
    latent_size = 64
    gen_net = Generator(
        latent_shape=latent_size, start_shape=(latent_size, 8, 8), channels=[32, 16, 8, 1], strides=[2, 2, 2, 1]
    )
    # training appended a sigmoid output activation; mirror it so the state dict keys match
    gen_net.conv.add_module("activation", torch.nn.Sigmoid())
    gen_net.load_state_dict(data["g_net"])
    gen_net = gen_net.to(device)

    # create fakes
    output_dir = "./generated_images"
    os.makedirs(output_dir, exist_ok=True)  # race-free replacement for isdir + mkdir
    num_fakes = 10
    print("Generating %d fakes and saving in %s" % (num_fakes, output_dir))
    fake_latents = make_latent(num_fakes, latent_size).to(device)
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        save_generator_fakes(output_dir, gen_net(fake_latents))
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
if __name__ == "__main__":
    # script entry point: generate and save fakes from the trained generator
    main()
|
testbed/Project-MONAI__MONAI/examples/synthesis/gan_training.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
"""
|
| 12 |
+
MONAI Generative Adversarial Networks Workflow Example
|
| 13 |
+
Sample script using MONAI to train a GAN to synthesize images from a latent code.
|
| 14 |
+
|
| 15 |
+
## Get the dataset
|
| 16 |
+
MedNIST.tar.gz link: https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz
|
| 17 |
+
Extract tarball and set input_dir variable. GAN script trains using hand CT scan jpg images.
|
| 18 |
+
|
| 19 |
+
Dataset information available in MedNIST Tutorial
|
| 20 |
+
https://github.com/Project-MONAI/Tutorials/blob/master/mednist_tutorial.ipynb
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
import logging
|
| 24 |
+
import os
|
| 25 |
+
import sys
|
| 26 |
+
|
| 27 |
+
import torch
|
| 28 |
+
|
| 29 |
+
import monai
|
| 30 |
+
from monai.apps.utils import download_and_extract
|
| 31 |
+
from monai.data import CacheDataset, DataLoader, png_writer
|
| 32 |
+
from monai.engines import GanTrainer
|
| 33 |
+
from monai.engines.utils import GanKeys as Keys
|
| 34 |
+
from monai.engines.utils import default_make_latent as make_latent
|
| 35 |
+
from monai.handlers import CheckpointSaver, StatsHandler
|
| 36 |
+
from monai.networks import normal_init
|
| 37 |
+
from monai.networks.nets import Discriminator, Generator
|
| 38 |
+
from monai.transforms import (
|
| 39 |
+
AddChannelD,
|
| 40 |
+
Compose,
|
| 41 |
+
LoadPNGD,
|
| 42 |
+
RandFlipD,
|
| 43 |
+
RandRotateD,
|
| 44 |
+
RandZoomD,
|
| 45 |
+
ScaleIntensityD,
|
| 46 |
+
ToTensorD,
|
| 47 |
+
)
|
| 48 |
+
from monai.utils.misc import set_determinism
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def main():
    """Train a GAN on MedNIST hand X-ray images using MONAI's GanTrainer workflow.

    Downloads the MedNIST dataset, builds a cached dict-transform pipeline over
    the Hand images, trains a Generator/Discriminator pair with BCE losses, and
    saves checkpoints plus a few sample fakes to ``model_out``.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    set_determinism(12345)  # fixed seed for reproducible training
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load real data
    mednist_url = "https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz?dl=1"
    md5_value = "0bc7306e7427e00ad1c5526a6677552d"
    extract_dir = "data"
    tar_save_path = os.path.join(extract_dir, "MedNIST.tar.gz")
    # download is skipped if the archive already exists and matches the md5
    download_and_extract(mednist_url, tar_save_path, extract_dir, md5_value)
    hand_dir = os.path.join(extract_dir, "MedNIST", "Hand")
    real_data = [{"hand": os.path.join(hand_dir, filename)} for filename in os.listdir(hand_dir)]

    # define real data transforms (light augmentation: rotate/flip/zoom)
    train_transforms = Compose(
        [
            LoadPNGD(keys=["hand"]),
            AddChannelD(keys=["hand"]),
            ScaleIntensityD(keys=["hand"]),
            RandRotateD(keys=["hand"], range_x=15, prob=0.5, keep_size=True),
            RandFlipD(keys=["hand"], spatial_axis=0, prob=0.5),
            RandZoomD(keys=["hand"], min_zoom=0.9, max_zoom=1.1, prob=0.5),
            ToTensorD(keys=["hand"]),
        ]
    )

    # create dataset and dataloader; CacheDataset keeps deterministic transform results in memory
    real_dataset = CacheDataset(real_data, train_transforms)
    batch_size = 300
    real_dataloader = DataLoader(real_dataset, batch_size=batch_size, shuffle=True, num_workers=10)

    # define function to process batchdata for input into discriminator
    def prepare_batch(batchdata):
        """
        Process Dataloader batchdata dict object and return image tensors for D Inferer
        """
        return batchdata["hand"]

    # define networks
    disc_net = Discriminator(
        in_shape=(1, 64, 64), channels=(8, 16, 32, 64, 1), strides=(2, 2, 2, 2, 1), num_res_units=1, kernel_size=5
    ).to(device)

    latent_size = 64
    gen_net = Generator(
        latent_shape=latent_size, start_shape=(latent_size, 8, 8), channels=[32, 16, 8, 1], strides=[2, 2, 2, 1]
    )

    # initialize both networks
    disc_net.apply(normal_init)
    gen_net.apply(normal_init)

    # input images are scaled to [0,1] so enforce the same of generated outputs
    gen_net.conv.add_module("activation", torch.nn.Sigmoid())
    gen_net = gen_net.to(device)

    # create optimizers and loss functions
    learning_rate = 2e-4
    betas = (0.5, 0.999)
    disc_opt = torch.optim.Adam(disc_net.parameters(), learning_rate, betas=betas)
    gen_opt = torch.optim.Adam(gen_net.parameters(), learning_rate, betas=betas)

    disc_loss_criterion = torch.nn.BCELoss()
    gen_loss_criterion = torch.nn.BCELoss()
    real_label = 1
    fake_label = 0

    def discriminator_loss(gen_images, real_images):
        """
        The discriminator loss is calculated by comparing D
        prediction for real and generated images.

        """
        real = real_images.new_full((real_images.shape[0], 1), real_label)
        gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)

        # detach() stops discriminator gradients flowing back into the generator
        realloss = disc_loss_criterion(disc_net(real_images), real)
        genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)

        return (genloss + realloss) / 2

    def generator_loss(gen_images):
        """
        The generator loss is calculated by determining how realistic
        the discriminator classifies the generated images.

        """
        output = disc_net(gen_images)
        cats = output.new_full(output.shape, real_label)
        return gen_loss_criterion(output, cats)

    # initialize current run dir
    run_dir = "model_out"
    print("Saving model output to: %s " % run_dir)

    # create workflow handlers: per-batch loss logging and periodic checkpointing
    handlers = [
        StatsHandler(
            name="batch_training_loss",
            output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]},
        ),
        CheckpointSaver(
            save_dir=run_dir,
            save_dict={"g_net": gen_net, "d_net": disc_net},
            save_interval=10,
            save_final=True,
            epoch_level=True,
        ),
    ]

    # define key metric (none: GAN training here is monitored by loss only)
    key_train_metric = None

    # create adversarial trainer; discriminator takes 5 optimization steps per generator step
    disc_train_steps = 5
    num_epochs = 50

    trainer = GanTrainer(
        device,
        num_epochs,
        real_dataloader,
        gen_net,
        gen_opt,
        generator_loss,
        disc_net,
        disc_opt,
        discriminator_loss,
        d_prepare_batch=prepare_batch,
        d_train_steps=disc_train_steps,
        latent_shape=latent_size,
        key_train_metric=key_train_metric,
        train_handlers=handlers,
    )

    # run GAN training
    trainer.run()

    # Training completed, save a few random generated images.
    print("Saving trained generator sample output.")
    test_img_count = 10
    test_latents = make_latent(test_img_count, latent_size).to(device)
    fakes = gen_net(test_latents)
    for i, image in enumerate(fakes):
        filename = "gen-fake-final-%d.png" % (i)
        save_path = os.path.join(run_dir, filename)
        img_array = image[0].cpu().data.numpy()
        png_writer.write_png(img_array, save_path, scale=255)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
if __name__ == "__main__":
    # script entry point: download data, train the GAN and save sample output
    main()
|
testbed/Project-MONAI__MONAI/examples/workflows/unet_evaluation_dict.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from ignite.metrics import Accuracy
|
| 22 |
+
|
| 23 |
+
import monai
|
| 24 |
+
from monai.data import create_test_image_3d
|
| 25 |
+
from monai.engines import SupervisedEvaluator
|
| 26 |
+
from monai.handlers import CheckpointLoader, MeanDice, SegmentationSaver, StatsHandler
|
| 27 |
+
from monai.inferers import SlidingWindowInferer
|
| 28 |
+
from monai.transforms import (
|
| 29 |
+
Activationsd,
|
| 30 |
+
AsChannelFirstd,
|
| 31 |
+
AsDiscreted,
|
| 32 |
+
Compose,
|
| 33 |
+
KeepLargestConnectedComponentd,
|
| 34 |
+
LoadNiftid,
|
| 35 |
+
ScaleIntensityd,
|
| 36 |
+
ToTensord,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def main(tempdir):
    """Evaluate a 3D UNet with a MONAI workflow engine on synthetic data.

    Generates 5 random image/segmentation pairs under ``tempdir``, restores
    the best-metric checkpoint from ``./runs/``, then runs a sliding-window
    evaluation that reports mean Dice / accuracy and saves the predicted
    segmentations back to ``./runs/``.

    Args:
        tempdir: directory in which the synthetic NIfTI files are created.

    Raises:
        FileNotFoundError: if no ``net_key_metric*`` checkpoint exists under
            ``./runs/`` (i.e. training has not been run yet).
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create 5 random image, mask pairs for evaluation
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]

    # model file path: sort for a deterministic choice (glob order is arbitrary)
    # and fail early with a clear message if training has not produced a checkpoint
    checkpoints = sorted(glob("./runs/net_key_metric*"))
    if not checkpoints:
        raise FileNotFoundError("no 'net_key_metric*' checkpoint found under ./runs/, please run training first")
    model_file = checkpoints[0]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # create the UNet to be restored from the checkpoint
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    # post-processing: sigmoid -> binarize -> keep the largest connected component
    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        CheckpointLoader(load_path=model_file, load_dict={"net": net}),
        SegmentationSaver(
            output_dir="./runs/",
            batch_transform=lambda batch: batch["image_meta_dict"],
            output_transform=lambda output: output["pred"],
        ),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        val_handlers=val_handlers,
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
        amp=monai.config.get_torch_version_tuple() >= (1, 6),
    )
    evaluator.run()


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/examples/workflows/unet_training_dict.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
from glob import glob
|
| 17 |
+
|
| 18 |
+
import nibabel as nib
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from ignite.metrics import Accuracy
|
| 22 |
+
|
| 23 |
+
import monai
|
| 24 |
+
from monai.data import create_test_image_3d
|
| 25 |
+
from monai.engines import SupervisedEvaluator, SupervisedTrainer
|
| 26 |
+
from monai.handlers import (
|
| 27 |
+
CheckpointSaver,
|
| 28 |
+
LrScheduleHandler,
|
| 29 |
+
MeanDice,
|
| 30 |
+
StatsHandler,
|
| 31 |
+
TensorBoardImageHandler,
|
| 32 |
+
TensorBoardStatsHandler,
|
| 33 |
+
ValidationHandler,
|
| 34 |
+
)
|
| 35 |
+
from monai.inferers import SimpleInferer, SlidingWindowInferer
|
| 36 |
+
from monai.transforms import (
|
| 37 |
+
Activationsd,
|
| 38 |
+
AsChannelFirstd,
|
| 39 |
+
AsDiscreted,
|
| 40 |
+
Compose,
|
| 41 |
+
KeepLargestConnectedComponentd,
|
| 42 |
+
LoadNiftid,
|
| 43 |
+
RandCropByPosNegLabeld,
|
| 44 |
+
RandRotate90d,
|
| 45 |
+
ScaleIntensityd,
|
| 46 |
+
ToTensord,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def main(tempdir):
    """Train and validate a 3D UNet with MONAI workflow engines on synthetic data.

    Generates 40 random image/segmentation pairs under ``tempdir`` (the first
    20 for training, the last 20 for validation), then runs a
    ``SupervisedTrainer`` for 5 epochs with periodic validation, TensorBoard
    logging and checkpointing into ``./runs/``.

    Args:
        tempdir: directory in which the synthetic NIfTI files are created.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    images = sorted(glob(os.path.join(tempdir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    # disjoint split: first 20 pairs for training, last 20 for validation
    train_files = [{"image": img, "label": seg} for img, seg in zip(images[:20], segs[:20])]
    val_files = [{"image": img, "label": seg} for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            RandCropByPosNegLabeld(
                keys=["image", "label"], label_key="label", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["image", "label"]),
        ]
    )
    val_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a training data loader
    train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
    # create a validation data loader
    val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss = monai.losses.DiceLoss(sigmoid=True)
    opt = torch.optim.Adam(net.parameters(), 1e-3)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)

    # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP
    # (computed once and shared by the evaluator and the trainer below)
    amp = monai.config.get_torch_version_tuple() >= (1, 6)

    # validation post-processing: sigmoid -> binarize -> keep the largest connected component
    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        TensorBoardStatsHandler(log_dir="./runs/", output_transform=lambda x: None),
        TensorBoardImageHandler(
            log_dir="./runs/",
            batch_transform=lambda x: (x["image"], x["label"]),
            output_transform=lambda x: x["pred"],
        ),
        CheckpointSaver(save_dir="./runs/", save_dict={"net": net}, save_key_metric=True),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        val_handlers=val_handlers,
        amp=amp,
    )

    train_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    train_handlers = [
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
        ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
        StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
        TensorBoardStatsHandler(log_dir="./runs/", tag_name="train_loss", output_transform=lambda x: x["loss"]),
        CheckpointSaver(save_dir="./runs/", save_dict={"net": net, "opt": opt}, save_interval=2, epoch_level=True),
    ]

    trainer = SupervisedTrainer(
        device=device,
        max_epochs=5,
        train_data_loader=train_loader,
        network=net,
        optimizer=opt,
        loss_function=loss,
        inferer=SimpleInferer(),
        post_transform=train_post_transforms,
        key_train_metric={"train_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        train_handlers=train_handlers,
        amp=amp,
    )
    trainer.run()


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
testbed/Project-MONAI__MONAI/monai/README.md
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# MONAI
|
| 2 |
+
|
| 3 |
+
* **apps**: high level medical domain specific deep learning applications.
|
| 4 |
+
|
| 5 |
+
* **config**: for system configuration and diagnostic output.
|
| 6 |
+
|
| 7 |
+
* **data**: for the datasets, readers/writers, and synthetic data
|
| 8 |
+
|
| 9 |
+
* **engines**: engine-derived classes for extending Ignite behaviour.
|
| 10 |
+
|
| 11 |
+
* **handlers**: defines handlers for implementing functionality at various stages in the training process.
|
| 12 |
+
|
| 13 |
+
* **inferers**: defines model inference methods.
|
| 14 |
+
|
| 15 |
+
* **losses**: classes defining loss functions.
|
| 16 |
+
|
| 17 |
+
* **metrics**: defines metric tracking types.
|
| 18 |
+
|
| 19 |
+
* **networks**: contains network definitions, component definitions, and PyTorch-specific utilities.
|
| 20 |
+
|
| 21 |
+
* **transforms**: defines data transforms for preprocessing and postprocessing.
|
| 22 |
+
|
| 23 |
+
* **utils**: generic utilities intended to be implemented in pure Python or using NumPy,
and not with PyTorch, such as namespace aliasing, auto module loading.
|
| 25 |
+
|
| 26 |
+
* **visualize**: utilities for data visualization.
|
testbed/Project-MONAI__MONAI/monai/__init__.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import os
import sys

from ._version import get_versions
from .utils.module import load_submodules

# version string computed by versioneer from git metadata (see _version.py)
__version__ = get_versions()["version"]
del get_versions

__copyright__ = "(c) 2020 MONAI Consortium"

# absolute directory of the installed package
__basedir__ = os.path.dirname(__file__)

# handlers_* have some external decorators the users may not have installed
# *.so files and folder "_C" may not exist when the cpp extensions are not compiled
excludes = "(^(handlers))|((\\.so)$)|(_C)"

# load directory modules only, skip loading individual files
load_submodules(sys.modules[__name__], False, exclude_pattern=excludes)

# load all modules, this will trigger all export decorations
load_submodules(sys.modules[__name__], True, exclude_pattern=excludes)
|
testbed/Project-MONAI__MONAI/monai/_version.py
ADDED
|
@@ -0,0 +1,519 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file helps to compute a version number in source trees obtained from
|
| 2 |
+
# git-archive tarball (such as those provided by githubs download-from-tag
|
| 3 |
+
# feature). Distribution tarballs (built by setup.py sdist) and build
|
| 4 |
+
# directories (produced by setup.py build) will contain a much shorter file
|
| 5 |
+
# that just contains the computed version number.
|
| 6 |
+
|
| 7 |
+
# This file is released into the public domain. Generated by
|
| 8 |
+
# versioneer-0.18 (https://github.com/warner/python-versioneer)
|
| 9 |
+
|
| 10 |
+
"""Git implementation of _version.py."""
|
| 11 |
+
|
| 12 |
+
import errno
|
| 13 |
+
import os
|
| 14 |
+
import re
|
| 15 |
+
import subprocess
|
| 16 |
+
import sys
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholder strings are substituted by ``git archive`` (via the
    # export-subst attribute). setup.py/versioneer.py grep for the variable
    # names, so each value is kept on its own logical entry.
    return {
        "refnames": "$Format:%d$",
        "full": "$Format:%H$",
        "date": "$Format:%ci$",
    }
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these values are filled in when 'setup.py versioneer' creates _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "",
        "versionfile_source": "monai/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Raised by the version-discovery helpers in this module (e.g.
    ``versions_from_parentdir``) when their strategy does not apply.
    """
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# module-level registries populated at import time
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""

    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable name in ``commands`` in turn (e.g.
    ``["git.cmd", "git.exe"]`` on Windows) with the same ``args``, and runs
    the first one that can be launched.

    Returns:
        (stdout, returncode) of the executed command; ``(None, None)`` when
        no candidate executable could be launched; ``(None, returncode)``
        when the command ran but exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            # ENOENT: this candidate doesn't exist, try the next one
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: loop exhausted without `break` -> nothing launchable
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    # Python 2/3 compatibility: decode bytes output on Python 3
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    level = 0
    while level < 3:
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            # found e.g. "project-1.2.3": everything after the prefix is the version
            return {
                "version": leaf[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # climb one directory level
        level += 1

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans ``versionfile_abs`` for the ``git_refnames``/``git_full``/``git_date``
    assignment lines and returns their string values in a dict keyed by
    "refnames"/"full"/"date". Missing or unreadable files yield an empty dict.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        # best-effort: a missing file just means no keywords are available
        pass
    return keywords
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    ``keywords`` is the dict produced by git-archive keyword expansion
    (refnames/full/date). Raises ``NotThisMethod`` when the keywords are
    absent or unexpanded (i.e. not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, error, date. Raises ``NotThisMethod`` when git is unavailable or
    ``root`` is not a git work tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def plus_or_dot(pieces):
    """Return "." when the rendered tag already carries a "+", else "+".

    PEP 440 local version segments start with a single "+"; subsequent
    components are joined with ".".
    """
    return "." if "+" in pieces.get("closest-tag", "") else "+"
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in the history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # append the local version segment: +DISTANCE.gHEX[.dirty]
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"] or pieces["dirty"]:
            out += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                out += ".dev0"
            # local segment carries the short commit hash
            out += plus_or_dot(pieces) + "g%s" % pieces["short"]
        return out
    # exception #1: no tag at all
    out = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        out += ".dev0"
    return out + "+g%s" % pieces["short"]
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    # a clean, exactly-tagged checkout renders as the bare tag
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag
    prefix = tag if tag else "0"  # exception #1 uses "0" in place of a tag
    rendered = "%s.post%d" % (prefix, pieces["distance"])
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out = "%s-%d-g%s" % (out, pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    return out + "-dirty" if pieces["dirty"] else out
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        base = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        base = pieces["short"]
    return base + "-dirty" if pieces["dirty"] else base
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # an earlier discovery step failed: report the error, not a version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # dispatch table instead of an if/elif chain
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.

    cfg = get_config()
    verbose = cfg.verbose

    # First choice: version-control keywords expanded at archive-creation time.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):  # lgtm[py/unused-loop-variable]
            root = os.path.dirname(root)
    except NameError:
        # no __file__ at all (frozen interpreter): nothing more we can do
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Second choice: ask git itself about the working tree.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    # Third choice: parse the version out of the parent directory name
    # (covers unpacked source tarballs named like "project-1.2.3/").
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    # All strategies failed; report an explicit "unknown" result.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
testbed/Project-MONAI__MONAI/monai/apps/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from .datasets import *
|
| 13 |
+
from .utils import *
|
testbed/Project-MONAI__MONAI/monai/apps/datasets.py
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import sys
|
| 14 |
+
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
|
| 15 |
+
|
| 16 |
+
from monai.apps.utils import download_and_extract
|
| 17 |
+
from monai.data import CacheDataset, load_decathalon_datalist
|
| 18 |
+
from monai.transforms import LoadNiftid, LoadPNGd, Randomizable
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class MedNISTDataset(Randomizable, CacheDataset):
    """
    The Dataset to automatically download MedNIST data and generate items for training, validation or test.
    It's based on `CacheDataset` to accelerate the training process.

    Args:
        root_dir: target directory to download and load MedNIST dataset.
        section: expected data section, can be: `training`, `validation` or `test`.
        transform: transforms to execute operations on input data. the default transform is `LoadPNGd`,
            which can load data into numpy array with [H, W] shape. for further usage, use `AddChanneld`
            to convert the shape to [C, H, W, D].
        download: whether to download and extract the MedNIST from resource link, default is False.
            if expected file already exists, skip downloading even set it to True.
            user can manually copy `MedNIST.tar.gz` file or `MedNIST` folder to root directory.
        seed: random seed to randomly split training, validation and test datasets, default is 0.
        val_frac: percentage of validation fraction in the whole dataset, default is 0.1.
        test_frac: percentage of test fraction in the whole dataset, default is 0.1.
        cache_num: number of items to be cached. Default is `sys.maxsize`.
            will take the minimum of (cache_num, data_length x cache_rate, data_length).
        cache_rate: percentage of cached data in total, default is 1.0 (cache all).
            will take the minimum of (cache_num, data_length x cache_rate, data_length).
        num_workers: the number of worker threads to use.
            if 0 a single thread will be used. Default is 0.

    Raises:
        ValueError: When ``root_dir`` is not a directory.
        RuntimeError: When ``dataset_dir`` doesn't exist and downloading is not selected (``download=False``).

    """

    # download location and integrity/layout constants for the MedNIST archive
    resource = "https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz?dl=1"
    md5 = "0bc7306e7427e00ad1c5526a6677552d"
    compressed_file_name = "MedNIST.tar.gz"
    dataset_folder_name = "MedNIST"

    def __init__(
        self,
        root_dir: str,
        section: str,
        transform: Optional[Union[Sequence[Callable], Callable]] = None,
        download: bool = False,
        seed: int = 0,
        val_frac: float = 0.1,
        test_frac: float = 0.1,
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_workers: int = 0,
    ) -> None:
        if not os.path.isdir(root_dir):
            raise ValueError("Root directory root_dir must be a directory.")
        self.section = section
        self.val_frac = val_frac
        self.test_frac = test_frac
        self.set_random_state(seed=seed)
        tarfile_name = os.path.join(root_dir, self.compressed_file_name)
        dataset_dir = os.path.join(root_dir, self.dataset_folder_name)
        if download:
            download_and_extract(self.resource, tarfile_name, root_dir, self.md5)

        if not os.path.exists(dataset_dir):
            raise RuntimeError(
                f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it."
            )
        if transform is None:
            # build the default transform here rather than in the signature: a
            # default argument is created once at import time and would be
            # shared by every dataset instance (mutable-default pitfall).
            transform = LoadPNGd("image")
        data = self._generate_data_list(dataset_dir)
        super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)

    def randomize(self, data: Optional[Any] = None) -> None:
        # draw one uniform sample in [0, 1); used to assign each image to a section
        self.rann = self.R.random()

    def _generate_data_list(self, dataset_dir: str) -> List[Dict]:
        """
        Build the list of {"image": path, "label": class-index} items for
        the requested section, splitting per-image by a seeded random draw.

        Raises:
            ValueError: When ``section`` is not one of ["training", "validation", "test"].

        """
        # one sub-directory per class; class index is the sorted position
        class_names = sorted((x for x in os.listdir(dataset_dir) if os.path.isdir(os.path.join(dataset_dir, x))))
        num_class = len(class_names)
        image_files = [
            [
                os.path.join(dataset_dir, class_names[i], x)
                for x in os.listdir(os.path.join(dataset_dir, class_names[i]))
            ]
            for i in range(num_class)
        ]
        num_each = [len(image_files[i]) for i in range(num_class)]
        image_files_list = []
        image_class = []
        for i in range(num_class):
            image_files_list.extend(image_files[i])
            image_class.extend([i] * num_each[i])
        num_total = len(image_class)

        data = []

        for i in range(num_total):
            self.randomize()
            # partition [0, 1) as: [0, val_frac) -> validation,
            # [val_frac, val_frac + test_frac) -> test, remainder -> training
            if self.section == "training":
                if self.rann < self.val_frac + self.test_frac:
                    continue
            elif self.section == "validation":
                if self.rann >= self.val_frac:
                    continue
            elif self.section == "test":
                if self.rann < self.val_frac or self.rann >= self.val_frac + self.test_frac:
                    continue
            else:
                raise ValueError(
                    f'Unsupported section: {self.section}, available options are ["training", "validation", "test"].'
                )
            data.append({"image": image_files_list[i], "label": image_class[i]})
        return data
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
class DecathlonDataset(Randomizable, CacheDataset):
    """
    The Dataset to automatically download the data of Medical Segmentation Decathlon challenge
    (http://medicaldecathlon.com/) and generate items for training, validation or test.
    It's based on :py:class:`monai.data.CacheDataset` to accelerate the training process.

    Args:
        root_dir: user's local directory for caching and loading the MSD datasets.
        task: which task to download and execute: one of list ("Task01_BrainTumour", "Task02_Heart",
            "Task03_Liver", "Task04_Hippocampus", "Task05_Prostate", "Task06_Lung", "Task07_Pancreas",
            "Task08_HepaticVessel", "Task09_Spleen", "Task10_Colon").
        section: expected data section, can be: `training`, `validation` or `test`.
        transform: transforms to execute operations on input data. the default transform is `LoadNiftid`,
            which can load NIfTI format data into numpy array with [H, W, D] or [H, W, D, C] shape.
            for further usage, use `AddChanneld` or `AsChannelFirstd` to convert the shape to [C, H, W, D].
        download: whether to download and extract the Decathlon from resource link, default is False.
            if expected file already exists, skip downloading even set it to True.
            user can manually copy tar file or dataset folder to the root directory.
        seed: random seed to randomly split `training`, `validation` and `test` datasets, default is 0.
        val_frac: percentage of validation fraction from the `training` section, default is 0.2.
            Decathlon data only contains `training` section with labels and `test` section without labels,
            so randomly select fraction from the `training` section as the `validation` section.
        cache_num: number of items to be cached. Default is `sys.maxsize`.
            will take the minimum of (cache_num, data_length x cache_rate, data_length).
        cache_rate: percentage of cached data in total, default is 1.0 (cache all).
            will take the minimum of (cache_num, data_length x cache_rate, data_length).
        num_workers: the number of worker threads to use.
            if 0 a single thread will be used. Default is 0.

    Raises:
        ValueError: When ``root_dir`` is not a directory.
        ValueError: When ``task`` is not one of ["Task01_BrainTumour", "Task02_Heart",
            "Task03_Liver", "Task04_Hippocampus", "Task05_Prostate", "Task06_Lung", "Task07_Pancreas",
            "Task08_HepaticVessel", "Task09_Spleen", "Task10_Colon"].
        RuntimeError: When ``dataset_dir`` doesn't exist and downloading is not selected (``download=False``).

    Example::

        transform = Compose(
            [
                LoadNiftid(keys=["image", "label"]),
                AddChanneld(keys=["image", "label"]),
                ScaleIntensityd(keys="image"),
                ToTensord(keys=["image", "label"]),
            ]
        )

        data = DecathlonDataset(
            root_dir="./", task="Task09_Spleen", transform=transform, section="validation", download=True
        )

        print(data[0]["image"], data[0]["label"])

    """

    # per-task archive URLs and their expected MD5 checksums
    resource = {
        "Task01_BrainTumour": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task01_BrainTumour.tar",
        "Task02_Heart": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task02_Heart.tar",
        "Task03_Liver": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task03_Liver.tar",
        "Task04_Hippocampus": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task04_Hippocampus.tar",
        "Task05_Prostate": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task05_Prostate.tar",
        "Task06_Lung": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task06_Lung.tar",
        "Task07_Pancreas": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task07_Pancreas.tar",
        "Task08_HepaticVessel": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task08_HepaticVessel.tar",
        "Task09_Spleen": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar",
        "Task10_Colon": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task10_Colon.tar",
    }
    md5 = {
        "Task01_BrainTumour": "240a19d752f0d9e9101544901065d872",
        "Task02_Heart": "06ee59366e1e5124267b774dbd654057",
        "Task03_Liver": "a90ec6c4aa7f6a3d087205e23d4e6397",
        "Task04_Hippocampus": "9d24dba78a72977dbd1d2e110310f31b",
        "Task05_Prostate": "35138f08b1efaef89d7424d2bcc928db",
        "Task06_Lung": "8afd997733c7fc0432f71255ba4e52dc",
        "Task07_Pancreas": "4f7080cfca169fa8066d17ce6eb061e4",
        "Task08_HepaticVessel": "641d79e80ec66453921d997fbf12a29c",
        "Task09_Spleen": "410d4a301da4e5b2f6f86ec3ddba524e",
        "Task10_Colon": "bad7a188931dc2f6acf72b08eb6202d0",
    }

    def __init__(
        self,
        root_dir: str,
        task: str,
        section: str,
        transform: Optional[Union[Sequence[Callable], Callable]] = None,
        download: bool = False,
        seed: int = 0,
        val_frac: float = 0.2,
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_workers: int = 0,
    ) -> None:
        if not os.path.isdir(root_dir):
            raise ValueError("Root directory root_dir must be a directory.")
        self.section = section
        self.val_frac = val_frac
        self.set_random_state(seed=seed)
        if task not in self.resource:
            raise ValueError(f"Unsupported task: {task}, available options are: {list(self.resource.keys())}.")
        dataset_dir = os.path.join(root_dir, task)
        tarfile_name = f"{dataset_dir}.tar"
        if download:
            download_and_extract(self.resource[task], tarfile_name, root_dir, self.md5[task])

        if not os.path.exists(dataset_dir):
            raise RuntimeError(
                f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it."
            )
        if transform is None:
            # build the default transform here rather than in the signature: a
            # default argument is created once at import time and would be
            # shared by every dataset instance (mutable-default pitfall).
            transform = LoadNiftid(["image", "label"])
        data = self._generate_data_list(dataset_dir)
        super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)

    def randomize(self, data: Optional[Any] = None) -> None:
        # draw one uniform sample in [0, 1); used to split training/validation
        self.rann = self.R.random()

    def _generate_data_list(self, dataset_dir: str) -> List[Dict]:
        """Load the datalist from ``dataset.json`` and split out the requested section."""
        # the Decathlon datalist only has "training" and "test" sections;
        # validation items are drawn randomly from "training"
        section = "training" if self.section in ["training", "validation"] else "test"
        datalist = load_decathalon_datalist(os.path.join(dataset_dir, "dataset.json"), True, section)
        if section == "test":
            return datalist
        data = []
        for i in datalist:
            self.randomize()
            if self.section == "training":
                # items with rann < val_frac belong to validation
                if self.rann < self.val_frac:
                    continue
            else:
                if self.rann >= self.val_frac:
                    continue
            data.append(i)
        return data
|
testbed/Project-MONAI__MONAI/monai/apps/utils.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
import hashlib
|
| 13 |
+
import logging
|
| 14 |
+
import os
|
| 15 |
+
import shutil
|
| 16 |
+
import tarfile
|
| 17 |
+
import zipfile
|
| 18 |
+
from typing import Optional
|
| 19 |
+
from urllib.error import ContentTooShortError, HTTPError, URLError
|
| 20 |
+
from urllib.request import Request, urlopen, urlretrieve
|
| 21 |
+
|
| 22 |
+
from monai.utils import optional_import, progress_bar
|
| 23 |
+
|
| 24 |
+
gdown, has_gdown = optional_import("gdown", "3.6")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def check_md5(filepath: str, md5_value: Optional[str] = None) -> bool:
|
| 28 |
+
"""
|
| 29 |
+
check MD5 signature of specified file.
|
| 30 |
+
|
| 31 |
+
Args:
|
| 32 |
+
filepath: path of source file to verify MD5.
|
| 33 |
+
md5_value: expected MD5 value of the file.
|
| 34 |
+
|
| 35 |
+
"""
|
| 36 |
+
if md5_value is not None:
|
| 37 |
+
md5 = hashlib.md5()
|
| 38 |
+
try:
|
| 39 |
+
with open(filepath, "rb") as f:
|
| 40 |
+
for chunk in iter(lambda: f.read(1024 * 1024), b""):
|
| 41 |
+
md5.update(chunk)
|
| 42 |
+
except Exception as e:
|
| 43 |
+
print(f"Exception in check_md5: {e}")
|
| 44 |
+
return False
|
| 45 |
+
if md5_value != md5.hexdigest():
|
| 46 |
+
return False
|
| 47 |
+
else:
|
| 48 |
+
print(f"expected MD5 is None, skip MD5 check for file {filepath}.")
|
| 49 |
+
|
| 50 |
+
return True
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def download_url(url: str, filepath: str, md5_value: Optional[str] = None) -> None:
    """
    Download file from specified URL link, support process bar and MD5 check.

    Args:
        url: source URL link to download file.
        filepath: target filepath to save the downloaded file.
        md5_value: expected MD5 value to validate the downloaded file.
            if None, skip MD5 validation.

    Raises:
        RuntimeError: When the MD5 validation of the ``filepath`` existing file fails.
        RuntimeError: When a network issue or denied permission prevents the
            file download from ``url`` to ``filepath``.
        URLError: See urllib.request.urlretrieve.
        HTTPError: See urllib.request.urlretrieve.
        ContentTooShortError: See urllib.request.urlretrieve.
        IOError: See urllib.request.urlretrieve.
        RuntimeError: When the MD5 validation of the ``url`` downloaded file fails.

    """
    if os.path.exists(filepath):
        # reuse an existing file only if it passes the (optional) MD5 check
        if not check_md5(filepath, md5_value):
            raise RuntimeError(f"MD5 check of existing file failed: filepath={filepath}, expected MD5={md5_value}.")
        print(f"file {filepath} exists, skip downloading.")
        return

    if url.startswith("https://drive.google.com"):
        # Google Drive downloads need the gdown helper
        if not has_gdown:
            raise RuntimeError("To download files from Google Drive, please install the gdown dependency.")
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        gdown.download(url, filepath, quiet=False)
        if not os.path.exists(filepath):
            raise RuntimeError(
                f"Download of file from {url} to {filepath} failed due to network issue or denied permission."
            )
    elif url.startswith("https://msd-for-monai.s3-us-west-2.amazonaws.com"):
        # resumable chunked download for the large MSD archives: partial data
        # accumulates in "<filepath>.part" and is continued via HTTP Range requests
        block_size = 1024 * 1024
        tmp_file_path = filepath + ".part"
        first_byte = os.path.getsize(tmp_file_path) if os.path.exists(tmp_file_path) else 0
        file_size = -1

        try:
            file_size = int(urlopen(url).info().get("Content-Length", -1))
            progress_bar(index=first_byte, count=file_size)

            while first_byte < file_size:
                last_byte = first_byte + block_size if first_byte + block_size < file_size else file_size - 1

                req = Request(url)
                req.headers["Range"] = "bytes=%s-%s" % (first_byte, last_byte)
                data_chunk = urlopen(req, timeout=10).read()
                with open(tmp_file_path, "ab") as f:
                    f.write(data_chunk)
                progress_bar(index=last_byte, count=file_size)
                first_byte = last_byte + 1
        except IOError as e:
            # best-effort: keep the partial file so a later call can resume
            logging.debug("IO Error - %s" % e)
        finally:
            if file_size == -1:
                raise Exception("Error getting Content-Length from server: %s" % url)
            # fix: guard the getsize call — if the very first request failed
            # before any partial file was written, os.path.getsize would raise
            # FileNotFoundError here and mask the original error.
            if os.path.exists(tmp_file_path) and file_size == os.path.getsize(tmp_file_path):
                if md5_value and not check_md5(tmp_file_path, md5_value):
                    raise Exception("Error validating the file against its MD5 hash")
                shutil.move(tmp_file_path, filepath)
    else:
        os.makedirs(os.path.dirname(filepath), exist_ok=True)

        def _process_hook(blocknum: int, blocksize: int, totalsize: int):
            # urlretrieve reporthook -> console progress bar
            progress_bar(blocknum * blocksize, totalsize, f"Downloading {filepath.split('/')[-1]}:")

        try:
            urlretrieve(url, filepath, reporthook=_process_hook)
            print(f"\ndownloaded file: {filepath}.")
        except (URLError, HTTPError, ContentTooShortError, IOError) as e:
            print(f"download failed from {url} to {filepath}.")
            raise e

    # final integrity check applies to every download branch
    if not check_md5(filepath, md5_value):
        raise RuntimeError(
            f"MD5 check of downloaded file failed: URL={url}, filepath={filepath}, expected MD5={md5_value}."
        )
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def extractall(filepath: str, output_dir: str, md5_value: Optional[str] = None) -> None:
|
| 138 |
+
"""
|
| 139 |
+
Extract file to the output directory.
|
| 140 |
+
Expected file types are: `zip`, `tar.gz` and `tar`.
|
| 141 |
+
|
| 142 |
+
Args:
|
| 143 |
+
filepath: the file path of compressed file.
|
| 144 |
+
output_dir: target directory to save extracted files.
|
| 145 |
+
md5_value: expected MD5 value to validate the compressed file.
|
| 146 |
+
if None, skip MD5 validation.
|
| 147 |
+
|
| 148 |
+
Raises:
|
| 149 |
+
RuntimeError: When the MD5 validation of the ``filepath`` compressed file fails.
|
| 150 |
+
ValueError: When the ``filepath`` file extension is not one of [zip", "tar.gz", "tar"].
|
| 151 |
+
|
| 152 |
+
"""
|
| 153 |
+
target_file = os.path.join(output_dir, os.path.basename(filepath).split(".")[0])
|
| 154 |
+
if os.path.exists(target_file):
|
| 155 |
+
print(f"extracted file {target_file} exists, skip extracting.")
|
| 156 |
+
return
|
| 157 |
+
if not check_md5(filepath, md5_value):
|
| 158 |
+
raise RuntimeError(f"MD5 check of compressed file failed: filepath={filepath}, expected MD5={md5_value}.")
|
| 159 |
+
|
| 160 |
+
if filepath.endswith("zip"):
|
| 161 |
+
zip_file = zipfile.ZipFile(filepath)
|
| 162 |
+
zip_file.extractall(output_dir)
|
| 163 |
+
zip_file.close()
|
| 164 |
+
elif filepath.endswith("tar") or filepath.endswith("tar.gz"):
|
| 165 |
+
tar_file = tarfile.open(filepath)
|
| 166 |
+
tar_file.extractall(output_dir)
|
| 167 |
+
tar_file.close()
|
| 168 |
+
else:
|
| 169 |
+
raise ValueError('Unsupported file extension, available options are: ["zip", "tar.gz", "tar"].')
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def download_and_extract(url: str, filepath: str, output_dir: str, md5_value: Optional[str] = None) -> None:
|
| 173 |
+
"""
|
| 174 |
+
Download file from URL and extract it to the output directory.
|
| 175 |
+
|
| 176 |
+
Args:
|
| 177 |
+
url: source URL link to download file.
|
| 178 |
+
filepath: the file path of compressed file.
|
| 179 |
+
output_dir: target directory to save extracted files.
|
| 180 |
+
defaut is None to save in current directory.
|
| 181 |
+
md5_value: expected MD5 value to validate the downloaded file.
|
| 182 |
+
if None, skip MD5 validation.
|
| 183 |
+
|
| 184 |
+
"""
|
| 185 |
+
download_url(url=url, filepath=filepath, md5_value=md5_value)
|
| 186 |
+
extractall(filepath=filepath, output_dir=output_dir, md5_value=md5_value)
|
testbed/Project-MONAI__MONAI/monai/engines/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from .evaluator import *
|
| 13 |
+
from .multi_gpu_supervised_trainer import *
|
| 14 |
+
from .trainer import *
|
testbed/Project-MONAI__MONAI/monai/engines/evaluator.py
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
from torch.utils.data import DataLoader
|
| 16 |
+
|
| 17 |
+
from monai.engines.utils import CommonKeys as Keys
|
| 18 |
+
from monai.engines.utils import default_prepare_batch
|
| 19 |
+
from monai.engines.workflow import Workflow
|
| 20 |
+
from monai.inferers import Inferer, SimpleInferer
|
| 21 |
+
from monai.transforms import Transform
|
| 22 |
+
from monai.utils import ensure_tuple, exact_version, optional_import
|
| 23 |
+
|
| 24 |
+
if TYPE_CHECKING:
|
| 25 |
+
from ignite.engine import Engine
|
| 26 |
+
from ignite.metrics import Metric
|
| 27 |
+
else:
|
| 28 |
+
Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
|
| 29 |
+
Metric, _ = optional_import("ignite.metrics", "0.3.0", exact_version, "Metric")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class Evaluator(Workflow):
|
| 33 |
+
"""
|
| 34 |
+
Base class for all kinds of evaluators, inherits from Workflow.
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
device: an object representing the device on which to run.
|
| 38 |
+
val_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader.
|
| 39 |
+
prepare_batch: function to parse image and label for current iteration.
|
| 40 |
+
iteration_update: the callable function for every iteration, expect to accept `engine`
|
| 41 |
+
and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
|
| 42 |
+
post_transform: execute additional transformation for the model output data.
|
| 43 |
+
Typically, several Tensor based transforms composed by `Compose`.
|
| 44 |
+
key_val_metric: compute metric when every iteration completed, and save average value to
|
| 45 |
+
engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
|
| 46 |
+
checkpoint into files.
|
| 47 |
+
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
|
| 48 |
+
val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
|
| 49 |
+
CheckpointHandler, StatsHandler, SegmentationSaver, etc.
|
| 50 |
+
amp: whether to enable auto-mixed-precision evaluation, default is False.
|
| 51 |
+
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
def __init__(
|
| 55 |
+
self,
|
| 56 |
+
device: torch.device,
|
| 57 |
+
val_data_loader: DataLoader,
|
| 58 |
+
prepare_batch: Callable = default_prepare_batch,
|
| 59 |
+
iteration_update: Optional[Callable] = None,
|
| 60 |
+
post_transform: Optional[Transform] = None,
|
| 61 |
+
key_val_metric: Optional[Dict[str, Metric]] = None,
|
| 62 |
+
additional_metrics: Optional[Dict[str, Metric]] = None,
|
| 63 |
+
val_handlers: Optional[Sequence] = None,
|
| 64 |
+
amp: bool = False,
|
| 65 |
+
) -> None:
|
| 66 |
+
super().__init__(
|
| 67 |
+
device=device,
|
| 68 |
+
max_epochs=1,
|
| 69 |
+
data_loader=val_data_loader,
|
| 70 |
+
prepare_batch=prepare_batch,
|
| 71 |
+
iteration_update=iteration_update,
|
| 72 |
+
post_transform=post_transform,
|
| 73 |
+
key_metric=key_val_metric,
|
| 74 |
+
additional_metrics=additional_metrics,
|
| 75 |
+
handlers=val_handlers,
|
| 76 |
+
amp=amp,
|
| 77 |
+
)
|
| 78 |
+
|
| 79 |
+
def run(self, global_epoch: int = 1) -> None:
|
| 80 |
+
"""
|
| 81 |
+
Execute validation/evaluation based on Ignite Engine.
|
| 82 |
+
|
| 83 |
+
Args:
|
| 84 |
+
global_epoch: the overall epoch if during a training. evaluator engine can get it from trainer.
|
| 85 |
+
|
| 86 |
+
"""
|
| 87 |
+
# init env value for current validation process
|
| 88 |
+
self.state.max_epochs = global_epoch
|
| 89 |
+
self.state.epoch = global_epoch - 1
|
| 90 |
+
self.state.iteration = 0
|
| 91 |
+
super().run()
|
| 92 |
+
|
| 93 |
+
def get_validation_stats(self) -> Dict[str, float]:
|
| 94 |
+
return {"best_validation_metric": self.state.best_metric, "best_validation_epoch": self.state.best_metric_epoch}
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class SupervisedEvaluator(Evaluator):
|
| 98 |
+
"""
|
| 99 |
+
Standard supervised evaluation method with image and label(optional), inherits from evaluator and Workflow.
|
| 100 |
+
|
| 101 |
+
Args:
|
| 102 |
+
device: an object representing the device on which to run.
|
| 103 |
+
val_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader.
|
| 104 |
+
network: use the network to run model forward.
|
| 105 |
+
prepare_batch: function to parse image and label for current iteration.
|
| 106 |
+
iteration_update: the callable function for every iteration, expect to accept `engine`
|
| 107 |
+
and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
|
| 108 |
+
inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.
|
| 109 |
+
post_transform: execute additional transformation for the model output data.
|
| 110 |
+
Typically, several Tensor based transforms composed by `Compose`.
|
| 111 |
+
key_val_metric: compute metric when every iteration completed, and save average value to
|
| 112 |
+
engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
|
| 113 |
+
checkpoint into files.
|
| 114 |
+
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
|
| 115 |
+
val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
|
| 116 |
+
CheckpointHandler, StatsHandler, SegmentationSaver, etc.
|
| 117 |
+
amp: whether to enable auto-mixed-precision evaluation, default is False.
|
| 118 |
+
|
| 119 |
+
"""
|
| 120 |
+
|
| 121 |
+
def __init__(
|
| 122 |
+
self,
|
| 123 |
+
device: torch.device,
|
| 124 |
+
val_data_loader: DataLoader,
|
| 125 |
+
network: torch.nn.Module,
|
| 126 |
+
prepare_batch: Callable = default_prepare_batch,
|
| 127 |
+
iteration_update: Optional[Callable] = None,
|
| 128 |
+
inferer: Inferer = SimpleInferer(),
|
| 129 |
+
post_transform: Optional[Transform] = None,
|
| 130 |
+
key_val_metric: Optional[Dict[str, Metric]] = None,
|
| 131 |
+
additional_metrics: Optional[Dict[str, Metric]] = None,
|
| 132 |
+
val_handlers: Optional[Sequence] = None,
|
| 133 |
+
amp: bool = False,
|
| 134 |
+
) -> None:
|
| 135 |
+
super().__init__(
|
| 136 |
+
device=device,
|
| 137 |
+
val_data_loader=val_data_loader,
|
| 138 |
+
prepare_batch=prepare_batch,
|
| 139 |
+
iteration_update=iteration_update,
|
| 140 |
+
post_transform=post_transform,
|
| 141 |
+
key_val_metric=key_val_metric,
|
| 142 |
+
additional_metrics=additional_metrics,
|
| 143 |
+
val_handlers=val_handlers,
|
| 144 |
+
amp=amp,
|
| 145 |
+
)
|
| 146 |
+
|
| 147 |
+
self.network = network
|
| 148 |
+
self.inferer = inferer
|
| 149 |
+
|
| 150 |
+
def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
|
| 151 |
+
"""
|
| 152 |
+
callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine.
|
| 153 |
+
Return below items in a dictionary:
|
| 154 |
+
- IMAGE: image Tensor data for model input, already moved to device.
|
| 155 |
+
- LABEL: label Tensor data corresponding to the image, already moved to device.
|
| 156 |
+
- PRED: prediction result of model.
|
| 157 |
+
|
| 158 |
+
Args:
|
| 159 |
+
engine: Ignite Engine, it can be a trainer, validator or evaluator.
|
| 160 |
+
batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.
|
| 161 |
+
|
| 162 |
+
Raises:
|
| 163 |
+
ValueError: When ``batchdata`` is None.
|
| 164 |
+
|
| 165 |
+
"""
|
| 166 |
+
if batchdata is None:
|
| 167 |
+
raise ValueError("Must provide batch data for current iteration.")
|
| 168 |
+
inputs, targets = self.prepare_batch(batchdata)
|
| 169 |
+
inputs = inputs.to(engine.state.device)
|
| 170 |
+
if targets is not None:
|
| 171 |
+
targets = targets.to(engine.state.device)
|
| 172 |
+
|
| 173 |
+
# execute forward computation
|
| 174 |
+
self.network.eval()
|
| 175 |
+
with torch.no_grad():
|
| 176 |
+
if self.amp:
|
| 177 |
+
with torch.cuda.amp.autocast():
|
| 178 |
+
predictions = self.inferer(inputs, self.network)
|
| 179 |
+
else:
|
| 180 |
+
predictions = self.inferer(inputs, self.network)
|
| 181 |
+
|
| 182 |
+
return {Keys.IMAGE: inputs, Keys.LABEL: targets, Keys.PRED: predictions}
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class EnsembleEvaluator(Evaluator):
|
| 186 |
+
"""
|
| 187 |
+
Ensemble evaluation for multiple models, inherits from evaluator and Workflow.
|
| 188 |
+
It accepts a list of models for inference and outputs a list of predictions for further operations.
|
| 189 |
+
|
| 190 |
+
Args:
|
| 191 |
+
device: an object representing the device on which to run.
|
| 192 |
+
val_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader.
|
| 193 |
+
networks: use the networks to run model forward in order.
|
| 194 |
+
pred_keys: the keys to store every prediction data.
|
| 195 |
+
the length must exactly match the number of networks.
|
| 196 |
+
prepare_batch: function to parse image and label for current iteration.
|
| 197 |
+
iteration_update: the callable function for every iteration, expect to accept `engine`
|
| 198 |
+
and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
|
| 199 |
+
inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.
|
| 200 |
+
post_transform: execute additional transformation for the model output data.
|
| 201 |
+
Typically, several Tensor based transforms composed by `Compose`.
|
| 202 |
+
key_val_metric: compute metric when every iteration completed, and save average value to
|
| 203 |
+
engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
|
| 204 |
+
checkpoint into files.
|
| 205 |
+
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
|
| 206 |
+
val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
|
| 207 |
+
CheckpointHandler, StatsHandler, SegmentationSaver, etc.
|
| 208 |
+
amp: whether to enable auto-mixed-precision evaluation, default is False.
|
| 209 |
+
|
| 210 |
+
"""
|
| 211 |
+
|
| 212 |
+
def __init__(
|
| 213 |
+
self,
|
| 214 |
+
device: torch.device,
|
| 215 |
+
val_data_loader: DataLoader,
|
| 216 |
+
networks: Sequence[torch.nn.Module],
|
| 217 |
+
pred_keys: Sequence[str],
|
| 218 |
+
prepare_batch: Callable = default_prepare_batch,
|
| 219 |
+
iteration_update: Optional[Callable] = None,
|
| 220 |
+
inferer: Inferer = SimpleInferer(),
|
| 221 |
+
post_transform: Optional[Transform] = None,
|
| 222 |
+
key_val_metric: Optional[Dict[str, Metric]] = None,
|
| 223 |
+
additional_metrics: Optional[Dict[str, Metric]] = None,
|
| 224 |
+
val_handlers: Optional[Sequence] = None,
|
| 225 |
+
amp: bool = False,
|
| 226 |
+
) -> None:
|
| 227 |
+
super().__init__(
|
| 228 |
+
device=device,
|
| 229 |
+
val_data_loader=val_data_loader,
|
| 230 |
+
prepare_batch=prepare_batch,
|
| 231 |
+
iteration_update=iteration_update,
|
| 232 |
+
post_transform=post_transform,
|
| 233 |
+
key_val_metric=key_val_metric,
|
| 234 |
+
additional_metrics=additional_metrics,
|
| 235 |
+
val_handlers=val_handlers,
|
| 236 |
+
amp=amp,
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
self.networks = ensure_tuple(networks)
|
| 240 |
+
self.pred_keys = ensure_tuple(pred_keys)
|
| 241 |
+
self.inferer = inferer
|
| 242 |
+
|
| 243 |
+
def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
|
| 244 |
+
"""
|
| 245 |
+
callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine.
|
| 246 |
+
Return below items in a dictionary:
|
| 247 |
+
- IMAGE: image Tensor data for model input, already moved to device.
|
| 248 |
+
- LABEL: label Tensor data corresponding to the image, already moved to device.
|
| 249 |
+
- pred_keys[0]: prediction result of network 0.
|
| 250 |
+
- pred_keys[1]: prediction result of network 1.
|
| 251 |
+
- ... ...
|
| 252 |
+
- pred_keys[N]: prediction result of network N.
|
| 253 |
+
|
| 254 |
+
Args:
|
| 255 |
+
engine: Ignite Engine, it can be a trainer, validator or evaluator.
|
| 256 |
+
batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.
|
| 257 |
+
|
| 258 |
+
Raises:
|
| 259 |
+
ValueError: When ``batchdata`` is None.
|
| 260 |
+
|
| 261 |
+
"""
|
| 262 |
+
if batchdata is None:
|
| 263 |
+
raise ValueError("Must provide batch data for current iteration.")
|
| 264 |
+
inputs, targets = self.prepare_batch(batchdata)
|
| 265 |
+
inputs = inputs.to(engine.state.device)
|
| 266 |
+
if targets is not None:
|
| 267 |
+
targets = targets.to(engine.state.device)
|
| 268 |
+
|
| 269 |
+
# execute forward computation
|
| 270 |
+
predictions = {Keys.IMAGE: inputs, Keys.LABEL: targets}
|
| 271 |
+
for idx, network in enumerate(self.networks):
|
| 272 |
+
network.eval()
|
| 273 |
+
with torch.no_grad():
|
| 274 |
+
if self.amp:
|
| 275 |
+
with torch.cuda.amp.autocast():
|
| 276 |
+
predictions.update({self.pred_keys[idx]: self.inferer(inputs, network)})
|
| 277 |
+
else:
|
| 278 |
+
predictions.update({self.pred_keys[idx]: self.inferer(inputs, network)})
|
| 279 |
+
|
| 280 |
+
return predictions
|
testbed/Project-MONAI__MONAI/monai/engines/multi_gpu_supervised_trainer.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence, Tuple
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn
|
| 16 |
+
from torch.nn.parallel import DataParallel, DistributedDataParallel
|
| 17 |
+
from torch.optim.optimizer import Optimizer
|
| 18 |
+
|
| 19 |
+
from monai.engines.utils import get_devices_spec
|
| 20 |
+
from monai.utils import exact_version, optional_import
|
| 21 |
+
|
| 22 |
+
create_supervised_trainer, _ = optional_import("ignite.engine", "0.3.0", exact_version, "create_supervised_trainer")
|
| 23 |
+
create_supervised_evaluator, _ = optional_import("ignite.engine", "0.3.0", exact_version, "create_supervised_evaluator")
|
| 24 |
+
_prepare_batch, _ = optional_import("ignite.engine", "0.3.0", exact_version, "_prepare_batch")
|
| 25 |
+
if TYPE_CHECKING:
|
| 26 |
+
from ignite.engine import Engine
|
| 27 |
+
from ignite.metrics import Metric
|
| 28 |
+
else:
|
| 29 |
+
Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
|
| 30 |
+
Metric, _ = optional_import("ignite.metrics", "0.3.0", exact_version, "Metric")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _default_transform(_x: torch.Tensor, _y: torch.Tensor, _y_pred: torch.Tensor, loss: torch.Tensor) -> float:
|
| 34 |
+
return loss.item()
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _default_eval_transform(
|
| 38 |
+
x: torch.Tensor, y: torch.Tensor, y_pred: torch.Tensor
|
| 39 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 40 |
+
return y_pred, y
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def create_multigpu_supervised_trainer(
|
| 44 |
+
net: torch.nn.Module,
|
| 45 |
+
optimizer: Optimizer,
|
| 46 |
+
loss_fn: Callable,
|
| 47 |
+
devices: Optional[Sequence[torch.device]] = None,
|
| 48 |
+
non_blocking: bool = False,
|
| 49 |
+
prepare_batch: Callable = _prepare_batch,
|
| 50 |
+
output_transform: Callable = _default_transform,
|
| 51 |
+
distributed: bool = False,
|
| 52 |
+
) -> Engine:
|
| 53 |
+
"""
|
| 54 |
+
Derived from `create_supervised_trainer` in Ignite.
|
| 55 |
+
|
| 56 |
+
Factory function for creating a trainer for supervised models.
|
| 57 |
+
|
| 58 |
+
Args:
|
| 59 |
+
net: the network to train.
|
| 60 |
+
optimizer: the optimizer to use.
|
| 61 |
+
loss_fn: the loss function to use.
|
| 62 |
+
devices: device(s) type specification (default: None).
|
| 63 |
+
Applies to both model and batches. None is all devices used, empty list is CPU only.
|
| 64 |
+
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
|
| 65 |
+
with respect to the host. For other cases, this argument has no effect.
|
| 66 |
+
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
|
| 67 |
+
tuple of tensors `(batch_x, batch_y)`.
|
| 68 |
+
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
|
| 69 |
+
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
|
| 70 |
+
distributed: whether convert model to `DistributedDataParallel`, if have multiple devices, use
|
| 71 |
+
the first device as output device.
|
| 72 |
+
|
| 73 |
+
Returns:
|
| 74 |
+
Engine: a trainer engine with supervised update function.
|
| 75 |
+
|
| 76 |
+
Note:
|
| 77 |
+
`engine.state.output` for this engine is defined by `output_transform` parameter and is the loss
|
| 78 |
+
of the processed batch by default.
|
| 79 |
+
"""
|
| 80 |
+
|
| 81 |
+
devices_ = get_devices_spec(devices)
|
| 82 |
+
if distributed:
|
| 83 |
+
net = DistributedDataParallel(net, device_ids=devices_)
|
| 84 |
+
elif len(devices_) > 1:
|
| 85 |
+
net = DataParallel(net)
|
| 86 |
+
|
| 87 |
+
return create_supervised_trainer(
|
| 88 |
+
net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def create_multigpu_supervised_evaluator(
|
| 93 |
+
net: torch.nn.Module,
|
| 94 |
+
metrics: Optional[Dict[str, Metric]] = None,
|
| 95 |
+
devices: Optional[Sequence[torch.device]] = None,
|
| 96 |
+
non_blocking: bool = False,
|
| 97 |
+
prepare_batch: Callable = _prepare_batch,
|
| 98 |
+
output_transform: Callable = _default_eval_transform,
|
| 99 |
+
distributed: bool = False,
|
| 100 |
+
) -> Engine:
|
| 101 |
+
"""
|
| 102 |
+
Derived from `create_supervised_evaluator` in Ignite.
|
| 103 |
+
|
| 104 |
+
Factory function for creating an evaluator for supervised models.
|
| 105 |
+
|
| 106 |
+
Args:
|
| 107 |
+
net: the model to train.
|
| 108 |
+
metrics: a map of metric names to Metrics.
|
| 109 |
+
devices: device(s) type specification (default: None).
|
| 110 |
+
Applies to both model and batches. None is all devices used, empty list is CPU only.
|
| 111 |
+
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
|
| 112 |
+
with respect to the host. For other cases, this argument has no effect.
|
| 113 |
+
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
|
| 114 |
+
tuple of tensors `(batch_x, batch_y)`.
|
| 115 |
+
output_transform: function that receives 'x', 'y', 'y_pred' and returns value
|
| 116 |
+
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)`
|
| 117 |
+
which fits output expected by metrics. If you change it you should use `output_transform` in metrics.
|
| 118 |
+
distributed: whether convert model to `DistributedDataParallel`, if have multiple devices, use
|
| 119 |
+
the first device as output device.
|
| 120 |
+
|
| 121 |
+
Note:
|
| 122 |
+
`engine.state.output` for this engine is defined by `output_transform` parameter and is
|
| 123 |
+
a tuple of `(batch_pred, batch_y)` by default.
|
| 124 |
+
|
| 125 |
+
Returns:
|
| 126 |
+
Engine: an evaluator engine with supervised inference function.
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
devices_ = get_devices_spec(devices)
|
| 130 |
+
|
| 131 |
+
if distributed:
|
| 132 |
+
net = DistributedDataParallel(net, device_ids=devices_)
|
| 133 |
+
elif len(devices_) > 1:
|
| 134 |
+
net = DataParallel(net)
|
| 135 |
+
|
| 136 |
+
return create_supervised_evaluator(net, metrics, devices_[0], non_blocking, prepare_batch, output_transform)
|
testbed/Project-MONAI__MONAI/monai/engines/trainer.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence, Union
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
from torch.optim.optimizer import Optimizer
|
| 16 |
+
from torch.utils.data import DataLoader
|
| 17 |
+
|
| 18 |
+
from monai.engines.utils import CommonKeys as Keys
|
| 19 |
+
from monai.engines.utils import GanKeys, default_make_latent, default_prepare_batch
|
| 20 |
+
from monai.engines.workflow import Workflow
|
| 21 |
+
from monai.inferers import Inferer, SimpleInferer
|
| 22 |
+
from monai.transforms import Transform
|
| 23 |
+
from monai.utils import exact_version, optional_import
|
| 24 |
+
|
| 25 |
+
if TYPE_CHECKING:
|
| 26 |
+
from ignite.engine import Engine
|
| 27 |
+
from ignite.metrics import Metric
|
| 28 |
+
else:
|
| 29 |
+
Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
|
| 30 |
+
Metric, _ = optional_import("ignite.metrics", "0.3.0", exact_version, "Metric")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Trainer(Workflow):
|
| 34 |
+
"""
|
| 35 |
+
Base class for all kinds of trainers, inherits from Workflow.
|
| 36 |
+
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
def run(self) -> None:
|
| 40 |
+
"""
|
| 41 |
+
Execute training based on Ignite Engine.
|
| 42 |
+
If call this function multiple times, it will continuously run from the previous state.
|
| 43 |
+
|
| 44 |
+
"""
|
| 45 |
+
if self._is_done(self.state):
|
| 46 |
+
self.state.iteration = 0 # to avoid creating new State instance in ignite Engine.run
|
| 47 |
+
self.scaler = torch.cuda.amp.GradScaler() if self.amp else None
|
| 48 |
+
super().run()
|
| 49 |
+
|
| 50 |
+
def get_train_stats(self) -> Dict[str, float]:
|
| 51 |
+
return {"total_epochs": self.state.max_epochs, "total_iterations": self.state.epoch_length}
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class SupervisedTrainer(Trainer):
    """
    Standard supervised training method with image and label, inherits from ``Trainer`` and ``Workflow``.

    Args:
        device: an object representing the device on which to run.
        max_epochs: the total epoch number for trainer to run.
        train_data_loader: Ignite engine use data_loader to run, must be torch.DataLoader.
        network: to train with this network.
        optimizer: the optimizer associated to the network.
        loss_function: the loss function associated to the optimizer.
        prepare_batch: function to parse image and label for current iteration.
        iteration_update: the callable function for every iteration, expect to accept `engine`
            and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
        inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.
        post_transform: execute additional transformation for the model output data.
            Typically, several Tensor based transforms composed by `Compose`.
        key_train_metric: compute metric when every iteration completed, and save average value to
            engine.state.metrics when epoch completed. key_train_metric is the main metric to compare
            and save the checkpoint into files.
        additional_metrics: more Ignite metrics that also attach to Ignite Engine.
        train_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
            CheckpointHandler, StatsHandler, SegmentationSaver, etc.
        amp: whether to enable auto-mixed-precision training, default is False.

    """

    def __init__(
        self,
        device: torch.device,
        max_epochs: int,
        train_data_loader: DataLoader,
        network: torch.nn.Module,
        optimizer: Optimizer,
        loss_function: Callable,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        inferer: Inferer = SimpleInferer(),
        post_transform: Optional[Transform] = None,
        key_train_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        train_handlers: Optional[Sequence] = None,
        amp: bool = False,
    ) -> None:
        # initialise the underlying Ignite engine / workflow machinery first
        super().__init__(
            device=device,
            max_epochs=max_epochs,
            data_loader=train_data_loader,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            post_transform=post_transform,
            key_metric=key_train_metric,
            additional_metrics=additional_metrics,
            handlers=train_handlers,
            amp=amp,
        )

        self.network = network
        self.optimizer = optimizer
        self.loss_function = loss_function
        self.inferer = inferer

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]):
        """
        Callback function for the Supervised Training processing logic of 1 iteration in Ignite Engine.
        Return below items in a dictionary:
            - IMAGE: image Tensor data for model input, already moved to device.
            - LABEL: label Tensor data corresponding to the image, already moved to device.
            - PRED: prediction result of model.
            - LOSS: loss value computed by loss function.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.

        Raises:
            ValueError: When ``batchdata`` is None.

        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")
        inputs, targets = self.prepare_batch(batchdata)
        inputs = inputs.to(engine.state.device)
        targets = targets.to(engine.state.device)

        def _forward_and_loss():
            # run the inferer forward and reduce the loss to a scalar
            preds = self.inferer(inputs, self.network)
            return preds, self.loss_function(preds, targets).mean()

        self.network.train()
        self.optimizer.zero_grad()
        if self.amp and self.scaler is not None:
            # autocast the forward pass, then scale gradients for the backward/step
            with torch.cuda.amp.autocast():
                predictions, loss = _forward_and_loss()
            self.scaler.scale(loss).backward()
            self.scaler.step(self.optimizer)
            self.scaler.update()
        else:
            predictions, loss = _forward_and_loss()
            loss.backward()
            self.optimizer.step()

        return {Keys.IMAGE: inputs, Keys.LABEL: targets, Keys.PRED: predictions, Keys.LOSS: loss.item()}
|
| 157 |
+
class GanTrainer(Trainer):
    """
    Generative adversarial network training based on Goodfellow et al. 2014 https://arxiv.org/abs/1406.266,
    inherits from ``Trainer`` and ``Workflow``.

    Training Loop: for each batch of data size `m`
        1. Generate `m` fakes from random latent codes.
        2. Update discriminator with these fakes and current batch reals, repeated d_train_steps times.
        3. If g_update_latents, generate `m` fakes from new random latent codes.
        4. Update generator with these fakes using discriminator feedback.

    Args:
        device: an object representing the device on which to run.
        max_epochs: the total epoch number for engine to run.
        train_data_loader: Core ignite engines uses `DataLoader` for training loop batchdata.
        g_network: generator (G) network architecture.
        g_optimizer: G optimizer function.
        g_loss_function: G loss function for optimizer.
        d_network: discriminator (D) network architecture.
        d_optimizer: D optimizer function.
        d_loss_function: D loss function for optimizer.
        g_inferer: inference method to execute G model forward. Defaults to ``SimpleInferer()``.
        d_inferer: inference method to execute D model forward. Defaults to ``SimpleInferer()``.
        d_train_steps: number of times to update D with real data minibatch. Defaults to ``1``.
        latent_shape: size of G input latent code. Defaults to ``64``.
        d_prepare_batch: callback function to prepare batchdata for D inferer.
            Defaults to return ``GanKeys.REALS`` in batchdata dict.
        g_prepare_batch: callback function to create batch of latent input for G inferer.
            Defaults to return random latents.
        g_update_latents: Calculate G loss with new latent codes. Defaults to ``True``.
        iteration_update: the callable function for every iteration, expect to accept `engine`
            and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
        post_transform: execute additional transformation for the model output data.
            Typically, several Tensor based transforms composed by `Compose`.
        key_train_metric: compute metric when every iteration completed, and save average value to
            engine.state.metrics when epoch completed. key_train_metric is the main metric to compare
            and save the checkpoint into files.
        additional_metrics: more Ignite metrics that also attach to Ignite Engine.
        train_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
            CheckpointHandler, StatsHandler, SegmentationSaver, etc.

    """

    def __init__(
        self,
        device: torch.device,
        max_epochs: int,
        train_data_loader: DataLoader,
        g_network: torch.nn.Module,
        g_optimizer: Optimizer,
        g_loss_function: Callable,
        d_network: torch.nn.Module,
        d_optimizer: Optimizer,
        d_loss_function: Callable,
        g_inferer: Inferer = SimpleInferer(),
        d_inferer: Inferer = SimpleInferer(),
        d_train_steps: int = 1,
        latent_shape: int = 64,
        d_prepare_batch: Callable = default_prepare_batch,
        g_prepare_batch: Callable = default_make_latent,
        g_update_latents: bool = True,
        iteration_update: Optional[Callable] = None,
        post_transform: Optional[Transform] = None,
        key_train_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        train_handlers: Optional[Sequence] = None,
    ):
        # initialise the shared Ignite engine / workflow machinery; the
        # discriminator's prepare_batch becomes the workflow-level one
        super().__init__(
            device=device,
            max_epochs=max_epochs,
            data_loader=train_data_loader,
            prepare_batch=d_prepare_batch,
            iteration_update=iteration_update,
            key_metric=key_train_metric,
            additional_metrics=additional_metrics,
            handlers=train_handlers,
            post_transform=post_transform,
        )
        # generator side
        self.g_network = g_network
        self.g_optimizer = g_optimizer
        self.g_loss_function = g_loss_function
        self.g_inferer = g_inferer
        # discriminator side
        self.d_network = d_network
        self.d_optimizer = d_optimizer
        self.d_loss_function = d_loss_function
        self.d_inferer = d_inferer
        self.d_train_steps = d_train_steps
        # latent sampling configuration
        self.latent_shape = latent_shape
        self.g_prepare_batch = g_prepare_batch
        self.g_update_latents = g_update_latents

    def _iteration(
        self, engine: Engine, batchdata: Union[Dict, Sequence]
    ) -> Dict[str, Union[torch.Tensor, int, float, bool]]:
        """
        Callback function for Adversarial Training processing logic of 1 iteration in Ignite Engine.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.

        Raises:
            ValueError: must provide batch data for current iteration.

        """
        if batchdata is None:
            raise ValueError("must provide batch data for current iteration.")

        d_input = self.prepare_batch(batchdata).to(engine.state.device)
        batch_size = self.data_loader.batch_size
        g_input = self.g_prepare_batch(batch_size, self.latent_shape, batchdata).to(engine.state.device)
        g_output = self.g_inferer(g_input, self.g_network)

        # --- discriminator updates: repeat d_train_steps times on the same fakes/reals ---
        d_total_loss = torch.zeros(1)
        for _ in range(self.d_train_steps):
            self.d_optimizer.zero_grad()
            d_step_loss = self.d_loss_function(g_output, d_input)
            d_step_loss.backward()
            self.d_optimizer.step()
            d_total_loss += d_step_loss.item()

        # --- generator update: optionally resample latents after D has moved ---
        if self.g_update_latents:
            g_input = self.g_prepare_batch(batch_size, self.latent_shape, batchdata).to(engine.state.device)
            g_output = self.g_inferer(g_input, self.g_network)
        self.g_optimizer.zero_grad()
        g_loss = self.g_loss_function(g_output)
        g_loss.backward()
        self.g_optimizer.step()

        return {
            GanKeys.REALS: d_input,
            GanKeys.FAKES: g_output,
            GanKeys.LATENTS: g_input,
            GanKeys.GLOSS: g_loss.item(),
            GanKeys.DLOSS: d_total_loss.item(),
        }
|
testbed/Project-MONAI__MONAI/monai/engines/utils.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from typing import Dict, List, Optional, Sequence, Tuple, Union
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class CommonKeys:
    """
    A set of common keys for dictionary based supervised training process.

    `IMAGE` is the input image data.
    `LABEL` is the training or evaluation label of segmentation or classification task.
    `PRED` is the prediction data of model output.
    `LOSS` is the loss value of current iteration.
    """

    IMAGE = "image"
    LABEL = "label"
    PRED = "pred"
    LOSS = "loss"
| 33 |
+
|
| 34 |
+
class GanKeys:
    """
    A set of common keys for generative adversarial networks:
    real samples, generated (fake) samples, latent codes, and the two losses.
    """

    REALS = "reals"
    FAKES = "fakes"
    LATENTS = "latents"
    GLOSS = "g_loss"
    DLOSS = "d_loss"
| 45 |
+
|
| 46 |
+
def get_devices_spec(devices: Optional[Sequence[torch.device]] = None) -> List[torch.device]:
    """
    Get a valid specification for one or more devices. If `devices` is None get devices for all CUDA
    devices available. If `devices` is a zero-length structure a single CPU compute device is returned.
    In any other cases `devices` is returned unchanged.

    Args:
        devices: list of devices to request, None for all GPU devices, [] for CPU.

    Raises:
        RuntimeError: When all GPUs are selected (``devices=None``) but no GPUs are available.

    Returns:
        list of torch.device: list of devices.

    """
    # NOTE: as previously written the chain tested ``len(devices) == 0`` twice,
    # so the CPU fallback branch was unreachable; the RuntimeError belongs only
    # to the "all CUDA devices requested but none found" case.
    if devices is None:
        # enumerate every visible CUDA device
        devices = [torch.device(f"cuda:{d:d}") for d in range(torch.cuda.device_count())]
        if len(devices) == 0:
            raise RuntimeError("No GPU devices available.")
    elif len(devices) == 0:
        # an explicitly empty request means "use the CPU"
        devices = [torch.device("cpu")]
    else:
        # normalise any sequence type to a plain list copy
        devices = list(devices)

    return devices
|
| 76 |
+
|
| 77 |
+
def default_prepare_batch(
    batchdata: Dict[str, torch.Tensor]
) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], torch.Tensor]:
    """
    Default batch parser: extract (image, label) for supervised training, the real
    samples for GAN training, or (image, None) when no label is present.
    """
    assert isinstance(batchdata, dict), "default prepare_batch expects dictionary input data."
    if CommonKeys.LABEL in batchdata:
        # supervised case: image plus its label
        return batchdata[CommonKeys.IMAGE], batchdata[CommonKeys.LABEL]
    if GanKeys.REALS in batchdata:
        # GAN case: only the real samples are needed
        return batchdata[GanKeys.REALS]
    # inference case: image only, no label available
    return batchdata[CommonKeys.IMAGE], None
|
| 88 |
+
|
| 89 |
+
def default_make_latent(num_latents: int, latent_size: int, real_data: Optional[torch.Tensor] = None) -> torch.Tensor:
    """Draw a batch of standard-normal latent codes of shape ``(num_latents, latent_size)``.

    ``real_data`` is accepted for interface compatibility with custom latent
    factories but is not used by this default implementation.
    """
    return torch.randn(num_latents, latent_size)
|
testbed/Project-MONAI__MONAI/monai/engines/workflow.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
from torch.utils.data import DataLoader
|
| 16 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 17 |
+
|
| 18 |
+
from monai.engines.utils import default_prepare_batch
|
| 19 |
+
from monai.transforms import apply_transform
|
| 20 |
+
from monai.utils import ensure_tuple, exact_version, optional_import
|
| 21 |
+
|
| 22 |
+
IgniteEngine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
|
| 23 |
+
State, _ = optional_import("ignite.engine", "0.3.0", exact_version, "State")
|
| 24 |
+
Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events")
|
| 25 |
+
if TYPE_CHECKING:
|
| 26 |
+
from ignite.engine import Engine
|
| 27 |
+
from ignite.metrics import Metric
|
| 28 |
+
else:
|
| 29 |
+
Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
|
| 30 |
+
Metric, _ = optional_import("ignite.metrics", "0.3.0", exact_version, "Metric")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Workflow(IgniteEngine):  # type: ignore[valid-type, misc] # due to optional_import
    """
    Workflow defines the core work process inheriting from Ignite engine.
    All trainer, validator and evaluator share this same workflow as base class,
    because they all can be treated as same Ignite engine loops.
    It initializes all the sharable data in Ignite engine.state.
    And attach additional processing logics to Ignite engine based on Event-Handler mechanism.

    Users should consider to inherit from `trainer` or `evaluator` to develop more trainers or evaluators.

    Args:
        device: an object representing the device on which to run.
        max_epochs: the total epoch number for engine to run, validator and evaluator have only 1 epoch.
        data_loader: Ignite engine use data_loader to run, must be torch.DataLoader.
        prepare_batch: function to parse image and label for every iteration.
        iteration_update: the callable function for every iteration, expect to accept `engine`
            and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
        post_transform: execute additional transformation for the model output data.
            Typically, several Tensor based transforms composed by `Compose`.
        key_metric: compute metric when every iteration completed, and save average value to
            engine.state.metrics when epoch completed. key_metric is the main metric to compare and
            save the checkpoint into files.
        additional_metrics: more Ignite metrics that also attach to Ignite Engine.
        handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
            CheckpointHandler, StatsHandler, SegmentationSaver, etc.
        amp: whether to enable auto-mixed-precision training or inference, default is False.

    Raises:
        TypeError: When ``device`` is not a ``torch.Device``.
        TypeError: When ``data_loader`` is not a ``torch.utils.data.DataLoader``.
        TypeError: When ``key_metric`` is not a ``Optional[dict]``.
        TypeError: When ``additional_metrics`` is not a ``Optional[dict]``.

    """

    def __init__(
        self,
        device: torch.device,
        max_epochs: int,
        data_loader: DataLoader,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        post_transform: Optional[Callable] = None,
        key_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        handlers: Optional[Sequence] = None,
        amp: bool = False,
    ) -> None:
        # the per-iteration callback becomes the ignite Engine process function
        if iteration_update is not None:
            super().__init__(iteration_update)
        else:
            super().__init__(self._iteration)
        if not isinstance(device, torch.device):
            raise TypeError(f"device must be a torch.device but is {type(device).__name__}.")
        if not isinstance(data_loader, DataLoader):
            raise TypeError(f"data_loader must be a torch.utils.data.DataLoader but is {type(data_loader).__name__}.")
        sampler = data_loader.__dict__["sampler"]
        if isinstance(sampler, DistributedSampler):

            @self.on(Events.EPOCH_STARTED)
            def set_sampler_epoch(engine: Engine):
                # keep shuffling deterministic-but-different across epochs in DDP
                sampler.set_epoch(engine.state.epoch)

        # set all sharable data for the workflow based on Ignite engine.state
        self.state = State(
            seed=0,
            iteration=0,
            epoch=0,
            max_epochs=max_epochs,
            epoch_length=-1,
            output=None,
            batch=None,
            metrics={},
            dataloader=None,
            device=device,
            key_metric_name=None,  # we can set many metrics, only use key_metric to compare and save the best model
            best_metric=-1,
            best_metric_epoch=-1,
        )
        self.data_loader = data_loader
        self.prepare_batch = prepare_batch

        if post_transform is not None:

            @self.on(Events.ITERATION_COMPLETED)
            def run_post_transform(engine: Engine) -> None:
                assert post_transform is not None
                engine.state.output = apply_transform(post_transform, engine.state.output)

        if key_metric is not None:

            if not isinstance(key_metric, dict):
                raise TypeError(f"key_metric must be None or a dict but is {type(key_metric).__name__}.")
            self.state.key_metric_name = list(key_metric.keys())[0]
            # copy so that merging in additional_metrics below cannot mutate the
            # caller's key_metric dict (previously this aliased the argument)
            metrics = dict(key_metric)
            if additional_metrics is not None and len(additional_metrics) > 0:
                if not isinstance(additional_metrics, dict):
                    raise TypeError(
                        f"additional_metrics must be None or a dict but is {type(additional_metrics).__name__}."
                    )
                metrics.update(additional_metrics)
            for name, metric in metrics.items():
                metric.attach(self, name)

            @self.on(Events.EPOCH_COMPLETED)
            def _compare_metrics(engine: Engine) -> None:
                # track the best value of the key metric and the epoch it occurred
                if engine.state.key_metric_name is not None:
                    current_val_metric = engine.state.metrics[engine.state.key_metric_name]
                    if current_val_metric > engine.state.best_metric:
                        self.logger.info(f"Got new best metric of {engine.state.key_metric_name}: {current_val_metric}")
                        engine.state.best_metric = current_val_metric
                        engine.state.best_metric_epoch = engine.state.epoch

        if handlers is not None:
            handlers_ = ensure_tuple(handlers)
            for handler in handlers_:
                handler.attach(self)
        self.amp = amp

    def run(self) -> None:
        """
        Execute training, validation or evaluation based on Ignite Engine.

        """
        super().run(data=self.data_loader, epoch_length=len(self.data_loader))

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]):
        """
        Abstract callback function for the processing logic of 1 iteration in Ignite Engine.
        Need subclass to implement different logics, like SupervisedTrainer/Evaluator, GANTrainer, etc.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
|
testbed/Project-MONAI__MONAI/monai/inferers/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from .inferer import *
|
| 13 |
+
from .utils import sliding_window_inference
|
testbed/Project-MONAI__MONAI/monai/inferers/inferer.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from abc import ABC, abstractmethod
|
| 13 |
+
from typing import Sequence, Union
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
|
| 17 |
+
from monai.inferers.utils import sliding_window_inference
|
| 18 |
+
from monai.utils import BlendMode
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Inferer(ABC):
    """
    A base class for model inference.
    Extend this class to support operations during inference, e.g. a sliding window method.
    """

    @abstractmethod
    def __call__(self, inputs: torch.Tensor, network: torch.nn.Module):
        """
        Run inference on `inputs` with the `network` model.

        Args:
            inputs: input of the model inference.
            network: model for inference.

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
|
| 43 |
+
class SimpleInferer(Inferer):
    """
    SimpleInferer is the normal inference method that run model forward() directly.

    """

    def __init__(self) -> None:
        Inferer.__init__(self)

    def __call__(self, inputs: torch.Tensor, network: torch.nn.Module):
        """Unified callable function API of Inferers.

        Args:
            inputs: model input data for inference.
            network: target model to execute inference.

        """
        # a plain forward pass, no windowing or post-processing
        return network(inputs)
| 62 |
+
|
| 63 |
+
class SlidingWindowInferer(Inferer):
    """
    Sliding window method for model inference,
    with `sw_batch_size` windows for every model.forward().

    Args:
        roi_size: the window size to execute SlidingWindow evaluation.
            If it has non-positive components, the corresponding `inputs` size will be used.
            if the components of the `roi_size` are non-positive values, the transform will use the
            corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted
            to `(32, 64)` if the second spatial dimension size of img is `64`.
        sw_batch_size: the batch size to run window slices.
        overlap: Amount of overlap between scans.
        mode: {``"constant"``, ``"gaussian"``}
            How to blend output of overlapping windows. Defaults to ``"constant"``.

            - ``"constant``": gives equal weight to all predictions.
            - ``"gaussian``": gives less weight to predictions on edges of windows.

    Note:
        the "sw_batch_size" here is to run a batch of window slices of 1 input image,
        not batch size of input images.

    """

    def __init__(
        self,
        roi_size: Union[Sequence[int], int],
        sw_batch_size: int = 1,
        overlap: float = 0.25,
        mode: Union[BlendMode, str] = BlendMode.CONSTANT,
    ) -> None:
        Inferer.__init__(self)
        self.roi_size = roi_size
        self.sw_batch_size = sw_batch_size
        self.overlap = overlap
        # normalise strings such as "gaussian" into the BlendMode enum up front
        self.mode: BlendMode = BlendMode(mode)

    def __call__(self, inputs: torch.Tensor, network: torch.nn.Module) -> torch.Tensor:
        """
        Unified callable function API of Inferers.

        Args:
            inputs: model input data for inference.
            network: target model to execute inference.

        """
        return sliding_window_inference(inputs, self.roi_size, self.sw_batch_size, network, self.overlap, self.mode)
|