diff --git a/.gitattributes b/.gitattributes index ba514c652617311193743b3a49e151c09fc55626..f8bfaba2369d1b33e1ecb6e29023bda93e166aa1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -336,3 +336,6 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/scipy.libs/ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/brotli/_brotli.abi3.so filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python.libs/libxcb-xkb-9ba31ab3.so.1.0.0 filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python.libs/libssl-28bef1ac.so.1.1 filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python.libs/libxkbcommon-71ae2972.so.0.0.0 filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/libnvvm.so.4.0.0 filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/sklearn/_isotonic.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/my_container_sandbox/workspace/anaconda3/lib/libnvvm.so.4.0.0 b/my_container_sandbox/workspace/anaconda3/lib/libnvvm.so.4.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..f302c97afef1282496ab97b3548370dfac09fcb8 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/libnvvm.so.4.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0c95d249f3d60d67dbd191174fe1d8b9f393b1887ae1a54b4988823c7e42a31 +size 26650200 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aafd2d31bab58ffe3fc2738ea2a7daf29222bcb Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/__init__.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/box_utils.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/box_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38f0419de7cf12c02ba4a23580f116b6c8438980 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/box_utils.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/csv_saver.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/csv_saver.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..710056fc3ba62ad23594630a87557f1bd8becc99 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/csv_saver.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/dataset.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/dataset.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aeeba1fbb22f284f4dbbee6ede9448733cd5d57 Binary files /dev/null and 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/dataset.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/dataset_summary.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/dataset_summary.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18a146c2daf23a83350dbedba9567afa66abbd0e Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/dataset_summary.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/folder_layout.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/folder_layout.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed7bbacdde04a1062c330d6e84f40c7c9ab618f3 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/folder_layout.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/grid_dataset.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/grid_dataset.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf5fdce4208de19694b600f59fe69dc7c40eebfc Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/grid_dataset.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/image_writer.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/image_writer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b20937d09b2015c39fdfe2d2e1bcdd6b5381fd37 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/image_writer.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/iterable_dataset.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/iterable_dataset.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61068eab6e32311ee63310c87413874e319a025c Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/iterable_dataset.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/meta_obj.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/meta_obj.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..681e9c45850938b820920b1f8855de9c953fe36d Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/meta_obj.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/meta_tensor.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/meta_tensor.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..77674f135b2ce18c60ee7528cfa239945615fb3f Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/meta_tensor.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/nifti_saver.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/nifti_saver.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07283284b131ac2c6bc992c89530f39db28cc551 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/nifti_saver.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/nifti_writer.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/nifti_writer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40fa82b87527911c44b3bc01b9b2e2a5608b6e01 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/nifti_writer.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/samplers.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/samplers.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22cb10273c2b6068b5e67a2f24b9400bd6e2ca87 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/samplers.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/synthetic.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/synthetic.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cb99f735b38080a310e75b1d2bff2690a9414fa Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/synthetic.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/test_time_augmentation.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/test_time_augmentation.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de25d3435776e4e740ed975f7af8925d286ce729 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/test_time_augmentation.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/thread_buffer.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/thread_buffer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8636be46c8bdc14d0d47d04c8451392af3c5321 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/thread_buffer.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/torchscript_utils.cpython-38.pyc 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/torchscript_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ced20eb552f68170e5a9a1d79e402d1b4f200f76 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/torchscript_utils.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/utils.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..826feb37a869a988a31e02fa617f86f2286b2824 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/utils.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/wsi_datasets.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/wsi_datasets.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b6a1f68ed63fe8cf057847c7830fc75c74f46ce Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/wsi_datasets.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/wsi_reader.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/wsi_reader.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec666714c48f380b2350d1710218bea6eb0fd05d Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/data/__pycache__/wsi_reader.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/checkpoint_loader.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/checkpoint_loader.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e61b35b311e8239924a8d025efefe9051ee41928 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/checkpoint_loader.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/parameter_scheduler.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/parameter_scheduler.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ae466c95114358cc7eb01812fc288449c82803d Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/parameter_scheduler.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/utils.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b298a48fc31c5f8f739207fdc507a17edfa53ea Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/handlers/__pycache__/utils.cpython-38.pyc differ diff 
--git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e84bfd4bb13be6df5867b5d5ac9fad8f4920ff58 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/__init__.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/contrastive.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/contrastive.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76fe89e95be02825ef089c05502e66b6de301f70 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/contrastive.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/deform.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/deform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a34c046cc9435bf7aae295aba357019d583ef79 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/deform.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/dice.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/dice.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71ff6e8f29bd6dbb6e77799582cf7a32be8d7c69 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/dice.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/focal_loss.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/focal_loss.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07d92b3dbea690ce06a44090d0c6a14fa79641ec Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/focal_loss.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/giou_loss.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/giou_loss.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beef6f771aad8d16af480c6553afb1625e7ed016 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/giou_loss.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/image_dissimilarity.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/image_dissimilarity.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e01cbd4baee4f2e99bbf8cae97fc9054b4c0b4f Binary files /dev/null and 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/image_dissimilarity.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/multi_scale.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/multi_scale.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9f35474216ac61cbe53ea57d55dbcbc423b8606 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/multi_scale.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/spatial_mask.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/spatial_mask.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c826eef413ef08871c48420f6320d850c0d5d21 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/spatial_mask.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/tversky.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/tversky.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..473c77e83298b0343ea11de63aadfde2814d419c Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/losses/__pycache__/tversky.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6368eebb2f171f81507da895e5b4059dbb2c0c8 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/__pycache__/__init__.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..27feffea107f16f72fe4cca3127ded366fbae12a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .acti_norm import ADN +from .activation import MemoryEfficientSwish, Mish, Swish +from .aspp import SimpleASPP +from .backbone_fpn_utils import BackboneWithFPN +from .convolutions import Convolution, ResidualUnit +from .crf import CRF +from .dints_block import ActiConvNormBlock, FactorizedIncreaseBlock, FactorizedReduceBlock, P3DActiConvNormBlock +from .downsample import MaxAvgPool +from .dynunet_block import UnetBasicBlock, UnetOutBlock, UnetResBlock, UnetUpBlock, get_output_padding, get_padding +from .fcn import FCN, GCN, MCFCN, Refine +from .feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool, LastLevelP6P7 +from .localnet_block import LocalNetDownSampleBlock, LocalNetFeatureExtractorBlock, LocalNetUpSampleBlock +from .mlp import MLPBlock +from .patchembedding import PatchEmbed, PatchEmbeddingBlock +from .regunet_block import RegistrationDownSampleBlock, RegistrationExtractionBlock, RegistrationResidualConvBlock +from .segresnet_block import ResBlock +from .selfattention import SABlock +from .squeeze_and_excitation import ( + ChannelSELayer, + ResidualSELayer, + SEBlock, + SEBottleneck, + SEResNetBottleneck, + SEResNeXtBottleneck, +) +from .transformerblock import TransformerBlock +from .unetr_block import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock +from .upsample import SubpixelUpsample, Subpixelupsample, SubpixelUpSample, Upsample, UpSample +from .warp import DVF2DDF, Warp diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/acti_norm.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/acti_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..6aeaa7d2752f963a51008aed2655a9cd0a697eda --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/acti_norm.py @@ -0,0 +1,101 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Tuple, Union + +import torch.nn as nn + +from monai.networks.layers.utils import get_act_layer, get_dropout_layer, get_norm_layer + + +class ADN(nn.Sequential): + """ + Constructs a sequential module of optional activation (A), dropout (D), and normalization (N) layers + with an arbitrary order:: + + -- (Norm) -- (Dropout) -- (Acti) -- + + Args: + ordering: a string representing the ordering of activation, dropout, and normalization. Defaults to "NDA". + in_channels: `C` from an expected input of size (N, C, H[, W, D]). + act: activation type and arguments. Defaults to PReLU. + norm: feature normalization type and arguments. Defaults to instance norm. + norm_dim: determine the spatial dimensions of the normalization layer. + defaults to `dropout_dim` if unspecified. + dropout: dropout ratio. Defaults to no dropout. + dropout_dim: determine the spatial dimensions of dropout. + defaults to `norm_dim` if unspecified. + + - When dropout_dim = 1, randomly zeroes some of the elements for each channel. 
+ - When dropout_dim = 2, randomly zeroes out entire channels (a channel is a 2D feature map). + - When dropout_dim = 3, randomly zeroes out entire channels (a channel is a 3D feature map). + + Examples:: + + # activation, group norm, dropout + >>> norm_params = ("GROUP", {"num_groups": 1, "affine": False}) + >>> ADN(norm=norm_params, in_channels=1, dropout_dim=1, dropout=0.8, ordering="AND") + ADN( + (A): ReLU() + (N): GroupNorm(1, 1, eps=1e-05, affine=False) + (D): Dropout(p=0.8, inplace=False) + ) + + # LeakyReLU, dropout + >>> act_params = ("leakyrelu", {"negative_slope": 0.1, "inplace": True}) + >>> ADN(act=act_params, in_channels=1, dropout_dim=1, dropout=0.8, ordering="AD") + ADN( + (A): LeakyReLU(negative_slope=0.1, inplace=True) + (D): Dropout(p=0.8, inplace=False) + ) + + See also: + + :py:class:`monai.networks.layers.Dropout` + :py:class:`monai.networks.layers.Act` + :py:class:`monai.networks.layers.Norm` + :py:class:`monai.networks.layers.split_args` + + """ + + def __init__( + self, + ordering: str = "NDA", + in_channels: Optional[int] = None, + act: Optional[Union[Tuple, str]] = "RELU", + norm: Optional[Union[Tuple, str]] = None, + norm_dim: Optional[int] = None, + dropout: Optional[Union[Tuple, str, float]] = None, + dropout_dim: Optional[int] = None, + ) -> None: + super().__init__() + + op_dict = {"A": None, "D": None, "N": None} + # define the normalization type and the arguments to the constructor + if norm is not None: + if norm_dim is None and dropout_dim is None: + raise ValueError("norm_dim or dropout_dim needs to be specified.") + op_dict["N"] = get_norm_layer(name=norm, spatial_dims=norm_dim or dropout_dim, channels=in_channels) + + # define the activation type and the arguments to the constructor + if act is not None: + op_dict["A"] = get_act_layer(act) + + if dropout is not None: + if norm_dim is None and dropout_dim is None: + raise ValueError("norm_dim or dropout_dim needs to be specified.") + op_dict["D"] = get_dropout_layer(name=dropout, dropout_dim=dropout_dim or norm_dim) + + for item in ordering.upper(): + if item not in op_dict: + raise ValueError(f"ordering must be a string of {op_dict}, got {item} in it.") + if op_dict[item] is not None: + self.add_module(item, op_dict[item]) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/activation.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/activation.py new file mode 100644 index 0000000000000000000000000000000000000000..1526b3705663495cc08b9a9232968c4380c3ec56 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/activation.py @@ -0,0 +1,162 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
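The ADN block added in acti_norm.py above is the activation/dropout/normalization unit reused by the convolution blocks later in this patch. As a minimal sketch of how the ordering string maps to concrete layers (the channel count, parameter values, and input shape below are illustrative assumptions, not part of the patch):

import torch
from monai.networks.blocks import ADN

# "NDA" builds Norm -> Dropout -> Acti; norm_dim=2 selects the 2D layer variants,
# so this expands to BatchNorm2d(8) -> Dropout2d(p=0.1) -> PReLU().
adn = ADN(ordering="NDA", in_channels=8, act="PRELU", norm="BATCH", norm_dim=2, dropout=0.1)
x = torch.randn(4, 8, 32, 32)  # (N, C, H, W)
print(adn(x).shape)  # torch.Size([4, 8, 32, 32]); every ADN layer preserves shape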
+ +import torch +from torch import nn + +from monai.utils import optional_import + +if optional_import("torch.nn.functional", name="mish")[1]: + + def monai_mish(x, inplace: bool = False): + return torch.nn.functional.mish(x, inplace=inplace) + +else: + + def monai_mish(x, inplace: bool = False): + return x * torch.tanh(torch.nn.functional.softplus(x)) + + +if optional_import("torch.nn.functional", name="silu")[1]: + + def monai_swish(x, inplace: bool = False): + return torch.nn.functional.silu(x, inplace=inplace) + +else: + + def monai_swish(x, inplace: bool = False): + return SwishImplementation.apply(x) + + +class Swish(nn.Module): + r"""Applies the element-wise function: + + .. math:: + \text{Swish}(x) = x * \text{Sigmoid}(\alpha * x) ~~~~\text{for constant value}~ \alpha. + + Citation: Searching for Activation Functions, Ramachandran et al., 2017, https://arxiv.org/abs/1710.05941. + + + Shape: + - Input: :math:`(N, *)` where `*` means any number of additional dimensions + - Output: :math:`(N, *)`, same shape as the input + + + Examples:: + + >>> import torch + >>> from monai.networks.layers.factories import Act + >>> m = Act['swish']() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def __init__(self, alpha=1.0): + super().__init__() + self.alpha = alpha + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return input * torch.sigmoid(self.alpha * input) + + +class SwishImplementation(torch.autograd.Function): + r"""Memory efficient implementation for training + Follows recommendation from: + https://github.com/lukemelas/EfficientNet-PyTorch/issues/18#issuecomment-511677853 + + Results in ~ 30% memory saving during training as compared to Swish() + """ + + @staticmethod + def forward(ctx, input): + result = input * torch.sigmoid(input) + ctx.save_for_backward(input) + return result + + @staticmethod + def backward(ctx, grad_output): + input = ctx.saved_tensors[0] + sigmoid_input = torch.sigmoid(input) + return grad_output * (sigmoid_input * (1 + input * (1 - sigmoid_input))) + + +class MemoryEfficientSwish(nn.Module): + r"""Applies the element-wise function: + + .. math:: + \text{Swish}(x) = x * \text{Sigmoid}(\alpha * x) ~~~~\text{for constant value}~ \alpha=1. + + Memory efficient implementation for training following recommendation from: + https://github.com/lukemelas/EfficientNet-PyTorch/issues/18#issuecomment-511677853 + + Results in ~ 30% memory saving during training as compared to Swish() + + Citation: Searching for Activation Functions, Ramachandran et al., 2017, https://arxiv.org/abs/1710.05941. + + From PyTorch 1.7.0+, the optimized version of `Swish` named `SiLU` is implemented; + this class will utilize `torch.nn.functional.silu` to do the calculation if the version requirement is met. + + Shape: + - Input: :math:`(N, *)` where `*` means any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + + Examples:: + + >>> import torch + >>> from monai.networks.layers.factories import Act + >>> m = Act['memswish']() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def __init__(self, inplace: bool = False): + super().__init__() + # inplace only works when using torch.nn.functional.silu + self.inplace = inplace + + def forward(self, input: torch.Tensor): + return monai_swish(input, self.inplace) + + +class Mish(nn.Module): + r"""Applies the element-wise function: + + .. math:: + \text{Mish}(x) = x * \tanh(\text{softplus}(x)).
+ + Citation: Mish: A Self Regularized Non-Monotonic Activation Function, Diganta Misra, 2019, https://arxiv.org/abs/1908.08681. + + From PyTorch 1.9.0+, the optimized version of `Mish` is implemented; + this class will utilize `torch.nn.functional.mish` to do the calculation if the version requirement is met. + + Shape: + - Input: :math:`(N, *)` where `*` means any number of additional dimensions + - Output: :math:`(N, *)`, same shape as the input + + + Examples:: + + >>> import torch + >>> from monai.networks.layers.factories import Act + >>> m = Act['mish']() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def __init__(self, inplace: bool = False): + super().__init__() + # inplace only works when using torch.nn.functional.mish + self.inplace = inplace + + def forward(self, input: torch.Tensor): + return monai_mish(input, self.inplace) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/aspp.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/aspp.py new file mode 100644 index 0000000000000000000000000000000000000000..8d43530fa734bb8cf0e8794691a12ad83bf59cf2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/aspp.py @@ -0,0 +1,105 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn + +from monai.networks.blocks.convolutions import Convolution +from monai.networks.layers import same_padding +from monai.networks.layers.factories import Conv + + +class SimpleASPP(nn.Module): + """ + A simplified version of the atrous spatial pyramid pooling (ASPP) module. + + Chen et al., Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation. + https://arxiv.org/abs/1802.02611 + + Wang et al., A Noise-robust Framework for Automatic Segmentation of COVID-19 Pneumonia Lesions + from CT Images. https://ieeexplore.ieee.org/document/9109297 + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + conv_out_channels: int, + kernel_sizes: Sequence[int] = (1, 3, 3, 3), + dilations: Sequence[int] = (1, 2, 4, 6), + norm_type: Optional[Union[Tuple, str]] = "BATCH", + acti_type: Optional[Union[Tuple, str]] = "LEAKYRELU", + bias: bool = False, + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions, could be 1, 2, or 3. + in_channels: number of input channels. + conv_out_channels: number of output channels of each atrous conv. + The final number of output channels is conv_out_channels * len(kernel_sizes). + kernel_sizes: a sequence of four convolutional kernel sizes. + Defaults to (1, 3, 3, 3) for four (dilated) convolutions. + dilations: a sequence of four convolutional dilation parameters. + Defaults to (1, 2, 4, 6) for four (dilated) convolutions. + norm_type: final kernel-size-one convolution normalization type. + Defaults to batch norm.
+ acti_type: final kernel-size-one convolution activation type. + Defaults to leaky ReLU. + bias: whether to have a bias term in convolution blocks. Defaults to False. + According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_, + if a conv layer is directly followed by a batch norm layer, bias should be False. + + Raises: + ValueError: When ``kernel_sizes`` length differs from ``dilations``. + + See also: + + :py:class:`monai.networks.layers.Act` + :py:class:`monai.networks.layers.Conv` + :py:class:`monai.networks.layers.Norm` + + """ + super().__init__() + if len(kernel_sizes) != len(dilations): + raise ValueError( + "kernel_sizes and dilations length must match, " + f"got kernel_sizes={len(kernel_sizes)} dilations={len(dilations)}." + ) + pads = tuple(same_padding(k, d) for k, d in zip(kernel_sizes, dilations)) + + self.convs = nn.ModuleList() + for k, d, p in zip(kernel_sizes, dilations, pads): + _conv = Conv[Conv.CONV, spatial_dims]( + in_channels=in_channels, out_channels=conv_out_channels, kernel_size=k, dilation=d, padding=p + ) + self.convs.append(_conv) + + out_channels = conv_out_channels * len(pads) # final conv. output channels + self.conv_k1 = Convolution( + spatial_dims=spatial_dims, + in_channels=out_channels, + out_channels=out_channels, + kernel_size=1, + act=acti_type, + norm=norm_type, + bias=bias, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: in shape (batch, channel, spatial_1[, spatial_2, ...]). + """ + x_out = torch.cat([conv(x) for conv in self.convs], dim=1) + x_out = self.conv_k1(x_out) + return x_out diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/backbone_fpn_utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/backbone_fpn_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c663485583e99fcd0de5c4fdddc38051e8ce34dd --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/backbone_fpn_utils.py @@ -0,0 +1,176 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ========================================================================= +# Adapted from https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/backbone_utils.py +# which has the following license... +# https://github.com/pytorch/vision/blob/main/LICENSE +# +# BSD 3-Clause License + +# Copyright (c) Soumith Chintala 2016, +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. + +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution.
+ +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This script is modified from torchvision to support N-D images, +by overriding the definition of convolutional layers and pooling layers. + +https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/backbone_utils.py +""" + +from typing import Dict, List, Optional, Union + +from torch import Tensor, nn + +from monai.networks.nets import resnet +from monai.utils import optional_import + +from .feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool + +torchvision_models, _ = optional_import("torchvision.models") + +__all__ = ["BackboneWithFPN"] + + +class BackboneWithFPN(nn.Module): + """ + Adds an FPN on top of a model. + Internally, it uses torchvision.models._utils.IntermediateLayerGetter to + extract a submodel that returns the feature maps specified in return_layers. + The same limitations of IntermediateLayerGetter apply here. + + Same code as https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/backbone_utils.py, + except that this class uses spatial_dims. + + Args: + backbone: backbone network + return_layers: a dict containing the names + of the modules for which the activations will be returned as + the key of the dict, and the value of the dict is the name + of the returned activation (which the user can specify). + in_channels_list: number of channels for each feature map + that is returned, in the order they are present in the OrderedDict + out_channels: number of channels in the FPN. + spatial_dims: 2D or 3D images + """ + + def __init__( + self, + backbone: nn.Module, + return_layers: Dict[str, str], + in_channels_list: List[int], + out_channels: int, + spatial_dims: Union[int, None] = None, + extra_blocks: Optional[ExtraFPNBlock] = None, + ) -> None: + super().__init__() + + # if spatial_dims is not specified, try to find it from backbone.
+ if spatial_dims is None: + if hasattr(backbone, "spatial_dims") and isinstance(backbone.spatial_dims, int): + spatial_dims = backbone.spatial_dims + elif isinstance(backbone.conv1, nn.Conv2d): + spatial_dims = 2 + elif isinstance(backbone.conv1, nn.Conv3d): + spatial_dims = 3 + else: + raise ValueError("Could not find spatial_dims of backbone, please specify it.") + + if extra_blocks is None: + extra_blocks = LastLevelMaxPool(spatial_dims) + + self.body = torchvision_models._utils.IntermediateLayerGetter(backbone, return_layers=return_layers) + self.fpn = FeaturePyramidNetwork( + spatial_dims=spatial_dims, + in_channels_list=in_channels_list, + out_channels=out_channels, + extra_blocks=extra_blocks, + ) + self.out_channels = out_channels + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + """ + Computes the resulting feature maps of the network. + + Args: + x: input images + + Returns: + feature maps after FPN layers. They are ordered from highest resolution first. + """ + x = self.body(x) # backbone + y: Dict[str, Tensor] = self.fpn(x) # FPN + return y + + +def _resnet_fpn_extractor( + backbone: resnet.ResNet, + spatial_dims: int, + trainable_layers: int = 5, + returned_layers: Optional[List[int]] = None, + extra_blocks: Optional[ExtraFPNBlock] = None, +) -> BackboneWithFPN: + """ + Same code as https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/backbone_utils.py + Except that ``in_channels_stage2 = backbone.in_planes // 8`` instead of ``in_channels_stage2 = backbone.inplanes // 8``, + and it requires spatial_dims: 2D or 3D images. + """ + + # select layers that won't be frozen + if trainable_layers < 0 or trainable_layers > 5: + raise ValueError(f"Trainable layers should be in the range [0,5], got {trainable_layers}") + layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers] + if trainable_layers == 5: + layers_to_train.append("bn1") + for name, parameter in backbone.named_parameters(): + if all([not name.startswith(layer) for layer in layers_to_train]): + parameter.requires_grad_(False) + + if extra_blocks is None: + extra_blocks = LastLevelMaxPool(spatial_dims) + + if returned_layers is None: + returned_layers = [1, 2, 3, 4] + if min(returned_layers) <= 0 or max(returned_layers) >= 5: + raise ValueError(f"Each returned layer should be in the range [1,4]. Got {returned_layers}") + return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)} + + in_channels_stage2 = backbone.in_planes // 8 + in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers] + out_channels = 256 + return BackboneWithFPN( + backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, spatial_dims=spatial_dims + ) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/convolutions.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/convolutions.py new file mode 100644 index 0000000000000000000000000000000000000000..37530668a333a70b16533f4df5b95f6f328568f0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/convolutions.py @@ -0,0 +1,326 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn + +from monai.networks.blocks import ADN +from monai.networks.layers.convutils import same_padding, stride_minus_kernel_padding +from monai.networks.layers.factories import Conv +from monai.utils.deprecate_utils import deprecated_arg + + +class Convolution(nn.Sequential): + """ + Constructs a convolution with normalization, optional dropout, and optional activation layers:: + + -- (Conv|ConvTrans) -- (Norm -- Dropout -- Acti) -- + + if ``conv_only`` is set to ``True``:: + + -- (Conv|ConvTrans) -- + + For example: + + .. code-block:: python + + from monai.networks.blocks import Convolution + + conv = Convolution( + dimensions=3, + in_channels=1, + out_channels=1, + adn_ordering="ADN", + act=("prelu", {"init": 0.2}), + dropout=0.1, + norm=("layer", {"normalized_shape": (10, 10, 10)}), + ) + print(conv) + + output:: + + Convolution( + (conv): Conv3d(1, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)) + (adn): ADN( + (A): PReLU(num_parameters=1) + (D): Dropout(p=0.1, inplace=False) + (N): LayerNorm((10, 10, 10), eps=1e-05, elementwise_affine=True) + ) + ) + + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + strides: convolution stride. Defaults to 1. + kernel_size: convolution kernel size. Defaults to 3. + adn_ordering: a string representing the ordering of activation, normalization, and dropout. + Defaults to "NDA". + act: activation type and arguments. Defaults to PReLU. + norm: feature normalization type and arguments. Defaults to instance norm. + dropout: dropout ratio. Defaults to no dropout. + dropout_dim: determine the spatial dimensions of dropout. Defaults to 1. + + - When dropout_dim = 1, randomly zeroes some of the elements for each channel. + - When dropout_dim = 2, randomly zeroes out entire channels (a channel is a 2D feature map). + - When dropout_dim = 3, randomly zeroes out entire channels (a channel is a 3D feature map). + + The value of dropout_dim should be no larger than the value of `spatial_dims`. + dilation: dilation rate. Defaults to 1. + groups: controls the connections between inputs and outputs. Defaults to 1. + bias: whether to have a bias term. Defaults to True. + conv_only: whether to use the convolutional layer only. Defaults to False. + is_transposed: if True uses ConvTrans instead of Conv. Defaults to False. + padding: controls the amount of implicit zero-paddings on both sides for padding number of points + for each dimension. Defaults to None. + output_padding: controls the additional size added to one side of the output shape. + Defaults to None. + + .. deprecated:: 0.6.0 + ``dimensions`` is deprecated, use ``spatial_dims`` instead. + + See also: + + :py:class:`monai.networks.layers.Conv` + :py:class:`monai.networks.blocks.ADN` + + """ + + @deprecated_arg( + name="dimensions", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead."
+ ) + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + strides: Union[Sequence[int], int] = 1, + kernel_size: Union[Sequence[int], int] = 3, + adn_ordering: str = "NDA", + act: Optional[Union[Tuple, str]] = "PRELU", + norm: Optional[Union[Tuple, str]] = "INSTANCE", + dropout: Optional[Union[Tuple, str, float]] = None, + dropout_dim: Optional[int] = 1, + dilation: Union[Sequence[int], int] = 1, + groups: int = 1, + bias: bool = True, + conv_only: bool = False, + is_transposed: bool = False, + padding: Optional[Union[Sequence[int], int]] = None, + output_padding: Optional[Union[Sequence[int], int]] = None, + dimensions: Optional[int] = None, + ) -> None: + super().__init__() + self.dimensions = spatial_dims if dimensions is None else dimensions + self.in_channels = in_channels + self.out_channels = out_channels + self.is_transposed = is_transposed + if padding is None: + padding = same_padding(kernel_size, dilation) + conv_type = Conv[Conv.CONVTRANS if is_transposed else Conv.CONV, self.dimensions] + + conv: nn.Module + if is_transposed: + if output_padding is None: + output_padding = stride_minus_kernel_padding(1, strides) + conv = conv_type( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=strides, + padding=padding, + output_padding=output_padding, + groups=groups, + bias=bias, + dilation=dilation, + ) + else: + conv = conv_type( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=strides, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + ) + + self.add_module("conv", conv) + + if not conv_only: + self.add_module( + "adn", + ADN( + ordering=adn_ordering, + in_channels=out_channels, + act=act, + norm=norm, + norm_dim=self.dimensions, + dropout=dropout, + dropout_dim=dropout_dim, + ), + ) + + +class ResidualUnit(nn.Module): + """ + Residual module with multiple convolutions and a residual connection. + + For example: + + .. code-block:: python + + from monai.networks.blocks import ResidualUnit + + convs = ResidualUnit( + spatial_dims=3, + in_channels=1, + out_channels=1, + adn_ordering="AN", + act=("prelu", {"init": 0.2}), + norm=("layer", {"normalized_shape": (10, 10, 10)}), + ) + print(convs) + + output:: + + ResidualUnit( + (conv): Sequential( + (unit0): Convolution( + (conv): Conv3d(1, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)) + (adn): ADN( + (A): PReLU(num_parameters=1) + (N): LayerNorm((10, 10, 10), eps=1e-05, elementwise_affine=True) + ) + ) + (unit1): Convolution( + (conv): Conv3d(1, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)) + (adn): ADN( + (A): PReLU(num_parameters=1) + (N): LayerNorm((10, 10, 10), eps=1e-05, elementwise_affine=True) + ) + ) + ) + (residual): Identity() + ) + + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + strides: convolution stride. Defaults to 1. + kernel_size: convolution kernel size. Defaults to 3. + subunits: number of convolutions. Defaults to 2. + adn_ordering: a string representing the ordering of activation, normalization, and dropout. + Defaults to "NDA". + act: activation type and arguments. Defaults to PReLU. + norm: feature normalization type and arguments. Defaults to instance norm. + dropout: dropout ratio. Defaults to no dropout. + dropout_dim: determine the dimensions of dropout. Defaults to 1. + + - When dropout_dim = 1, randomly zeroes some of the elements for each channel. 
+ - When dropout_dim = 2, randomly zero out entire channels (a channel is a 2D feature map). + - When dropout_dim = 3, randomly zero out entire channels (a channel is a 3D feature map). + + The value of dropout_dim should be no larger than the value of `dimensions`. + dilation: dilation rate. Defaults to 1. + bias: whether to have a bias term. Defaults to True. + last_conv_only: for the last subunit, whether to use the convolutional layer only. + Defaults to False. + padding: controls the amount of implicit zero-paddings on both sides for padding number of points + for each dimension. Defaults to None. + + .. deprecated:: 0.6.0 + ``dimensions`` is deprecated, use ``spatial_dims`` instead. + + See also: + + :py:class:`monai.networks.blocks.Convolution` + + """ + + @deprecated_arg(name="dimensions", since="0.6", msg_suffix="Please use `spatial_dims` instead.") + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + strides: Union[Sequence[int], int] = 1, + kernel_size: Union[Sequence[int], int] = 3, + subunits: int = 2, + adn_ordering: str = "NDA", + act: Optional[Union[Tuple, str]] = "PRELU", + norm: Optional[Union[Tuple, str]] = "INSTANCE", + dropout: Optional[Union[Tuple, str, float]] = None, + dropout_dim: Optional[int] = 1, + dilation: Union[Sequence[int], int] = 1, + bias: bool = True, + last_conv_only: bool = False, + padding: Optional[Union[Sequence[int], int]] = None, + dimensions: Optional[int] = None, + ) -> None: + super().__init__() + self.dimensions = spatial_dims if dimensions is None else dimensions + self.in_channels = in_channels + self.out_channels = out_channels + self.conv = nn.Sequential() + self.residual = nn.Identity() + if not padding: + padding = same_padding(kernel_size, dilation) + schannels = in_channels + sstrides = strides + subunits = max(1, subunits) + + for su in range(subunits): + conv_only = last_conv_only and su == (subunits - 1) + unit = Convolution( + self.dimensions, + schannels, + out_channels, + strides=sstrides, + kernel_size=kernel_size, + adn_ordering=adn_ordering, + act=act, + norm=norm, + dropout=dropout, + dropout_dim=dropout_dim, + dilation=dilation, + bias=bias, + conv_only=conv_only, + padding=padding, + ) + + self.conv.add_module(f"unit{su:d}", unit) + + # after first loop set channels and strides to what they should be for subsequent units + schannels = out_channels + sstrides = 1 + + # apply convolution to input to change number of output channels and size to match that coming from self.conv + if np.prod(strides) != 1 or in_channels != out_channels: + rkernel_size = kernel_size + rpadding = padding + + if np.prod(strides) == 1: # if only adapting number of channels a 1x1 kernel is used with no padding + rkernel_size = 1 + rpadding = 0 + + conv_type = Conv[Conv.CONV, self.dimensions] + self.residual = conv_type(in_channels, out_channels, rkernel_size, strides, rpadding, bias=bias) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + res: torch.Tensor = self.residual(x) # create the additive residual from x + cx: torch.Tensor = self.conv(x) # apply x to sequence of operations + return cx + res # add the residual to the output diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/crf.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/crf.py new file mode 100644 index 0000000000000000000000000000000000000000..b6382adf5f5ba19112084d1b507068e1b5ef3db0 --- /dev/null +++
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/crf.py @@ -0,0 +1,120 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +import torch +from torch.nn.functional import softmax + +from monai.networks.layers.filtering import PHLFilter +from monai.networks.utils import meshgrid_ij + +__all__ = ["CRF"] + + +class CRF(torch.nn.Module): + """ + Conditional Random Field: Combines message passing with a class + compatibility convolution into an iterative process designed + to successively minimise the energy of the class labeling. + + In this implementation, the message passing step is a weighted + combination of a gaussian filter and a bilateral filter. + The bilateral term is included to respect existing structure + within the reference tensor. + + See: + https://arxiv.org/abs/1502.03240 + """ + + def __init__( + self, + iterations: int = 5, + bilateral_weight: float = 1.0, + gaussian_weight: float = 1.0, + bilateral_spatial_sigma: float = 5.0, + bilateral_color_sigma: float = 0.5, + gaussian_spatial_sigma: float = 5.0, + update_factor: float = 3.0, + compatibility_matrix: Optional[torch.Tensor] = None, + ): + """ + Args: + iterations: the number of iterations. + bilateral_weight: the weighting of the bilateral term in the message passing step. + gaussian_weight: the weighting of the gaussian term in the message passing step. + bilateral_spatial_sigma: standard deviation in spatial coordinates for the bilateral term. + bilateral_color_sigma: standard deviation in color space for the bilateral term. + gaussian_spatial_sigma: standard deviation in spatial coordinates for the gaussian term. + update_factor: determines the magnitude of each update. + compatibility_matrix: a matrix describing class compatibility, + should be NxN where N is the number of classes. + """ + super().__init__() + self.iterations = iterations + self.bilateral_weight = bilateral_weight + self.gaussian_weight = gaussian_weight + self.bilateral_spatial_sigma = bilateral_spatial_sigma + self.bilateral_color_sigma = bilateral_color_sigma + self.gaussian_spatial_sigma = gaussian_spatial_sigma + self.update_factor = update_factor + self.compatibility_matrix = compatibility_matrix + + def forward(self, input_tensor: torch.Tensor, reference_tensor: torch.Tensor): + """ + Args: + input_tensor: tensor containing initial class logits. + reference_tensor: the reference tensor used to guide the message passing. + + Returns: + output (torch.Tensor): output tensor. 
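+ + Example (an illustrative sketch; shapes are assumptions, and ``PHLFilter`` relies on MONAI's compiled C++ extensions):: + + >>> import torch + >>> from monai.networks.blocks import CRF + >>> crf = CRF(iterations=2) + >>> logits = torch.randn(1, 3, 16, 16) # initial class logits (B, C, H, W) + >>> image = torch.rand(1, 1, 16, 16) # reference image guiding the bilateral term + >>> probs = crf(logits, image) # refined class probabilities, same shape as logits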
+ """ + + # constructing spatial feature tensor + spatial_features = _create_coordinate_tensor(reference_tensor) + + # constructing final feature tensors for bilateral and gaussian kernel + bilateral_features = torch.cat( + [spatial_features / self.bilateral_spatial_sigma, reference_tensor / self.bilateral_color_sigma], dim=1 + ) + gaussian_features = spatial_features / self.gaussian_spatial_sigma + + # setting up output tensor + output_tensor = softmax(input_tensor, dim=1) + + # mean field loop + for _ in range(self.iterations): + + # message passing step for both kernels + bilateral_output = PHLFilter.apply(output_tensor, bilateral_features) + gaussian_output = PHLFilter.apply(output_tensor, gaussian_features) + + # combining filter outputs + combined_output = self.bilateral_weight * bilateral_output + self.gaussian_weight * gaussian_output + + # optionally running a compatibility transform + if self.compatibility_matrix is not None: + flat = combined_output.flatten(start_dim=2).permute(0, 2, 1) + flat = torch.matmul(flat, self.compatibility_matrix) + combined_output = flat.permute(0, 2, 1).reshape(combined_output.shape) + + # update and normalize + output_tensor = softmax(input_tensor + self.update_factor * combined_output, dim=1) + + return output_tensor + + +# helper methods +def _create_coordinate_tensor(tensor): + axes = [torch.arange(tensor.size(i)) for i in range(2, tensor.dim())] + grids = meshgrid_ij(axes) + coords = torch.stack(grids).to(device=tensor.device, dtype=tensor.dtype) + return torch.stack(tensor.size(0) * [coords], dim=0) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/dints_block.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/dints_block.py new file mode 100644 index 0000000000000000000000000000000000000000..b7365f50e33a07cd240e5a6edb8ef7d1ce1e24ad --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/dints_block.py @@ -0,0 +1,272 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Tuple, Union + +import torch + +from monai.networks.layers.factories import Conv +from monai.networks.layers.utils import get_act_layer, get_norm_layer + +__all__ = ["FactorizedIncreaseBlock", "FactorizedReduceBlock", "P3DActiConvNormBlock", "ActiConvNormBlock"] + + +class FactorizedIncreaseBlock(torch.nn.Sequential): + """ + Up-sampling the features by two using linear interpolation and convolutions. + """ + + def __init__( + self, + in_channel: int, + out_channel: int, + spatial_dims: int = 3, + act_name: Union[Tuple, str] = "RELU", + norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}), + ): + """ + Args: + in_channel: number of input channels + out_channel: number of output channels + spatial_dims: number of spatial dimensions + act_name: activation layer type and arguments. + norm_name: feature normalization type and arguments. 
+ """ + super().__init__() + self._in_channel = in_channel + self._out_channel = out_channel + self._spatial_dims = spatial_dims + if self._spatial_dims not in (2, 3): + raise ValueError("spatial_dims must be 2 or 3.") + + conv_type = Conv[Conv.CONV, self._spatial_dims] + mode = "trilinear" if self._spatial_dims == 3 else "bilinear" + self.add_module("up", torch.nn.Upsample(scale_factor=2, mode=mode, align_corners=True)) + self.add_module("acti", get_act_layer(name=act_name)) + self.add_module( + "conv", + conv_type( + in_channels=self._in_channel, + out_channels=self._out_channel, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=False, + dilation=1, + ), + ) + self.add_module( + "norm", get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel) + ) + + +class FactorizedReduceBlock(torch.nn.Module): + """ + Down-sampling the feature by 2 using stride. + The length along each spatial dimension must be a multiple of 2. + """ + + def __init__( + self, + in_channel: int, + out_channel: int, + spatial_dims: int = 3, + act_name: Union[Tuple, str] = "RELU", + norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}), + ): + """ + Args: + in_channel: number of input channels + out_channel: number of output channels. + spatial_dims: number of spatial dimensions. + act_name: activation layer type and arguments. + norm_name: feature normalization type and arguments. + """ + super().__init__() + self._in_channel = in_channel + self._out_channel = out_channel + self._spatial_dims = spatial_dims + if self._spatial_dims not in (2, 3): + raise ValueError("spatial_dims must be 2 or 3.") + + conv_type = Conv[Conv.CONV, self._spatial_dims] + + self.act = get_act_layer(name=act_name) + self.conv_1 = conv_type( + in_channels=self._in_channel, + out_channels=self._out_channel // 2, + kernel_size=1, + stride=2, + padding=0, + groups=1, + bias=False, + dilation=1, + ) + self.conv_2 = conv_type( + in_channels=self._in_channel, + out_channels=self._out_channel - self._out_channel // 2, + kernel_size=1, + stride=2, + padding=0, + groups=1, + bias=False, + dilation=1, + ) + self.norm = get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + The length along each spatial dimension must be a multiple of 2. + """ + x = self.act(x) + if self._spatial_dims == 3: + out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:, 1:])], dim=1) + else: + out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1) + out = self.norm(out) + return out + + +class P3DActiConvNormBlock(torch.nn.Sequential): + """ + -- (act) -- (conv) -- (norm) -- + """ + + def __init__( + self, + in_channel: int, + out_channel: int, + kernel_size: int, + padding: int, + mode: int = 0, + act_name: Union[Tuple, str] = "RELU", + norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}), + ): + """ + Args: + in_channel: number of input channels. + out_channel: number of output channels. + kernel_size: kernel size to be expanded to 3D. + padding: padding size to be expanded to 3D. + mode: mode for the anisotropic kernels: + + - 0: ``(k, k, 1)``, ``(1, 1, k)``, + - 1: ``(k, 1, k)``, ``(1, k, 1)``, + - 2: ``(1, k, k)``. ``(k, 1, 1)``. + + act_name: activation layer type and arguments. + norm_name: feature normalization type and arguments. 
+ """ + super().__init__() + self._in_channel = in_channel + self._out_channel = out_channel + self._p3dmode = int(mode) + + conv_type = Conv[Conv.CONV, 3] + + if self._p3dmode == 0: # (k, k, 1), (1, 1, k) + kernel_size0 = (kernel_size, kernel_size, 1) + kernel_size1 = (1, 1, kernel_size) + padding0 = (padding, padding, 0) + padding1 = (0, 0, padding) + elif self._p3dmode == 1: # (k, 1, k), (1, k, 1) + kernel_size0 = (kernel_size, 1, kernel_size) + kernel_size1 = (1, kernel_size, 1) + padding0 = (padding, 0, padding) + padding1 = (0, padding, 0) + elif self._p3dmode == 2: # (1, k, k), (k, 1, 1) + kernel_size0 = (1, kernel_size, kernel_size) + kernel_size1 = (kernel_size, 1, 1) + padding0 = (0, padding, padding) + padding1 = (padding, 0, 0) + else: + raise ValueError("`mode` must be 0, 1, or 2.") + + self.add_module("acti", get_act_layer(name=act_name)) + self.add_module( + "conv", + conv_type( + in_channels=self._in_channel, + out_channels=self._in_channel, + kernel_size=kernel_size0, + stride=1, + padding=padding0, + groups=1, + bias=False, + dilation=1, + ), + ) + self.add_module( + "conv_1", + conv_type( + in_channels=self._in_channel, + out_channels=self._out_channel, + kernel_size=kernel_size1, + stride=1, + padding=padding1, + groups=1, + bias=False, + dilation=1, + ), + ) + self.add_module("norm", get_norm_layer(name=norm_name, spatial_dims=3, channels=self._out_channel)) + + +class ActiConvNormBlock(torch.nn.Sequential): + """ + -- (Acti) -- (Conv) -- (Norm) -- + """ + + def __init__( + self, + in_channel: int, + out_channel: int, + kernel_size: int = 3, + padding: int = 1, + spatial_dims: int = 3, + act_name: Union[Tuple, str] = "RELU", + norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}), + ): + """ + Args: + in_channel: number of input channels. + out_channel: number of output channels. + kernel_size: kernel size of the convolution. + padding: padding size of the convolution. + spatial_dims: number of spatial dimensions. + act_name: activation layer type and arguments. + norm_name: feature normalization type and arguments. + """ + super().__init__() + self._in_channel = in_channel + self._out_channel = out_channel + self._spatial_dims = spatial_dims + + conv_type = Conv[Conv.CONV, self._spatial_dims] + self.add_module("acti", get_act_layer(name=act_name)) + self.add_module( + "conv", + conv_type( + in_channels=self._in_channel, + out_channels=self._out_channel, + kernel_size=kernel_size, + stride=1, + padding=padding, + groups=1, + bias=False, + dilation=1, + ), + ) + self.add_module( + "norm", get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel) + ) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/downsample.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/downsample.py new file mode 100644 index 0000000000000000000000000000000000000000..9b0d5dd4b9988736142982bd41589a2511fd7ec2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/downsample.py @@ -0,0 +1,61 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Sequence, Union + +import torch +import torch.nn as nn + +from monai.networks.layers.factories import Pool +from monai.utils import ensure_tuple_rep + + +class MaxAvgPool(nn.Module): + """ + Downsample with both maxpooling and avgpooling, + double the channel size by concatenating the downsampled feature maps. + """ + + def __init__( + self, + spatial_dims: int, + kernel_size: Union[Sequence[int], int], + stride: Optional[Union[Sequence[int], int]] = None, + padding: Union[Sequence[int], int] = 0, + ceil_mode: bool = False, + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions of the input image. + kernel_size: the kernel size of both pooling operations. + stride: the stride of the window. Default value is `kernel_size`. + padding: implicit zero padding to be added to both pooling operations. + ceil_mode: when True, will use ceil instead of floor to compute the output shape. + """ + super().__init__() + _params = { + "kernel_size": ensure_tuple_rep(kernel_size, spatial_dims), + "stride": None if stride is None else ensure_tuple_rep(stride, spatial_dims), + "padding": ensure_tuple_rep(padding, spatial_dims), + "ceil_mode": ceil_mode, + } + self.max_pool = Pool[Pool.MAX, spatial_dims](**_params) + self.avg_pool = Pool[Pool.AVG, spatial_dims](**_params) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: Tensor in shape (batch, channel, spatial_1[, spatial_2, ...]). + + Returns: + Tensor in shape (batch, 2*channel, spatial_1[, spatial_2, ...]). + """ + return torch.cat([self.max_pool(x), self.avg_pool(x)], dim=1) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/dynunet_block.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/dynunet_block.py new file mode 100644 index 0000000000000000000000000000000000000000..0df4980dcd31c00954f7f56f6d9209526d390f3b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/dynunet_block.py @@ -0,0 +1,288 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn + +from monai.networks.blocks.convolutions import Convolution +from monai.networks.layers.factories import Act, Norm +from monai.networks.layers.utils import get_act_layer, get_norm_layer + + +class UnetResBlock(nn.Module): + """ + A skip-connection based module that can be used for DynUNet, based on: + `Automated Design of Deep Learning Methods for Biomedical Image Segmentation `_. + `nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation `_. + + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + kernel_size: convolution kernel size. + stride: convolution stride. + norm_name: feature normalization type and arguments. + act_name: activation layer type and arguments. + dropout: dropout probability. + + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + norm_name: Union[Tuple, str], + act_name: Union[Tuple, str] = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}), + dropout: Optional[Union[Tuple, str, float]] = None, + ): + super().__init__() + self.conv1 = get_conv_layer( + spatial_dims, + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + dropout=dropout, + conv_only=True, + ) + self.conv2 = get_conv_layer( + spatial_dims, out_channels, out_channels, kernel_size=kernel_size, stride=1, dropout=dropout, conv_only=True + ) + self.lrelu = get_act_layer(name=act_name) + self.norm1 = get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=out_channels) + self.norm2 = get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=out_channels) + self.downsample = in_channels != out_channels + stride_np = np.atleast_1d(stride) + if not np.all(stride_np == 1): + self.downsample = True + if self.downsample: + self.conv3 = get_conv_layer( + spatial_dims, in_channels, out_channels, kernel_size=1, stride=stride, dropout=dropout, conv_only=True + ) + self.norm3 = get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=out_channels) + + def forward(self, inp): + residual = inp + out = self.conv1(inp) + out = self.norm1(out) + out = self.lrelu(out) + out = self.conv2(out) + out = self.norm2(out) + if hasattr(self, "conv3"): + residual = self.conv3(residual) + if hasattr(self, "norm3"): + residual = self.norm3(residual) + out += residual + out = self.lrelu(out) + return out + + +class UnetBasicBlock(nn.Module): + """ + A CNN module that can be used for DynUNet, based on: + `Automated Design of Deep Learning Methods for Biomedical Image Segmentation `_. + `nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation `_. + + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + kernel_size: convolution kernel size. + stride: convolution stride. + norm_name: feature normalization type and arguments. + act_name: activation layer type and arguments. + dropout: dropout probability.
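+ + Example (a minimal sketch; the shapes are illustrative):: + + >>> block = UnetBasicBlock(3, 16, 32, kernel_size=3, stride=2, norm_name="instance") + >>> block(torch.rand(1, 16, 16, 16, 16)).shape + torch.Size([1, 32, 8, 8, 8])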
+ + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + norm_name: Union[Tuple, str], + act_name: Union[Tuple, str] = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}), + dropout: Optional[Union[Tuple, str, float]] = None, + ): + super().__init__() + self.conv1 = get_conv_layer( + spatial_dims, + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + dropout=dropout, + conv_only=True, + ) + self.conv2 = get_conv_layer( + spatial_dims, out_channels, out_channels, kernel_size=kernel_size, stride=1, dropout=dropout, conv_only=True + ) + self.lrelu = get_act_layer(name=act_name) + self.norm1 = get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=out_channels) + self.norm2 = get_norm_layer(name=norm_name, spatial_dims=spatial_dims, channels=out_channels) + + def forward(self, inp): + out = self.conv1(inp) + out = self.norm1(out) + out = self.lrelu(out) + out = self.conv2(out) + out = self.norm2(out) + out = self.lrelu(out) + return out + + +class UnetUpBlock(nn.Module): + """ + An upsampling module that can be used for DynUNet, based on: + `Automated Design of Deep Learning Methods for Biomedical Image Segmentation `_. + `nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation `_. + + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + kernel_size: convolution kernel size. + stride: convolution stride. + upsample_kernel_size: convolution kernel size for transposed convolution layers. + norm_name: feature normalization type and arguments. + act_name: activation layer type and arguments. + dropout: dropout probability. + trans_bias: transposed convolution bias. 
+ + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + upsample_kernel_size: Union[Sequence[int], int], + norm_name: Union[Tuple, str], + act_name: Union[Tuple, str] = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}), + dropout: Optional[Union[Tuple, str, float]] = None, + trans_bias: bool = False, + ): + super().__init__() + upsample_stride = upsample_kernel_size + self.transp_conv = get_conv_layer( + spatial_dims, + in_channels, + out_channels, + kernel_size=upsample_kernel_size, + stride=upsample_stride, + dropout=dropout, + bias=trans_bias, + conv_only=True, + is_transposed=True, + ) + self.conv_block = UnetBasicBlock( + spatial_dims, + out_channels + out_channels, + out_channels, + kernel_size=kernel_size, + stride=1, + dropout=dropout, + norm_name=norm_name, + act_name=act_name, + ) + + def forward(self, inp, skip): + # number of channels for skip should equals to out_channels + out = self.transp_conv(inp) + out = torch.cat((out, skip), dim=1) + out = self.conv_block(out) + return out + + +class UnetOutBlock(nn.Module): + def __init__( + self, spatial_dims: int, in_channels: int, out_channels: int, dropout: Optional[Union[Tuple, str, float]] = None + ): + super().__init__() + self.conv = get_conv_layer( + spatial_dims, in_channels, out_channels, kernel_size=1, stride=1, dropout=dropout, bias=True, conv_only=True + ) + + def forward(self, inp): + return self.conv(inp) + + +def get_conv_layer( + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int] = 3, + stride: Union[Sequence[int], int] = 1, + act: Optional[Union[Tuple, str]] = Act.PRELU, + norm: Union[Tuple, str] = Norm.INSTANCE, + dropout: Optional[Union[Tuple, str, float]] = None, + bias: bool = False, + conv_only: bool = True, + is_transposed: bool = False, +): + padding = get_padding(kernel_size, stride) + output_padding = None + if is_transposed: + output_padding = get_output_padding(kernel_size, stride, padding) + return Convolution( + spatial_dims, + in_channels, + out_channels, + strides=stride, + kernel_size=kernel_size, + act=act, + norm=norm, + dropout=dropout, + bias=bias, + conv_only=conv_only, + is_transposed=is_transposed, + padding=padding, + output_padding=output_padding, + ) + + +def get_padding( + kernel_size: Union[Sequence[int], int], stride: Union[Sequence[int], int] +) -> Union[Tuple[int, ...], int]: + + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = (kernel_size_np - stride_np + 1) / 2 + if np.min(padding_np) < 0: + raise AssertionError("padding value should not be negative, please change the kernel size and/or stride.") + padding = tuple(int(p) for p in padding_np) + + return padding if len(padding) > 1 else padding[0] + + +def get_output_padding( + kernel_size: Union[Sequence[int], int], stride: Union[Sequence[int], int], padding: Union[Sequence[int], int] +) -> Union[Tuple[int, ...], int]: + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + + out_padding_np = 2 * padding_np + stride_np - kernel_size_np + if np.min(out_padding_np) < 0: + raise AssertionError("out_padding value should not be negative, please change the kernel size and/or stride.") + out_padding = tuple(int(p) for p in out_padding_np) + + return out_padding if len(out_padding) > 1 else out_padding[0] diff --git 
a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/fcn.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/fcn.py new file mode 100644 index 0000000000000000000000000000000000000000..5833d4a262e312cea114d697554ff6b3e74f408f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/fcn.py @@ -0,0 +1,242 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Type + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from monai.networks.blocks.convolutions import Convolution +from monai.networks.blocks.upsample import UpSample +from monai.networks.layers.factories import Act, Conv, Norm +from monai.utils import optional_import + +models, _ = optional_import("torchvision", name="models") + + +class GCN(nn.Module): + """ + The Global Convolutional Network module using large 1D + Kx1 and 1xK kernels to represent 2D kernels. + """ + + def __init__(self, inplanes: int, planes: int, ks: int = 7): + """ + Args: + inplanes: number of input channels. + planes: number of output channels. + ks: kernel size for one dimension. Defaults to 7. + """ + super().__init__() + + conv2d_type: Type[nn.Conv2d] = Conv[Conv.CONV, 2] + self.conv_l1 = conv2d_type(in_channels=inplanes, out_channels=planes, kernel_size=(ks, 1), padding=(ks // 2, 0)) + self.conv_l2 = conv2d_type(in_channels=planes, out_channels=planes, kernel_size=(1, ks), padding=(0, ks // 2)) + self.conv_r1 = conv2d_type(in_channels=inplanes, out_channels=planes, kernel_size=(1, ks), padding=(0, ks // 2)) + self.conv_r2 = conv2d_type(in_channels=planes, out_channels=planes, kernel_size=(ks, 1), padding=(ks // 2, 0)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: in shape (batch, inplanes, spatial_1, spatial_2). + """ + x_l = self.conv_l1(x) + x_l = self.conv_l2(x_l) + x_r = self.conv_r1(x) + x_r = self.conv_r2(x_r) + x = x_l + x_r + return x + + +class Refine(nn.Module): + """ + Simple residual block to refine the details of the activation maps. + """ + + def __init__(self, planes: int): + """ + Args: + planes: number of input channels. + """ + super().__init__() + + relu_type: Type[nn.ReLU] = Act[Act.RELU] + conv2d_type: Type[nn.Conv2d] = Conv[Conv.CONV, 2] + norm2d_type: Type[nn.BatchNorm2d] = Norm[Norm.BATCH, 2] + + self.bn = norm2d_type(num_features=planes) + self.relu = relu_type(inplace=True) + self.conv1 = conv2d_type(in_channels=planes, out_channels=planes, kernel_size=3, padding=1) + self.conv2 = conv2d_type(in_channels=planes, out_channels=planes, kernel_size=3, padding=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: in shape (batch, planes, spatial_1, spatial_2). + """ + residual = x + x = self.bn(x) + x = self.relu(x) + x = self.conv1(x) + x = self.bn(x) + x = self.relu(x) + x = self.conv2(x) + + return residual + x + + +class FCN(nn.Module): + """ + 2D FCN network with 3 input channels. 
The small decoder is built + with the GCN and Refine modules. + The code is adapted from `lsqshr's official 2D code `_. + + Args: + out_channels: number of output channels. Defaults to 1. + upsample_mode: [``"transpose"``, ``"bilinear"``] + The mode of upsampling manipulations. + Using the second mode cannot guarantee the model's reproducibility. Defaults to ``bilinear``. + + - ``transpose``, uses transposed convolution layers. + - ``bilinear``, uses bilinear interpolation. + + pretrained: If True, returns a model pre-trained on ImageNet + progress: If True, displays a progress bar of the download to stderr. + """ + + def __init__( + self, out_channels: int = 1, upsample_mode: str = "bilinear", pretrained: bool = True, progress: bool = True + ): + super().__init__() + + conv2d_type: Type[nn.Conv2d] = Conv[Conv.CONV, 2] + + self.upsample_mode = upsample_mode + self.conv2d_type = conv2d_type + self.out_channels = out_channels + resnet = models.resnet50(pretrained=pretrained, progress=progress) + + self.conv1 = resnet.conv1 + self.bn0 = resnet.bn1 + self.relu = resnet.relu + self.maxpool = resnet.maxpool + + self.layer1 = resnet.layer1 + self.layer2 = resnet.layer2 + self.layer3 = resnet.layer3 + self.layer4 = resnet.layer4 + + self.gcn1 = GCN(2048, self.out_channels) + self.gcn2 = GCN(1024, self.out_channels) + self.gcn3 = GCN(512, self.out_channels) + self.gcn4 = GCN(64, self.out_channels) + self.gcn5 = GCN(64, self.out_channels) + + self.refine1 = Refine(self.out_channels) + self.refine2 = Refine(self.out_channels) + self.refine3 = Refine(self.out_channels) + self.refine4 = Refine(self.out_channels) + self.refine5 = Refine(self.out_channels) + self.refine6 = Refine(self.out_channels) + self.refine7 = Refine(self.out_channels) + self.refine8 = Refine(self.out_channels) + self.refine9 = Refine(self.out_channels) + self.refine10 = Refine(self.out_channels) + self.transformer = self.conv2d_type(in_channels=256, out_channels=64, kernel_size=1) + + if self.upsample_mode == "transpose": + self.up_conv = UpSample(spatial_dims=2, in_channels=self.out_channels, scale_factor=2, mode="deconv") + + def forward(self, x: torch.Tensor): + """ + Args: + x: in shape (batch, 3, spatial_1, spatial_2). 
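+ + Example (a minimal sketch; ``pretrained=False`` avoids downloading the backbone weights):: + + >>> net = FCN(out_channels=2, upsample_mode="bilinear", pretrained=False) + >>> net(torch.rand(1, 3, 64, 64)).shape + torch.Size([1, 2, 64, 64])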
+ """ + org_input = x + x = self.conv1(x) + x = self.bn0(x) + x = self.relu(x) + conv_x = x + x = self.maxpool(x) + pool_x = x + + fm1 = self.layer1(x) + fm2 = self.layer2(fm1) + fm3 = self.layer3(fm2) + fm4 = self.layer4(fm3) + + gcfm1 = self.refine1(self.gcn1(fm4)) + gcfm2 = self.refine2(self.gcn2(fm3)) + gcfm3 = self.refine3(self.gcn3(fm2)) + gcfm4 = self.refine4(self.gcn4(pool_x)) + gcfm5 = self.refine5(self.gcn5(conv_x)) + + if self.upsample_mode == "transpose": + fs1 = self.refine6(self.up_conv(gcfm1) + gcfm2) + fs2 = self.refine7(self.up_conv(fs1) + gcfm3) + fs3 = self.refine8(self.up_conv(fs2) + gcfm4) + fs4 = self.refine9(self.up_conv(fs3) + gcfm5) + return self.refine10(self.up_conv(fs4)) + fs1 = self.refine6(F.interpolate(gcfm1, fm3.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm2) + fs2 = self.refine7(F.interpolate(fs1, fm2.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm3) + fs3 = self.refine8(F.interpolate(fs2, pool_x.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm4) + fs4 = self.refine9(F.interpolate(fs3, conv_x.size()[2:], mode=self.upsample_mode, align_corners=True) + gcfm5) + return self.refine10(F.interpolate(fs4, org_input.size()[2:], mode=self.upsample_mode, align_corners=True)) + + +class MCFCN(FCN): + """ + The multi-channel version of the 2D FCN module. + Adds a projection layer to take arbitrary number of inputs. + + Args: + in_channels: number of input channels. Defaults to 3. + out_channels: number of output channels. Defaults to 1. + upsample_mode: [``"transpose"``, ``"bilinear"``] + The mode of upsampling manipulations. + Using the second mode cannot guarantee the model's reproducibility. Defaults to ``bilinear``. + + - ``transpose``, uses transposed convolution layers. + - ``bilinear``, uses bilinear interpolate. + pretrained: If True, returns a model pre-trained on ImageNet + progress: If True, displays a progress bar of the download to stderr. + """ + + def __init__( + self, + in_channels: int = 3, + out_channels: int = 1, + upsample_mode: str = "bilinear", + pretrained: bool = True, + progress: bool = True, + ): + super().__init__( + out_channels=out_channels, upsample_mode=upsample_mode, pretrained=pretrained, progress=progress + ) + + self.init_proj = Convolution( + spatial_dims=2, + in_channels=in_channels, + out_channels=3, + kernel_size=1, + act=("relu", {"inplace": True}), + norm=Norm.BATCH, + bias=False, + ) + + def forward(self, x: torch.Tensor): + """ + Args: + x: in shape (batch, in_channels, spatial_1, spatial_2). + """ + x = self.init_proj(x) + return super().forward(x) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/feature_pyramid_network.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/feature_pyramid_network.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7b903a19894347f24842e88bb44cfdec1751b2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/feature_pyramid_network.py @@ -0,0 +1,263 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ========================================================================= +# Adapted from https://github.com/pytorch/vision/blob/release/0.12/torchvision/ops/feature_pyramid_network.py +# which has the following license... +# https://github.com/pytorch/vision/blob/main/LICENSE +# +# BSD 3-Clause License + +# Copyright (c) Soumith Chintala 2016, +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. + +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +This script is modified from torchvision to support N-D images, +by overriding the definition of convolutional layers and pooling layers. + +https://github.com/pytorch/vision/blob/release/0.12/torchvision/ops/feature_pyramid_network.py +""" + +from collections import OrderedDict +from typing import Callable, Dict, List, Optional, Tuple, Type, Union + +import torch.nn.functional as F +from torch import Tensor, nn + +from monai.networks.layers.factories import Conv, Pool + +__all__ = ["ExtraFPNBlock", "LastLevelMaxPool", "LastLevelP6P7", "FeaturePyramidNetwork"] + + +class ExtraFPNBlock(nn.Module): + """ + Base class for the extra block in the FPN. + + Same code as https://github.com/pytorch/vision/blob/release/0.12/torchvision/ops/feature_pyramid_network.py + """ + + def forward(self, results: List[Tensor], x: List[Tensor], names: List[str]) -> Tuple[List[Tensor], List[str]]: + """ + Compute the extended set of results of the FPN and their names.
+ + Args: + results: the result of the FPN + x: the original feature maps + names: the names for each one of the original feature maps + + Returns: + - the extended set of results of the FPN + - the extended set of names for the results + """ + pass + + +class LastLevelMaxPool(ExtraFPNBlock): + """ + Applies a max_pool2d or max_pool3d on top of the last feature map. Serves as an ``extra_blocks`` + in :class:`~monai.networks.blocks.feature_pyramid_network.FeaturePyramidNetwork` . + """ + + def __init__(self, spatial_dims: int): + super().__init__() + pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims] + self.maxpool = pool_type(kernel_size=1, stride=2, padding=0) + + def forward(self, results: List[Tensor], x: List[Tensor], names: List[str]) -> Tuple[List[Tensor], List[str]]: + names.append("pool") + results.append(self.maxpool(results[-1])) + return results, names + + +class LastLevelP6P7(ExtraFPNBlock): + """ + This module is used in RetinaNet to generate extra layers, P6 and P7. + Serves as an ``extra_blocks`` + in :class:`~monai.networks.blocks.feature_pyramid_network.FeaturePyramidNetwork` . + """ + + def __init__(self, spatial_dims: int, in_channels: int, out_channels: int): + super().__init__() + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + self.p6 = conv_type(in_channels, out_channels, kernel_size=3, stride=2, padding=1) + self.p7 = conv_type(out_channels, out_channels, kernel_size=3, stride=2, padding=1) + for module in [self.p6, self.p7]: + nn.init.kaiming_uniform_(module.weight, a=1) + nn.init.constant_(module.bias, 0) + self.use_P5 = in_channels == out_channels + + def forward(self, results: List[Tensor], x: List[Tensor], names: List[str]) -> Tuple[List[Tensor], List[str]]: + p5, c5 = results[-1], x[-1] + x5 = p5 if self.use_P5 else c5 + p6 = self.p6(x5) + p7 = self.p7(F.relu(p6)) + results.extend([p6, p7]) + names.extend(["p6", "p7"]) + return results, names + + +class FeaturePyramidNetwork(nn.Module): + """ + Module that adds an FPN on top of a set of feature maps. This is based on + `"Feature Pyramid Network for Object Detection" `_. + + The feature maps are currently supposed to be in increasing depth + order. + + The input to the model is expected to be an OrderedDict[Tensor], containing + the feature maps on top of which the FPN will be added. + + Args: + spatial_dims: 2D or 3D images + in_channels_list: number of channels for each feature map that + is passed to the module + out_channels: number of channels of the FPN representation + extra_blocks: if provided, extra operations will + be performed.
It is expected to take the fpn features, the original + features and the names of the original features as input, and return + a new list of feature maps and their corresponding names + + Examples:: + + >>> m = FeaturePyramidNetwork(2, [10, 20, 30], 5) + >>> # get some dummy data + >>> x = OrderedDict() + >>> x['feat0'] = torch.rand(1, 10, 64, 64) + >>> x['feat2'] = torch.rand(1, 20, 16, 16) + >>> x['feat3'] = torch.rand(1, 30, 8, 8) + >>> # compute the FPN on top of x + >>> output = m(x) + >>> print([(k, v.shape) for k, v in output.items()]) + >>> # returns + >>> [('feat0', torch.Size([1, 5, 64, 64])), + >>> ('feat2', torch.Size([1, 5, 16, 16])), + >>> ('feat3', torch.Size([1, 5, 8, 8]))] + + """ + + def __init__( + self, + spatial_dims: int, + in_channels_list: List[int], + out_channels: int, + extra_blocks: Optional[ExtraFPNBlock] = None, + ): + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + + self.inner_blocks = nn.ModuleList() + self.layer_blocks = nn.ModuleList() + for in_channels in in_channels_list: + if in_channels == 0: + raise ValueError("in_channels=0 is currently not supported") + inner_block_module = conv_type(in_channels, out_channels, 1) + layer_block_module = conv_type(out_channels, out_channels, 3, padding=1) + self.inner_blocks.append(inner_block_module) + self.layer_blocks.append(layer_block_module) + + # initialize parameters now to avoid modifying the initialization of top_blocks + conv_type_: Type[nn.Module] = Conv[Conv.CONV, spatial_dims] + for m in self.modules(): + if isinstance(m, conv_type_): + nn.init.kaiming_uniform_(m.weight, a=1) + nn.init.constant_(m.bias, 0.0) # type: ignore + + if extra_blocks is not None: + if not isinstance(extra_blocks, ExtraFPNBlock): + raise AssertionError + self.extra_blocks = extra_blocks + + def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor: + """ + This is equivalent to self.inner_blocks[idx](x), + but torchscript doesn't support this yet + """ + num_blocks = len(self.inner_blocks) + if idx < 0: + idx += num_blocks + out = x + for i, module in enumerate(self.inner_blocks): + if i == idx: + out = module(x) + return out + + def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor: + """ + This is equivalent to self.layer_blocks[idx](x), + but torchscript doesn't support this yet + """ + num_blocks = len(self.layer_blocks) + if idx < 0: + idx += num_blocks + out = x + for i, module in enumerate(self.layer_blocks): + if i == idx: + out = module(x) + return out + + def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]: + """ + Computes the FPN for a set of feature maps. + + Args: + x: feature maps for each feature level. + + Returns: + feature maps after FPN layers. They are ordered from highest resolution first.
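+ + Example (a sketch reusing the dummy data from the class docstring; ``LastLevelMaxPool`` appends one extra half-resolution level):: + + >>> m = FeaturePyramidNetwork(2, [10, 20, 30], 5, extra_blocks=LastLevelMaxPool(2)) + >>> list(m(x).keys()) + ['feat0', 'feat2', 'feat3', 'pool']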
+ """ + # unpack OrderedDict into two lists for easier handling + names = list(x.keys()) + x_values: List[Tensor] = list(x.values()) + + last_inner = self.get_result_from_inner_blocks(x_values[-1], -1) + results = [] + results.append(self.get_result_from_layer_blocks(last_inner, -1)) + + for idx in range(len(x_values) - 2, -1, -1): + inner_lateral = self.get_result_from_inner_blocks(x_values[idx], idx) + feat_shape = inner_lateral.shape[2:] + inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="nearest") + last_inner = inner_lateral + inner_top_down + results.insert(0, self.get_result_from_layer_blocks(last_inner, idx)) + + if self.extra_blocks is not None: + results, names = self.extra_blocks(results, x_values, names) + + # make it back an OrderedDict + out = OrderedDict([(k, v) for k, v in zip(names, results)]) + + return out diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/localnet_block.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/localnet_block.py new file mode 100644 index 0000000000000000000000000000000000000000..41b76c7d4cf0e6ae1367eb6979cdd7dd72618df1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/localnet_block.py @@ -0,0 +1,286 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Optional, Sequence, Tuple, Type, Union + +import torch +from torch import nn +from torch.nn import functional as F + +from monai.networks.blocks import Convolution +from monai.networks.layers import same_padding +from monai.networks.layers.factories import Conv, Norm, Pool + + +def get_conv_block( + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int] = 3, + act: Optional[Union[Tuple, str]] = "RELU", + norm: Optional[Union[Tuple, str]] = "BATCH", +) -> nn.Module: + padding = same_padding(kernel_size) + mod: nn.Module = Convolution( + spatial_dims, + in_channels, + out_channels, + kernel_size=kernel_size, + act=act, + norm=norm, + bias=False, + conv_only=False, + padding=padding, + ) + return mod + + +def get_conv_layer( + spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Union[Sequence[int], int] = 3 +) -> nn.Module: + padding = same_padding(kernel_size) + mod: nn.Module = Convolution( + spatial_dims, in_channels, out_channels, kernel_size=kernel_size, bias=False, conv_only=True, padding=padding + ) + return mod + + +def get_deconv_block(spatial_dims: int, in_channels: int, out_channels: int) -> nn.Module: + mod: nn.Module = Convolution( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + strides=2, + act="RELU", + norm="BATCH", + bias=False, + is_transposed=True, + padding=1, + output_padding=1, + ) + return mod + + +class ResidualBlock(nn.Module): + def __init__( + self, spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Union[Sequence[int], int] + ) -> None: + super().__init__() + if in_channels != out_channels: + raise ValueError( + f"expecting in_channels == out_channels, " f"got in_channels={in_channels}, out_channels={out_channels}" + ) + self.conv_block = get_conv_block( + spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size + ) + self.conv = get_conv_layer( + spatial_dims=spatial_dims, in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size + ) + self.norm = Norm[Norm.BATCH, spatial_dims](out_channels) + self.relu = nn.ReLU() + + def forward(self, x) -> torch.Tensor: + out: torch.Tensor = self.relu(self.norm(self.conv(self.conv_block(x))) + x) + return out + + +class LocalNetResidualBlock(nn.Module): + def __init__(self, spatial_dims: int, in_channels: int, out_channels: int) -> None: + super().__init__() + if in_channels != out_channels: + raise ValueError( + f"expecting in_channels == out_channels, " f"got in_channels={in_channels}, out_channels={out_channels}" + ) + self.conv_layer = get_conv_layer(spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels) + self.norm = Norm[Norm.BATCH, spatial_dims](out_channels) + self.relu = nn.ReLU() + + def forward(self, x, mid) -> torch.Tensor: + out: torch.Tensor = self.relu(self.norm(self.conv_layer(x)) + mid) + return out + + +class LocalNetDownSampleBlock(nn.Module): + """ + A down-sample module that can be used for LocalNet, based on: + `Weakly-supervised convolutional neural networks for multimodal image registration + `_. + `Label-driven weakly-supervised learning for multimodal deformable image registration + `_. + + Adapted from: + DeepReg (https://github.com/DeepRegNet/DeepReg) + """ + + def __init__( + self, spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Union[Sequence[int], int] + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions. 
+ in_channels: number of input channels. + out_channels: number of output channels. + kernel_size: convolution kernel size. + Raises: + NotImplementedError: when ``kernel_size`` is even + """ + super().__init__() + self.conv_block = get_conv_block( + spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size + ) + self.residual_block = ResidualBlock( + spatial_dims=spatial_dims, in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size + ) + self.max_pool = Pool[Pool.MAX, spatial_dims](kernel_size=2) + + def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Halves the spatial dimensions. + A tuple of (x, mid) is returned: + + - x is the downsample result, in shape (batch, ``out_channels``, insize_1 / 2, insize_2 / 2, [insize_3 / 2]), + - mid is the mid-level feature, in shape (batch, ``out_channels``, insize_1, insize_2, [insize_3]) + + Args: + x: Tensor in shape (batch, ``in_channels``, insize_1, insize_2, [insize_3]) + + Raises: + ValueError: when input spatial dimensions are not even. + """ + for i in x.shape[2:]: + if i % 2 != 0: + raise ValueError("expecting x spatial dimensions to be even, " f"got x of shape {x.shape}") + x = self.conv_block(x) + mid = self.residual_block(x) + x = self.max_pool(mid) + return x, mid + + +class LocalNetUpSampleBlock(nn.Module): + """ + An up-sample module that can be used for LocalNet, based on: + `Weakly-supervised convolutional neural networks for multimodal image registration + `_. + `Label-driven weakly-supervised learning for multimodal deformable image registration + `_. + + Adapted from: + DeepReg (https://github.com/DeepRegNet/DeepReg) + """ + + def __init__(self, spatial_dims: int, in_channels: int, out_channels: int) -> None: + """ + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + Raises: + ValueError: when ``in_channels != 2 * out_channels`` + """ + super().__init__() + self.deconv_block = get_deconv_block( + spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels + ) + self.conv_block = get_conv_block(spatial_dims=spatial_dims, in_channels=out_channels, out_channels=out_channels) + self.residual_block = LocalNetResidualBlock( + spatial_dims=spatial_dims, in_channels=out_channels, out_channels=out_channels + ) + if in_channels / out_channels != 2: + raise ValueError( + f"expecting in_channels == 2 * out_channels, " + f"got in_channels={in_channels}, out_channels={out_channels}" + ) + self.out_channels = out_channels + + def addictive_upsampling(self, x, mid) -> torch.Tensor: + x = F.interpolate(x, mid.shape[2:]) + # [(batch, out_channels, ...), (batch, out_channels, ...)] + x = x.split(split_size=int(self.out_channels), dim=1) + # (batch, out_channels, ...) + out: torch.Tensor = torch.sum(torch.stack(x, dim=-1), dim=-1) + return out + + def forward(self, x, mid) -> torch.Tensor: + """ + Halves the number of channels and doubles the spatial dimensions.
+ + Args: + x: feature to be up-sampled, in shape (batch, ``in_channels``, insize_1, insize_2, [insize_3]) + mid: mid-level feature saved during down-sampling, + in shape (batch, ``out_channels``, midsize_1, midsize_2, [midsize_3]) + + Raises: + ValueError: when ``midsize != insize * 2`` + """ + for i, j in zip(x.shape[2:], mid.shape[2:]): + if j != 2 * i: + raise ValueError( + "expecting mid spatial dimensions to be exactly double the x spatial dimensions, " + f"got x of shape {x.shape}, mid of shape {mid.shape}" + ) + h0 = self.deconv_block(x) + self.addictive_upsampling(x, mid) + r1 = h0 + mid + r2 = self.conv_block(h0) + out: torch.Tensor = self.residual_block(r2, r1) + return out + + +class LocalNetFeatureExtractorBlock(nn.Module): + """ + A feature-extraction module that can be used for LocalNet, based on: + `Weakly-supervised convolutional neural networks for multimodal image registration + `_. + `Label-driven weakly-supervised learning for multimodal deformable image registration + `_. + + Adapted from: + DeepReg (https://github.com/DeepRegNet/DeepReg) + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + act: Optional[Union[Tuple, str]] = "RELU", + initializer: str = "kaiming_uniform", + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + act: activation type and arguments. Defaults to ReLU. + initializer: weight initialization type. Defaults to ``"kaiming_uniform"``. + """ + super().__init__() + self.conv_block = get_conv_block( + spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels, act=act, norm=None + ) + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims] + for m in self.conv_block.modules(): + if isinstance(m, conv_type): + if initializer == "kaiming_uniform": + nn.init.kaiming_normal_(torch.as_tensor(m.weight)) + elif initializer == "zeros": + nn.init.zeros_(torch.as_tensor(m.weight)) + else: + raise ValueError( + f"initializer {initializer} is not supported, " "currently supporting kaiming_uniform and zeros" + ) + + def forward(self, x) -> torch.Tensor: + """ + Args: + x: Tensor in shape (batch, ``in_channels``, insize_1, insize_2, [insize_3]) + """ + out: torch.Tensor = self.conv_block(x) + return out diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/mlp.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..0feeb044f34c88967a1c419f27985cd47d033a78 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/mlp.py @@ -0,0 +1,73 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +from typing import Tuple, Union + +import torch.nn as nn + +from monai.networks.layers import get_act_layer +from monai.utils import look_up_option + +SUPPORTED_DROPOUT_MODE = {"vit", "swin"} + + +class MLPBlock(nn.Module): + """ + A multi-layer perceptron block, based on: "Dosovitskiy et al., + An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale " + """ + + def __init__( + self, + hidden_size: int, + mlp_dim: int, + dropout_rate: float = 0.0, + act: Union[Tuple, str] = "GELU", + dropout_mode="vit", + ) -> None: + """ + Args: + hidden_size: dimension of hidden layer. + mlp_dim: dimension of feedforward layer. If 0, `hidden_size` will be used. + dropout_rate: fraction of the input units to drop. + act: activation type and arguments. Defaults to GELU. + dropout_mode: dropout mode, can be "vit" or "swin". + "vit" mode uses two dropout instances as implemented in + https://github.com/google-research/vision_transformer/blob/main/vit_jax/models.py#L87 + "swin" corresponds to one instance as implemented in + https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_mlp.py#L23 + + + """ + + super().__init__() + + if not (0 <= dropout_rate <= 1): + raise ValueError("dropout_rate should be between 0 and 1.") + mlp_dim = mlp_dim or hidden_size + self.linear1 = nn.Linear(hidden_size, mlp_dim) + self.linear2 = nn.Linear(mlp_dim, hidden_size) + self.fn = get_act_layer(act) + self.drop1 = nn.Dropout(dropout_rate) + dropout_opt = look_up_option(dropout_mode, SUPPORTED_DROPOUT_MODE) + if dropout_opt == "vit": + self.drop2 = nn.Dropout(dropout_rate) + elif dropout_opt == "swin": + self.drop2 = self.drop1 + else: + raise ValueError(f"dropout_mode should be one of {SUPPORTED_DROPOUT_MODE}") + + def forward(self, x): + x = self.fn(self.linear1(x)) + x = self.drop1(x) + x = self.linear2(x) + x = self.drop2(x) + return x diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/patchembedding.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/patchembedding.py new file mode 100644 index 0000000000000000000000000000000000000000..f02f6342e8a7ebd5b16129c8740bf414f8e55572 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/patchembedding.py @@ -0,0 +1,200 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +from typing import Sequence, Type, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import LayerNorm + +from monai.networks.layers import Conv, trunc_normal_ +from monai.utils import ensure_tuple_rep, optional_import +from monai.utils.module import look_up_option + +Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange") +SUPPORTED_EMBEDDING_TYPES = {"conv", "perceptron"} + + +class PatchEmbeddingBlock(nn.Module): + """ + A patch embedding block, based on: "Dosovitskiy et al., + An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale " + + Example:: + + >>> from monai.networks.blocks import PatchEmbeddingBlock + >>> PatchEmbeddingBlock(in_channels=4, img_size=32, patch_size=8, hidden_size=32, num_heads=4, pos_embed="conv") + + """ + + def __init__( + self, + in_channels: int, + img_size: Union[Sequence[int], int], + patch_size: Union[Sequence[int], int], + hidden_size: int, + num_heads: int, + pos_embed: str, + dropout_rate: float = 0.0, + spatial_dims: int = 3, + ) -> None: + """ + Args: + in_channels: dimension of input channels. + img_size: dimension of input image. + patch_size: dimension of patch size. + hidden_size: dimension of hidden layer. + num_heads: number of attention heads. + pos_embed: position embedding layer type. + dropout_rate: fraction of the input units to drop. + spatial_dims: number of spatial dimensions. + + + """ + + super().__init__() + + if not (0 <= dropout_rate <= 1): + raise ValueError("dropout_rate should be between 0 and 1.") + + if hidden_size % num_heads != 0: + raise ValueError("hidden size should be divisible by num_heads.") + + self.pos_embed = look_up_option(pos_embed, SUPPORTED_EMBEDDING_TYPES) + + img_size = ensure_tuple_rep(img_size, spatial_dims) + patch_size = ensure_tuple_rep(patch_size, spatial_dims) + for m, p in zip(img_size, patch_size): + if m < p: + raise ValueError("patch_size should be smaller than img_size.") + if self.pos_embed == "perceptron" and m % p != 0: + raise ValueError("img_size should be divisible by patch_size for perceptron.") + self.n_patches = np.prod([im_d // p_d for im_d, p_d in zip(img_size, patch_size)]) + self.patch_dim = int(in_channels * np.prod(patch_size)) + + self.patch_embeddings: nn.Module + if self.pos_embed == "conv": + self.patch_embeddings = Conv[Conv.CONV, spatial_dims]( + in_channels=in_channels, out_channels=hidden_size, kernel_size=patch_size, stride=patch_size + ) + elif self.pos_embed == "perceptron": + # for 3d: "b c (h p1) (w p2) (d p3)-> b (h w d) (p1 p2 p3 c)" + chars = (("h", "p1"), ("w", "p2"), ("d", "p3"))[:spatial_dims] + from_chars = "b c " + " ".join(f"({k} {v})" for k, v in chars) + to_chars = f"b ({' '.join([c[0] for c in chars])}) ({' '.join([c[1] for c in chars])} c)" + axes_len = {f"p{i+1}": p for i, p in enumerate(patch_size)} + self.patch_embeddings = nn.Sequential( + Rearrange(f"{from_chars} -> {to_chars}", **axes_len), nn.Linear(self.patch_dim, hidden_size) + ) + self.position_embeddings = nn.Parameter(torch.zeros(1, self.n_patches, hidden_size)) + self.dropout = nn.Dropout(dropout_rate) + trunc_normal_(self.position_embeddings, mean=0.0, std=0.02, a=-2.0, b=2.0) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, mean=0.0, std=0.02, a=-2.0, b=2.0) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) +
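# gamma (weight) is reset to 1 next, so the LayerNorm starts as an identity affine transform +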
nn.init.constant_(m.weight, 1.0) + + def forward(self, x): + x = self.patch_embeddings(x) + if self.pos_embed == "conv": + x = x.flatten(2).transpose(-1, -2) + embeddings = x + self.position_embeddings + embeddings = self.dropout(embeddings) + return embeddings + + +class PatchEmbed(nn.Module): + """ + Patch embedding block based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + + Unlike ViT patch embedding block: (1) input is padded to satisfy window size requirements (2) normalized if + specified (3) position embedding is not used. + + Example:: + + >>> from monai.networks.blocks import PatchEmbed + >>> PatchEmbed(patch_size=2, in_chans=1, embed_dim=48, norm_layer=nn.LayerNorm, spatial_dims=3) + """ + + def __init__( + self, + patch_size: Union[Sequence[int], int] = 2, + in_chans: int = 1, + embed_dim: int = 48, + norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore + spatial_dims: int = 3, + ) -> None: + """ + Args: + patch_size: dimension of patch size. + in_chans: dimension of input channels. + embed_dim: number of linear projection output channels. + norm_layer: normalization layer. + spatial_dims: spatial dimension. + """ + + super().__init__() + + if not (spatial_dims == 2 or spatial_dims == 3): + raise ValueError("spatial dimension should be 2 or 3.") + + patch_size = ensure_tuple_rep(patch_size, spatial_dims) + self.patch_size = patch_size + self.embed_dim = embed_dim + self.proj = Conv[Conv.CONV, spatial_dims]( + in_channels=in_chans, out_channels=embed_dim, kernel_size=patch_size, stride=patch_size + ) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + x_shape = x.size() + if len(x_shape) == 5: + _, _, d, h, w = x_shape + if w % self.patch_size[2] != 0: + x = F.pad(x, (0, self.patch_size[2] - w % self.patch_size[2])) + if h % self.patch_size[1] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[1] - h % self.patch_size[1])) + if d % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - d % self.patch_size[0])) + + elif len(x_shape) == 4: + _, _, h, w = x.size() + if w % self.patch_size[1] != 0: + x = F.pad(x, (0, self.patch_size[1] - w % self.patch_size[1])) + if h % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[0] - h % self.patch_size[0])) + + x = self.proj(x) + if self.norm is not None: + x_shape = x.size() + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + if len(x_shape) == 5: + d, wh, ww = x_shape[2], x_shape[3], x_shape[4] + x = x.transpose(1, 2).view(-1, self.embed_dim, d, wh, ww) + elif len(x_shape) == 4: + wh, ww = x_shape[2], x_shape[3] + x = x.transpose(1, 2).view(-1, self.embed_dim, wh, ww) + return x diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/regunet_block.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/regunet_block.py new file mode 100644 index 0000000000000000000000000000000000000000..78e2598b4b734dc8924643194e05f607b585da25 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/regunet_block.py @@ -0,0 +1,247 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
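# Editor's aside: a hypothetical shape check for the PatchEmbeddingBlock in
# patchembedding.py above (sizes chosen for illustration only):
#
#     import torch
#     from monai.networks.blocks import PatchEmbeddingBlock
#
#     embed = PatchEmbeddingBlock(in_channels=1, img_size=32, patch_size=8,
#                                 hidden_size=96, num_heads=8, pos_embed="conv", spatial_dims=3)
#     x = torch.randn(2, 1, 32, 32, 32)
#     print(embed(x).shape)  # torch.Size([2, 64, 96]) -- (32 // 8) ** 3 = 64 patches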
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Sequence, Tuple, Type, Union + +import torch +from torch import nn +from torch.nn import functional as F + +from monai.networks.blocks import Convolution +from monai.networks.layers import Conv, Norm, Pool, same_padding + + +def get_conv_block( + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int] = 3, + strides: int = 1, + padding: Optional[Union[Tuple[int, ...], int]] = None, + act: Optional[Union[Tuple, str]] = "RELU", + norm: Optional[Union[Tuple, str]] = "BATCH", + initializer: Optional[str] = "kaiming_uniform", +) -> nn.Module: + if padding is None: + padding = same_padding(kernel_size) + conv_block: nn.Module = Convolution( + spatial_dims, + in_channels, + out_channels, + kernel_size=kernel_size, + strides=strides, + act=act, + norm=norm, + bias=False, + conv_only=False, + padding=padding, + ) + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims] + for m in conv_block.modules(): + if isinstance(m, conv_type): + if initializer == "kaiming_uniform": + nn.init.kaiming_normal_(torch.as_tensor(m.weight)) + elif initializer == "zeros": + nn.init.zeros_(torch.as_tensor(m.weight)) + else: + raise ValueError( + f"initializer {initializer} is not supported, " "currently supporting kaiming_uniform and zeros" + ) + return conv_block + + +def get_conv_layer( + spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Union[Sequence[int], int] = 3 +) -> nn.Module: + padding = same_padding(kernel_size) + mod: nn.Module = Convolution( + spatial_dims, in_channels, out_channels, kernel_size=kernel_size, bias=False, conv_only=True, padding=padding + ) + return mod + + +class RegistrationResidualConvBlock(nn.Module): + """ + A block with skip links and layer - norm - activation. + Only changes the number of channels, the spatial size is kept same. 
+ """ + + def __init__( + self, spatial_dims: int, in_channels: int, out_channels: int, num_layers: int = 2, kernel_size: int = 3 + ): + """ + + Args: + spatial_dims: number of spatial dimensions + in_channels: number of input channels + out_channels: number of output channels + num_layers: number of layers inside the block + kernel_size: kernel_size + """ + super().__init__() + self.num_layers = num_layers + self.layers = nn.ModuleList( + [ + get_conv_layer( + spatial_dims=spatial_dims, + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + kernel_size=kernel_size, + ) + for i in range(num_layers) + ] + ) + self.norms = nn.ModuleList([Norm[Norm.BATCH, spatial_dims](out_channels) for _ in range(num_layers)]) + self.acts = nn.ModuleList([nn.ReLU() for _ in range(num_layers)]) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + + Args: + x: Tensor in shape (batch, ``in_channels``, insize_1, insize_2, [insize_3]) + + Returns: + Tensor in shape (batch, ``out_channels``, insize_1, insize_2, [insize_3]), + with the same spatial size as ``x`` + """ + skip = x + for i, (conv, norm, act) in enumerate(zip(self.layers, self.norms, self.acts)): + x = conv(x) + x = norm(x) + if i == self.num_layers - 1: + # last block + x = x + skip + x = act(x) + return x + + +class RegistrationDownSampleBlock(nn.Module): + """ + A down-sample module used in RegUNet to half the spatial size. + The number of channels is kept same. + + Adapted from: + DeepReg (https://github.com/DeepRegNet/DeepReg) + """ + + def __init__(self, spatial_dims: int, channels: int, pooling: bool) -> None: + """ + Args: + spatial_dims: number of spatial dimensions. + channels: channels + pooling: use MaxPool if True, strided conv if False + """ + super().__init__() + if pooling: + self.layer = Pool[Pool.MAX, spatial_dims](kernel_size=2) + else: + self.layer = get_conv_block( + spatial_dims=spatial_dims, + in_channels=channels, + out_channels=channels, + kernel_size=2, + strides=2, + padding=0, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Halves the spatial dimensions and keeps the same channel. + output in shape (batch, ``channels``, insize_1 / 2, insize_2 / 2, [insize_3 / 2]), + + Args: + x: Tensor in shape (batch, ``channels``, insize_1, insize_2, [insize_3]) + + Raises: + ValueError: when input spatial dimensions are not even. + """ + for i in x.shape[2:]: + if i % 2 != 0: + raise ValueError("expecting x spatial dimensions be even, " f"got x of shape {x.shape}") + out: torch.Tensor = self.layer(x) + return out + + +def get_deconv_block(spatial_dims: int, in_channels: int, out_channels: int) -> nn.Module: + mod: nn.Module = Convolution( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + strides=2, + act="RELU", + norm="BATCH", + bias=False, + is_transposed=True, + padding=1, + output_padding=1, + ) + return mod + + +class RegistrationExtractionBlock(nn.Module): + """ + The Extraction Block used in RegUNet. + Extracts feature from each ``extract_levels`` and takes the average. 
+ """ + + def __init__( + self, + spatial_dims: int, + extract_levels: Tuple[int], + num_channels: Union[Tuple[int], List[int]], + out_channels: int, + kernel_initializer: Optional[str] = "kaiming_uniform", + activation: Optional[str] = None, + ): + """ + + Args: + spatial_dims: number of spatial dimensions + extract_levels: spatial levels to extract feature from, 0 refers to the input scale + num_channels: number of channels at each scale level, + List or Tuple of length equals to `depth` of the RegNet + out_channels: number of output channels + kernel_initializer: kernel initializer + activation: kernel activation function + """ + super().__init__() + self.extract_levels = extract_levels + self.max_level = max(extract_levels) + self.layers = nn.ModuleList( + [ + get_conv_block( + spatial_dims=spatial_dims, + in_channels=num_channels[d], + out_channels=out_channels, + norm=None, + act=activation, + initializer=kernel_initializer, + ) + for d in extract_levels + ] + ) + + def forward(self, x: List[torch.Tensor], image_size: List[int]) -> torch.Tensor: + """ + + Args: + x: Decoded feature at different spatial levels, sorted from deep to shallow + image_size: output image size + + Returns: + Tensor of shape (batch, `out_channels`, size1, size2, size3), where (size1, size2, size3) = ``image_size`` + """ + feature_list = [ + F.interpolate(layer(x[self.max_level - level]), size=image_size) + for layer, level in zip(self.layers, self.extract_levels) + ] + out: torch.Tensor = torch.mean(torch.stack(feature_list, dim=0), dim=0) + return out diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/segresnet_block.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/segresnet_block.py new file mode 100644 index 0000000000000000000000000000000000000000..ded270ab526cc809664ff738d3292fc3f79aac40 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/segresnet_block.py @@ -0,0 +1,94 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Tuple, Union + +import torch.nn as nn + +from monai.networks.blocks.convolutions import Convolution +from monai.networks.blocks.upsample import UpSample +from monai.networks.layers.utils import get_act_layer, get_norm_layer +from monai.utils import InterpolateMode, UpsampleMode + + +def get_conv_layer( + spatial_dims: int, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, bias: bool = False +): + + return Convolution( + spatial_dims, in_channels, out_channels, strides=stride, kernel_size=kernel_size, bias=bias, conv_only=True + ) + + +def get_upsample_layer( + spatial_dims: int, in_channels: int, upsample_mode: Union[UpsampleMode, str] = "nontrainable", scale_factor: int = 2 +): + return UpSample( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=in_channels, + scale_factor=scale_factor, + mode=upsample_mode, + interp_mode=InterpolateMode.LINEAR, + align_corners=False, + ) + + +class ResBlock(nn.Module): + """ + ResBlock employs skip connection and two convolution blocks and is used + in SegResNet based on `3D MRI brain tumor segmentation using autoencoder regularization + `_. + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + norm: Union[Tuple, str], + kernel_size: int = 3, + act: Union[Tuple, str] = ("RELU", {"inplace": True}), + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions, could be 1, 2 or 3. + in_channels: number of input channels. + norm: feature normalization type and arguments. + kernel_size: convolution kernel size, the value should be an odd number. Defaults to 3. + act: activation type and arguments. Defaults to ``RELU``. + """ + + super().__init__() + + if kernel_size % 2 != 1: + raise AssertionError("kernel_size should be an odd number.") + + self.norm1 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels) + self.norm2 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels) + self.act = get_act_layer(act) + self.conv1 = get_conv_layer(spatial_dims, in_channels=in_channels, out_channels=in_channels) + self.conv2 = get_conv_layer(spatial_dims, in_channels=in_channels, out_channels=in_channels) + + def forward(self, x): + + identity = x + + x = self.norm1(x) + x = self.act(x) + x = self.conv1(x) + + x = self.norm2(x) + x = self.act(x) + x = self.conv2(x) + + x += identity + + return x diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/selfattention.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/selfattention.py new file mode 100644 index 0000000000000000000000000000000000000000..db92111d1426d48f852fa3b382344c31b99bb952 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/selfattention.py @@ -0,0 +1,62 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
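# Editor's aside: exercising the ResBlock from segresnet_block.py above (a sketch;
# the group-norm setting mirrors SegResNet's default and is an assumption here):
#
#     import torch
#     from monai.networks.blocks.segresnet_block import ResBlock
#
#     res = ResBlock(spatial_dims=3, in_channels=8, norm=("GROUP", {"num_groups": 8}))
#     x = torch.randn(1, 8, 16, 16, 16)
#     print(res(x).shape)  # torch.Size([1, 8, 16, 16, 16]) -- channels and size unchanged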
+ +import torch +import torch.nn as nn + +from monai.utils import optional_import + +Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange") + + +class SABlock(nn.Module): + """ + A self-attention block, based on: "Dosovitskiy et al., + An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>" + """ + + def __init__(self, hidden_size: int, num_heads: int, dropout_rate: float = 0.0) -> None: + """ + Args: + hidden_size: dimension of hidden layer. + num_heads: number of attention heads. + dropout_rate: fraction of the input units to drop. + + """ + + super().__init__() + + if not (0 <= dropout_rate <= 1): + raise ValueError("dropout_rate should be between 0 and 1.") + + if hidden_size % num_heads != 0: + raise ValueError("hidden size should be divisible by num_heads.") + + self.num_heads = num_heads + self.out_proj = nn.Linear(hidden_size, hidden_size) + self.qkv = nn.Linear(hidden_size, hidden_size * 3, bias=False) + self.input_rearrange = Rearrange("b h (qkv l d) -> qkv b l h d", qkv=3, l=num_heads) + self.out_rearrange = Rearrange("b h l d -> b l (h d)") + self.drop_output = nn.Dropout(dropout_rate) + self.drop_weights = nn.Dropout(dropout_rate) + self.head_dim = hidden_size // num_heads + self.scale = self.head_dim**-0.5 + + def forward(self, x): + output = self.input_rearrange(self.qkv(x)) + q, k, v = output[0], output[1], output[2] + att_mat = (torch.einsum("blxd,blyd->blxy", q, k) * self.scale).softmax(dim=-1) + att_mat = self.drop_weights(att_mat) + x = torch.einsum("bhxy,bhyd->bhxd", att_mat, v) + x = self.out_rearrange(x) + x = self.out_proj(x) + x = self.drop_output(x) + return x diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/squeeze_and_excitation.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/squeeze_and_excitation.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ac57aa4f7cae47e6f262adb9d63ce78ba99cf1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/squeeze_and_excitation.py @@ -0,0 +1,380 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from monai.networks.blocks import Convolution +from monai.networks.layers.factories import Act, Conv, Norm, Pool, split_args + + +class ChannelSELayer(nn.Module): + """ + Re-implementation of the Squeeze-and-Excitation block based on: + "Hu et al., Squeeze-and-Excitation Networks, https://arxiv.org/abs/1709.01507". + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + r: int = 2, + acti_type_1: Union[Tuple[str, Dict], str] = ("relu", {"inplace": True}), + acti_type_2: Union[Tuple[str, Dict], str] = "sigmoid", + add_residual: bool = False, + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions, could be 1, 2, or 3. + in_channels: number of input channels.
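# Editor's aside: a hypothetical smoke test for the SABlock in selfattention.py
# above (requires einops; the shapes are assumptions):
#
#     import torch
#     from monai.networks.blocks.selfattention import SABlock
#
#     attn = SABlock(hidden_size=96, num_heads=8, dropout_rate=0.1)
#     tokens = torch.randn(2, 64, 96)  # (batch, sequence, hidden_size)
#     print(attn(tokens).shape)        # torch.Size([2, 64, 96])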
+ r: the reduction ratio r in the paper. Defaults to 2. + acti_type_1: activation type of the hidden squeeze layer. Defaults to ``("relu", {"inplace": True})``. + acti_type_2: activation type of the output squeeze layer. Defaults to "sigmoid". + + Raises: + ValueError: When ``r`` is nonpositive or larger than ``in_channels``. + + See also: + + :py:class:`monai.networks.layers.Act` + + """ + super().__init__() + + self.add_residual = add_residual + + pool_type = Pool[Pool.ADAPTIVEAVG, spatial_dims] + self.avg_pool = pool_type(1) # spatial size (1, 1, ...) + + channels = int(in_channels // r) + if channels <= 0: + raise ValueError(f"r must be positive and smaller than in_channels, got r={r} in_channels={in_channels}.") + + act_1, act_1_args = split_args(acti_type_1) + act_2, act_2_args = split_args(acti_type_2) + self.fc = nn.Sequential( + nn.Linear(in_channels, channels, bias=True), + Act[act_1](**act_1_args), + nn.Linear(channels, in_channels, bias=True), + Act[act_2](**act_2_args), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: in shape (batch, in_channels, spatial_1[, spatial_2, ...]). + """ + b, c = x.shape[:2] + y: torch.Tensor = self.avg_pool(x).view(b, c) + y = self.fc(y).view([b, c] + [1] * (x.ndim - 2)) + result = x * y + + # Residual connection is moved here instead of providing an override of forward in ResidualSELayer since + # Torchscript has an issue with using super(). + if self.add_residual: + result += x + + return result + + +class ResidualSELayer(ChannelSELayer): + """ + A "squeeze-and-excitation"-like layer with a residual connection:: + + --+-- SE --o-- + | | + +--------+ + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + r: int = 2, + acti_type_1: Union[Tuple[str, Dict], str] = "leakyrelu", + acti_type_2: Union[Tuple[str, Dict], str] = "relu", + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions, could be 1, 2, or 3. + in_channels: number of input channels. + r: the reduction ratio r in the paper. Defaults to 2. + acti_type_1: defaults to "leakyrelu". + acti_type_2: defaults to "relu". + + See also: + :py:class:`monai.networks.blocks.ChannelSELayer` + """ + super().__init__( + spatial_dims=spatial_dims, + in_channels=in_channels, + r=r, + acti_type_1=acti_type_1, + acti_type_2=acti_type_2, + add_residual=True, + ) + + +class SEBlock(nn.Module): + """ + Residual module enhanced with Squeeze-and-Excitation:: + + ----+- conv1 -- conv2 -- conv3 -- SE -o--- + | | + +---(channel project if needed)----+ + + Re-implementation of the SE-Resnet block based on: + "Hu et al., Squeeze-and-Excitation Networks, https://arxiv.org/abs/1709.01507". + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + n_chns_1: int, + n_chns_2: int, + n_chns_3: int, + conv_param_1: Optional[Dict] = None, + conv_param_2: Optional[Dict] = None, + conv_param_3: Optional[Dict] = None, + project: Optional[Convolution] = None, + r: int = 2, + acti_type_1: Union[Tuple[str, Dict], str] = ("relu", {"inplace": True}), + acti_type_2: Union[Tuple[str, Dict], str] = "sigmoid", + acti_type_final: Optional[Union[Tuple[str, Dict], str]] = ("relu", {"inplace": True}), + ): + """ + Args: + spatial_dims: number of spatial dimensions, could be 1, 2, or 3. + in_channels: number of input channels. + n_chns_1: number of output channels in the 1st convolution. + n_chns_2: number of output channels in the 2nd convolution. + n_chns_3: number of output channels in the 3rd convolution. 
+ conv_param_1: additional parameters to the 1st convolution. + Defaults to ``{"kernel_size": 1, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}`` + conv_param_2: additional parameters to the 2nd convolution. + Defaults to ``{"kernel_size": 3, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}`` + conv_param_3: additional parameters to the 3rd convolution. + Defaults to ``{"kernel_size": 1, "norm": Norm.BATCH, "act": None}`` + project: in the case of residual chns and output chns doesn't match, a project + (Conv) layer/block is used to adjust the number of chns. In SENET, it is + consisted with a Conv layer as well as a Norm layer. + Defaults to None (chns are matchable) or a Conv layer with kernel size 1. + r: the reduction ratio r in the paper. Defaults to 2. + acti_type_1: activation type of the hidden squeeze layer. Defaults to "relu". + acti_type_2: activation type of the output squeeze layer. Defaults to "sigmoid". + acti_type_final: activation type of the end of the block. Defaults to "relu". + + See also: + + :py:class:`monai.networks.blocks.ChannelSELayer` + + """ + super().__init__() + + if not conv_param_1: + conv_param_1 = {"kernel_size": 1, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})} + self.conv1 = Convolution( + spatial_dims=spatial_dims, in_channels=in_channels, out_channels=n_chns_1, **conv_param_1 + ) + + if not conv_param_2: + conv_param_2 = {"kernel_size": 3, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})} + self.conv2 = Convolution(spatial_dims=spatial_dims, in_channels=n_chns_1, out_channels=n_chns_2, **conv_param_2) + + if not conv_param_3: + conv_param_3 = {"kernel_size": 1, "norm": Norm.BATCH, "act": None} + self.conv3 = Convolution(spatial_dims=spatial_dims, in_channels=n_chns_2, out_channels=n_chns_3, **conv_param_3) + + self.se_layer = ChannelSELayer( + spatial_dims=spatial_dims, in_channels=n_chns_3, r=r, acti_type_1=acti_type_1, acti_type_2=acti_type_2 + ) + + if project is None and in_channels != n_chns_3: + self.project = Conv[Conv.CONV, spatial_dims](in_channels, n_chns_3, kernel_size=1) + elif project is None: + self.project = nn.Identity() + else: + self.project = project + + if acti_type_final is not None: + act_final, act_final_args = split_args(acti_type_final) + self.act = Act[act_final](**act_final_args) + else: + self.act = nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: in shape (batch, in_channels, spatial_1[, spatial_2, ...]). + """ + residual = self.project(x) + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + x = self.se_layer(x) + x += residual + x = self.act(x) + return x + + +class SEBottleneck(SEBlock): + """ + Bottleneck for SENet154. 
+ """ + + expansion = 4 + + def __init__( + self, + spatial_dims: int, + inplanes: int, + planes: int, + groups: int, + reduction: int, + stride: int = 1, + downsample: Optional[Convolution] = None, + ) -> None: + + conv_param_1 = { + "strides": 1, + "kernel_size": 1, + "act": ("relu", {"inplace": True}), + "norm": Norm.BATCH, + "bias": False, + } + conv_param_2 = { + "strides": stride, + "kernel_size": 3, + "act": ("relu", {"inplace": True}), + "norm": Norm.BATCH, + "groups": groups, + "bias": False, + } + conv_param_3 = {"strides": 1, "kernel_size": 1, "act": None, "norm": Norm.BATCH, "bias": False} + + super().__init__( + spatial_dims=spatial_dims, + in_channels=inplanes, + n_chns_1=planes * 2, + n_chns_2=planes * 4, + n_chns_3=planes * 4, + conv_param_1=conv_param_1, + conv_param_2=conv_param_2, + conv_param_3=conv_param_3, + project=downsample, + r=reduction, + ) + + +class SEResNetBottleneck(SEBlock): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `strides=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). + """ + + expansion = 4 + + def __init__( + self, + spatial_dims: int, + inplanes: int, + planes: int, + groups: int, + reduction: int, + stride: int = 1, + downsample: Optional[Convolution] = None, + ) -> None: + + conv_param_1 = { + "strides": stride, + "kernel_size": 1, + "act": ("relu", {"inplace": True}), + "norm": Norm.BATCH, + "bias": False, + } + conv_param_2 = { + "strides": 1, + "kernel_size": 3, + "act": ("relu", {"inplace": True}), + "norm": Norm.BATCH, + "groups": groups, + "bias": False, + } + conv_param_3 = {"strides": 1, "kernel_size": 1, "act": None, "norm": Norm.BATCH, "bias": False} + + super().__init__( + spatial_dims=spatial_dims, + in_channels=inplanes, + n_chns_1=planes, + n_chns_2=planes, + n_chns_3=planes * 4, + conv_param_1=conv_param_1, + conv_param_2=conv_param_2, + conv_param_3=conv_param_3, + project=downsample, + r=reduction, + ) + + +class SEResNeXtBottleneck(SEBlock): + """ + ResNeXt bottleneck type C with a Squeeze-and-Excitation module. 
+ """ + + expansion = 4 + + def __init__( + self, + spatial_dims: int, + inplanes: int, + planes: int, + groups: int, + reduction: int, + stride: int = 1, + downsample: Optional[Convolution] = None, + base_width: int = 4, + ) -> None: + + conv_param_1 = { + "strides": 1, + "kernel_size": 1, + "act": ("relu", {"inplace": True}), + "norm": Norm.BATCH, + "bias": False, + } + conv_param_2 = { + "strides": stride, + "kernel_size": 3, + "act": ("relu", {"inplace": True}), + "norm": Norm.BATCH, + "groups": groups, + "bias": False, + } + conv_param_3 = {"strides": 1, "kernel_size": 1, "act": None, "norm": Norm.BATCH, "bias": False} + width = math.floor(planes * (base_width / 64)) * groups + + super().__init__( + spatial_dims=spatial_dims, + in_channels=inplanes, + n_chns_1=width, + n_chns_2=width, + n_chns_3=planes * 4, + conv_param_1=conv_param_1, + conv_param_2=conv_param_2, + conv_param_3=conv_param_3, + project=downsample, + r=reduction, + ) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/transformerblock.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/transformerblock.py new file mode 100644 index 0000000000000000000000000000000000000000..616d84e067fadf0d7d92f927c91f7c5e8354ad94 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/transformerblock.py @@ -0,0 +1,50 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch.nn as nn + +from monai.networks.blocks.mlp import MLPBlock +from monai.networks.blocks.selfattention import SABlock + + +class TransformerBlock(nn.Module): + """ + A transformer block, based on: "Dosovitskiy et al., + An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale " + """ + + def __init__(self, hidden_size: int, mlp_dim: int, num_heads: int, dropout_rate: float = 0.0) -> None: + """ + Args: + hidden_size: dimension of hidden layer. + mlp_dim: dimension of feedforward layer. + num_heads: number of attention heads. + dropout_rate: faction of the input units to drop. 
+ + """ + + super().__init__() + + if not (0 <= dropout_rate <= 1): + raise ValueError("dropout_rate should be between 0 and 1.") + + if hidden_size % num_heads != 0: + raise ValueError("hidden_size should be divisible by num_heads.") + + self.mlp = MLPBlock(hidden_size, mlp_dim, dropout_rate) + self.norm1 = nn.LayerNorm(hidden_size) + self.attn = SABlock(hidden_size, num_heads, dropout_rate) + self.norm2 = nn.LayerNorm(hidden_size) + + def forward(self, x): + x = x + self.attn(self.norm1(x)) + x = x + self.mlp(self.norm2(x)) + return x diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/unetr_block.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/unetr_block.py new file mode 100644 index 0000000000000000000000000000000000000000..a9d871a644fc017b22fc1b862fa5268f86a7494c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/unetr_block.py @@ -0,0 +1,258 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Sequence, Tuple, Union + +import torch +import torch.nn as nn + +from monai.networks.blocks.dynunet_block import UnetBasicBlock, UnetResBlock, get_conv_layer + + +class UnetrUpBlock(nn.Module): + """ + An upsampling module that can be used for UNETR: "Hatamizadeh et al., + UNETR: Transformers for 3D Medical Image Segmentation " + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int], + upsample_kernel_size: Union[Sequence[int], int], + norm_name: Union[Tuple, str], + res_block: bool = False, + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + kernel_size: convolution kernel size. + upsample_kernel_size: convolution kernel size for transposed convolution layers. + norm_name: feature normalization type and arguments. + res_block: bool argument to determine if residual block is used. 
+ + """ + + super().__init__() + upsample_stride = upsample_kernel_size + self.transp_conv = get_conv_layer( + spatial_dims, + in_channels, + out_channels, + kernel_size=upsample_kernel_size, + stride=upsample_stride, + conv_only=True, + is_transposed=True, + ) + + if res_block: + self.conv_block = UnetResBlock( + spatial_dims, + out_channels + out_channels, + out_channels, + kernel_size=kernel_size, + stride=1, + norm_name=norm_name, + ) + else: + self.conv_block = UnetBasicBlock( # type: ignore + spatial_dims, + out_channels + out_channels, + out_channels, + kernel_size=kernel_size, + stride=1, + norm_name=norm_name, + ) + + def forward(self, inp, skip): + # number of channels for skip should equals to out_channels + out = self.transp_conv(inp) + out = torch.cat((out, skip), dim=1) + out = self.conv_block(out) + return out + + +class UnetrPrUpBlock(nn.Module): + """ + A projection upsampling module that can be used for UNETR: "Hatamizadeh et al., + UNETR: Transformers for 3D Medical Image Segmentation " + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + num_layer: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + upsample_kernel_size: Union[Sequence[int], int], + norm_name: Union[Tuple, str], + conv_block: bool = False, + res_block: bool = False, + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + num_layer: number of upsampling blocks. + kernel_size: convolution kernel size. + stride: convolution stride. + upsample_kernel_size: convolution kernel size for transposed convolution layers. + norm_name: feature normalization type and arguments. + conv_block: bool argument to determine if convolutional block is used. + res_block: bool argument to determine if residual block is used. 
+ + """ + + super().__init__() + + upsample_stride = upsample_kernel_size + self.transp_conv_init = get_conv_layer( + spatial_dims, + in_channels, + out_channels, + kernel_size=upsample_kernel_size, + stride=upsample_stride, + conv_only=True, + is_transposed=True, + ) + if conv_block: + if res_block: + self.blocks = nn.ModuleList( + [ + nn.Sequential( + get_conv_layer( + spatial_dims, + out_channels, + out_channels, + kernel_size=upsample_kernel_size, + stride=upsample_stride, + conv_only=True, + is_transposed=True, + ), + UnetResBlock( + spatial_dims=spatial_dims, + in_channels=out_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + norm_name=norm_name, + ), + ) + for i in range(num_layer) + ] + ) + else: + self.blocks = nn.ModuleList( + [ + nn.Sequential( + get_conv_layer( + spatial_dims, + out_channels, + out_channels, + kernel_size=upsample_kernel_size, + stride=upsample_stride, + conv_only=True, + is_transposed=True, + ), + UnetBasicBlock( + spatial_dims=spatial_dims, + in_channels=out_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + norm_name=norm_name, + ), + ) + for i in range(num_layer) + ] + ) + else: + self.blocks = nn.ModuleList( + [ + get_conv_layer( + spatial_dims, + out_channels, + out_channels, + kernel_size=upsample_kernel_size, + stride=upsample_stride, + conv_only=True, + is_transposed=True, + ) + for i in range(num_layer) + ] + ) + + def forward(self, x): + x = self.transp_conv_init(x) + for blk in self.blocks: + x = blk(x) + return x + + +class UnetrBasicBlock(nn.Module): + """ + A CNN module that can be used for UNETR, based on: "Hatamizadeh et al., + UNETR: Transformers for 3D Medical Image Segmentation " + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int], + norm_name: Union[Tuple, str], + res_block: bool = False, + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + kernel_size: convolution kernel size. + stride: convolution stride. + norm_name: feature normalization type and arguments. + res_block: bool argument to determine if residual block is used. + + """ + + super().__init__() + + if res_block: + self.layer = UnetResBlock( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + norm_name=norm_name, + ) + else: + self.layer = UnetBasicBlock( # type: ignore + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + norm_name=norm_name, + ) + + def forward(self, inp): + return self.layer(inp) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/upsample.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/upsample.py new file mode 100644 index 0000000000000000000000000000000000000000..fa3929df20f99a1e4c0afd18e00038d96facbfcd --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/upsample.py @@ -0,0 +1,262 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn + +from monai.networks.layers.factories import Conv, Pad, Pool +from monai.networks.utils import icnr_init, pixelshuffle +from monai.utils import InterpolateMode, UpsampleMode, deprecated_arg, ensure_tuple_rep, look_up_option + +__all__ = ["Upsample", "UpSample", "SubpixelUpsample", "Subpixelupsample", "SubpixelUpSample"] + + +class UpSample(nn.Sequential): + """ + Upsamples data by `scale_factor`. + Supported modes are: + + - "deconv": uses a transposed convolution. + - "nontrainable": uses :py:class:`torch.nn.Upsample`. + - "pixelshuffle": uses :py:class:`monai.networks.blocks.SubpixelUpsample`. + + This module can optionally take a pre-convolution + (often used to map the number of features from `in_channels` to `out_channels`). + """ + + @deprecated_arg( + name="dimensions", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead." + ) + def __init__( + self, + spatial_dims: int, + in_channels: Optional[int] = None, + out_channels: Optional[int] = None, + scale_factor: Union[Sequence[float], float] = 2, + size: Optional[Union[Tuple[int], int]] = None, + mode: Union[UpsampleMode, str] = UpsampleMode.DECONV, + pre_conv: Optional[Union[nn.Module, str]] = "default", + interp_mode: Union[InterpolateMode, str] = InterpolateMode.LINEAR, + align_corners: Optional[bool] = True, + bias: bool = True, + apply_pad_pool: bool = True, + dimensions: Optional[int] = None, + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions of the input image. + in_channels: number of channels of the input image. + out_channels: number of channels of the output image. Defaults to `in_channels`. + scale_factor: multiplier for spatial size. Has to match input size if it is a tuple. Defaults to 2. + size: spatial size of the output image. + Only used when ``mode`` is ``UpsampleMode.NONTRAINABLE``. + In torch.nn.functional.interpolate, only one of `size` or `scale_factor` should be defined, + thus if size is defined, `scale_factor` will not be used. + Defaults to None. + mode: {``"deconv"``, ``"nontrainable"``, ``"pixelshuffle"``}. Defaults to ``"deconv"``. + pre_conv: a conv block applied before upsampling. Defaults to "default". + When ``pre_conv`` is ``"default"``, one reserved conv layer will be utilized when ``in_channels != out_channels``. + Only used in the "nontrainable" or "pixelshuffle" mode. + interp_mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``} + Only used in the "nontrainable" mode. + If it ends with ``"linear"``, ``spatial_dims`` will be used to determine the correct interpolation. + This corresponds to linear, bilinear, trilinear for 1D, 2D, and 3D respectively. + The interpolation mode. Defaults to ``"linear"``. + See also: https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html + align_corners: set the align_corners parameter of `torch.nn.Upsample`. Defaults to True. + Only used in the "nontrainable" mode. + bias: whether to have a bias term in the default preconv and deconv layers. Defaults to True.
+ apply_pad_pool: if True the upsampled tensor is padded then average pooling is applied with a kernel the + size of `scale_factor` with a stride of 1. See also: :py:class:`monai.networks.blocks.SubpixelUpsample`. + Only used in the "pixelshuffle" mode. + + .. deprecated:: 0.6.0 + ``dimensions`` is deprecated, use ``spatial_dims`` instead. + """ + super().__init__() + if dimensions is not None: + spatial_dims = dimensions + scale_factor_ = ensure_tuple_rep(scale_factor, spatial_dims) + up_mode = look_up_option(mode, UpsampleMode) + if up_mode == UpsampleMode.DECONV: + if not in_channels: + raise ValueError(f"in_channels needs to be specified in the '{mode}' mode.") + self.add_module( + "deconv", + Conv[Conv.CONVTRANS, spatial_dims]( + in_channels=in_channels, + out_channels=out_channels or in_channels, + kernel_size=scale_factor_, + stride=scale_factor_, + bias=bias, + ), + ) + elif up_mode == UpsampleMode.NONTRAINABLE: + if pre_conv == "default" and (out_channels != in_channels): # defaults to no conv if out_chns==in_chns + if not in_channels: + raise ValueError(f"in_channels needs to be specified in the '{mode}' mode.") + self.add_module( + "preconv", + Conv[Conv.CONV, spatial_dims]( + in_channels=in_channels, out_channels=out_channels or in_channels, kernel_size=1, bias=bias + ), + ) + elif pre_conv is not None and pre_conv != "default": + self.add_module("preconv", pre_conv) # type: ignore + elif pre_conv is None and (out_channels != in_channels): + raise ValueError( + "in the nontrainable mode, if not setting pre_conv, out_channels should equal to in_channels." + ) + + interp_mode = InterpolateMode(interp_mode) + linear_mode = [InterpolateMode.LINEAR, InterpolateMode.BILINEAR, InterpolateMode.TRILINEAR] + if interp_mode in linear_mode: # choose mode based on dimensions + interp_mode = linear_mode[spatial_dims - 1] + self.add_module( + "upsample_non_trainable", + nn.Upsample( + size=size, + scale_factor=None if size else scale_factor_, + mode=interp_mode.value, + align_corners=align_corners, + ), + ) + elif up_mode == UpsampleMode.PIXELSHUFFLE: + self.add_module( + "pixelshuffle", + SubpixelUpsample( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + scale_factor=scale_factor_[0], # isotropic + conv_block=pre_conv, + apply_pad_pool=apply_pad_pool, + bias=bias, + ), + ) + else: + raise NotImplementedError(f"Unsupported upsampling mode {mode}.") + + +class SubpixelUpsample(nn.Module): + """ + Upsample using a subpixel CNN. This module supports 1D, 2D and 3D input images. + The module consists of two parts. First, a convolutional layer is employed + to increase the number of channels to ``in_channels * (scale_factor ** dimensions)``. + Second, a pixel shuffle manipulation is utilized to aggregate the feature maps from + low resolution space and build the super resolution space. + The first part of the module is not fixed; a sequence of layers can be used to replace the + default single layer. + + See: Shi et al., 2016, "Real-Time Single Image and Video Super-Resolution + Using an Efficient Sub-Pixel Convolutional Neural Network." + + See: Aitken et al., 2017, "Checkerboard artifact free sub-pixel convolution". + + The idea comes from: + https://arxiv.org/abs/1609.05158 + + The pixel shuffle mechanism refers to: + https://pytorch.org/docs/stable/generated/torch.nn.PixelShuffle.html#torch.nn.PixelShuffle. + and: + https://github.com/pytorch/pytorch/pull/6340.
+ + """ + + @deprecated_arg( + name="dimensions", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead." + ) + def __init__( + self, + spatial_dims: int, + in_channels: Optional[int], + out_channels: Optional[int] = None, + scale_factor: int = 2, + conv_block: Optional[Union[nn.Module, str]] = "default", + apply_pad_pool: bool = True, + bias: bool = True, + dimensions: Optional[int] = None, + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions of the input image. + in_channels: number of channels of the input image. + out_channels: optional number of channels of the output image. + scale_factor: multiplier for spatial size. Defaults to 2. + conv_block: a conv block to extract feature maps before upsampling. Defaults to None. + + - When ``conv_block`` is ``"default"``, one reserved conv layer will be utilized. + - When ``conv_block`` is an ``nn.module``, + please ensure the output number of channels is divisible ``(scale_factor ** dimensions)``. + + apply_pad_pool: if True the upsampled tensor is padded then average pooling is applied with a kernel the + size of `scale_factor` with a stride of 1. This implements the nearest neighbour resize convolution + component of subpixel convolutions described in Aitken et al. + bias: whether to have a bias term in the default conv_block. Defaults to True. + + .. deprecated:: 0.6.0 + ``dimensions`` is deprecated, use ``spatial_dims`` instead. + """ + super().__init__() + + if scale_factor <= 0: + raise ValueError(f"The `scale_factor` multiplier must be an integer greater than 0, got {scale_factor}.") + + self.dimensions = spatial_dims if dimensions is None else dimensions + self.scale_factor = scale_factor + + if conv_block == "default": + out_channels = out_channels or in_channels + if not out_channels: + raise ValueError("in_channels need to be specified.") + conv_out_channels = out_channels * (scale_factor**self.dimensions) + self.conv_block = Conv[Conv.CONV, self.dimensions]( + in_channels=in_channels, out_channels=conv_out_channels, kernel_size=3, stride=1, padding=1, bias=bias + ) + + icnr_init(self.conv_block, self.scale_factor) + elif conv_block is None: + self.conv_block = nn.Identity() + else: + self.conv_block = conv_block + + self.pad_pool: nn.Module = nn.Identity() + + if apply_pad_pool: + pool_type = Pool[Pool.AVG, self.dimensions] + pad_type = Pad[Pad.CONSTANTPAD, self.dimensions] + + self.pad_pool = nn.Sequential( + pad_type(padding=(self.scale_factor - 1, 0) * self.dimensions, value=0.0), + pool_type(kernel_size=self.scale_factor, stride=1), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: Tensor in shape (batch, channel, spatial_1[, spatial_2, ...). + """ + x = self.conv_block(x) + if x.shape[1] % (self.scale_factor**self.dimensions) != 0: + raise ValueError( + f"Number of channels after `conv_block` ({x.shape[1]}) must be evenly " + "divisible by scale_factor ** dimensions " + f"({self.scale_factor}^{self.dimensions}={self.scale_factor**self.dimensions})." 
+ ) + x = pixelshuffle(x, self.dimensions, self.scale_factor) + x = self.pad_pool(x) + return x + + +Upsample = UpSample +Subpixelupsample = SubpixelUpSample = SubpixelUpsample diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/warp.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/warp.py new file mode 100644 index 0000000000000000000000000000000000000000..5b925258b62bca09fc6e886b12caf04f007c18ec --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/blocks/warp.py @@ -0,0 +1,156 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import List + +import torch +from torch import nn +from torch.nn import functional as F + +from monai.config.deviceconfig import USE_COMPILED +from monai.networks.layers.spatial_transforms import grid_pull +from monai.networks.utils import meshgrid_ij +from monai.utils import GridSampleMode, GridSamplePadMode, optional_import + +_C, _ = optional_import("monai._C") + +__all__ = ["Warp", "DVF2DDF"] + + +class Warp(nn.Module): + """ + Warp an image with given dense displacement field (DDF). + """ + + def __init__(self, mode=GridSampleMode.BILINEAR.value, padding_mode=GridSamplePadMode.BORDER.value): + """ + For pytorch native APIs, the possible values are: + + - mode: ``"nearest"``, ``"bilinear"``, ``"bicubic"``. + - padding_mode: ``"zeros"``, ``"border"``, ``"reflection"`` + + See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html + + For MONAI C++/CUDA extensions, the possible values are: + + - mode: ``"nearest"``, ``"bilinear"``, ``"bicubic"``, 0, 1, ... + - padding_mode: ``"zeros"``, ``"border"``, ``"reflection"``, 0, 1, ... 
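# Editor's aside: the three UpSample modes from upsample.py above, side by side
# (a sketch; the channel and spatial sizes are arbitrary assumptions):
#
#     import torch
#     from monai.networks.blocks.upsample import UpSample
#
#     x = torch.randn(1, 4, 16, 16)
#     for mode in ("deconv", "nontrainable", "pixelshuffle"):
#         up = UpSample(spatial_dims=2, in_channels=4, out_channels=4, scale_factor=2, mode=mode)
#         print(mode, up(x).shape)  # each prints torch.Size([1, 4, 32, 32])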
+ + See also: :py:class:`monai.networks.layers.grid_pull` + """ + super().__init__() + # resolves _interp_mode for different methods + + if USE_COMPILED: + if mode in (inter.value for inter in GridSampleMode): + mode = GridSampleMode(mode) + if mode == GridSampleMode.BILINEAR: + mode = 1 + elif mode == GridSampleMode.NEAREST: + mode = 0 + elif mode == GridSampleMode.BICUBIC: + mode = 3 + else: + mode = 1 # default to linear + self._interp_mode = mode + else: + warnings.warn("monai.networks.blocks.Warp: Using PyTorch native grid_sample.") + self._interp_mode = GridSampleMode(mode).value + + # resolves _padding_mode for different methods + if USE_COMPILED: + if padding_mode in (pad.value for pad in GridSamplePadMode): + padding_mode = GridSamplePadMode(padding_mode) + if padding_mode == GridSamplePadMode.ZEROS: + padding_mode = 7 + elif padding_mode == GridSamplePadMode.BORDER: + padding_mode = 0 + elif padding_mode == GridSamplePadMode.REFLECTION: + padding_mode = 1 + else: + padding_mode = 0 # default to nearest + self._padding_mode = padding_mode + else: + self._padding_mode = GridSamplePadMode(padding_mode).value + + @staticmethod + def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor: + mesh_points = [torch.arange(0, dim) for dim in ddf.shape[2:]] + grid = torch.stack(meshgrid_ij(*mesh_points), dim=0) # (spatial_dims, ...) + grid = torch.stack([grid] * ddf.shape[0], dim=0) # (batch, spatial_dims, ...) + grid = grid.to(ddf) + return grid + + def forward(self, image: torch.Tensor, ddf: torch.Tensor): + """ + Args: + image: Tensor in shape (batch, num_channels, H, W[, D]) + ddf: Tensor in the same spatial size as image, in shape (batch, ``spatial_dims``, H, W[, D]) + + Returns: + warped_image in the same shape as image (batch, num_channels, H, W[, D]) + """ + spatial_dims = len(image.shape) - 2 + if spatial_dims not in (2, 3): + raise NotImplementedError(f"got unsupported spatial_dims={spatial_dims}, currently support 2 or 3.") + ddf_shape = (image.shape[0], spatial_dims) + tuple(image.shape[2:]) + if ddf.shape != ddf_shape: + raise ValueError( + f"Given input {spatial_dims}-d image shape {image.shape}, " f"the input DDF shape must be {ddf_shape}." + ) + grid = self.get_reference_grid(ddf) + ddf + grid = grid.permute([0] + list(range(2, 2 + spatial_dims)) + [1]) # (batch, ..., spatial_dims) + + if not USE_COMPILED: # pytorch native grid_sample + for i, dim in enumerate(grid.shape[1:-1]): + grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1 + index_ordering: List[int] = list(range(spatial_dims - 1, -1, -1)) + grid = grid[..., index_ordering] # z, y, x -> x, y, z + return F.grid_sample( + image, grid, mode=self._interp_mode, padding_mode=f"{self._padding_mode}", align_corners=True + ) + + # using csrc resampling + return grid_pull(image, grid, bound=self._padding_mode, extrapolate=True, interpolation=self._interp_mode) + + +class DVF2DDF(nn.Module): + """ + Layer calculates a dense displacement field (DDF) from a dense velocity field (DVF) + with scaling and squaring. 
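# Editor's aside: warping with the Warp module defined above (illustrative; a
# zero displacement field should reproduce the input up to interpolation error):
#
#     import torch
#     from monai.networks.blocks.warp import Warp
#
#     warp = Warp()  # bilinear interpolation, border padding
#     image = torch.randn(1, 1, 16, 16)
#     ddf = torch.zeros(1, 2, 16, 16)  # (batch, spatial_dims, H, W)
#     moved = warp(image, ddf)
#     print((moved - image).abs().max())  # ~0: the zero DDF is (nearly) the identity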
+ + Adapted from: + DeepReg (https://github.com/DeepRegNet/DeepReg) + + """ + + def __init__( + self, num_steps: int = 7, mode=GridSampleMode.BILINEAR.value, padding_mode=GridSamplePadMode.ZEROS.value + ): + super().__init__() + if num_steps <= 0: + raise ValueError(f"expecting positive num_steps, got {num_steps}") + self.num_steps = num_steps + self.warp_layer = Warp(mode=mode, padding_mode=padding_mode) + + def forward(self, dvf): + """ + Args: + dvf: dvf to be transformed, in shape (batch, ``spatial_dims``, H, W[,D]) + + Returns: + a dense displacement field + """ + ddf: torch.Tensor = dvf / (2**self.num_steps) + for _ in range(self.num_steps): + ddf = ddf + self.warp_layer(image=ddf, ddf=ddf) + return ddf diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/ahnet.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/ahnet.py new file mode 100644 index 0000000000000000000000000000000000000000..b481374aa109984db021557dcf89d220ae07fd00 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/ahnet.py @@ -0,0 +1,542 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Optional, Sequence, Type, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from monai.networks.blocks.fcn import FCN +from monai.networks.layers.factories import Act, Conv, Norm, Pool + +__all__ = ["AHnet", "Ahnet", "AHNet"] + + +class Bottleneck3x3x1(nn.Module): + + expansion = 4 + + def __init__( + self, + spatial_dims: int, + inplanes: int, + planes: int, + stride: Union[Sequence[int], int] = 1, + downsample: Optional[nn.Sequential] = None, + ) -> None: + + super().__init__() + + conv_type = Conv[Conv.CONV, spatial_dims] + norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims] + pool_type: Type[Union[nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims] + relu_type: Type[nn.ReLU] = Act[Act.RELU] + + self.conv1 = conv_type(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = norm_type(planes) + self.conv2 = conv_type( + planes, + planes, + kernel_size=(3, 3, 1)[-spatial_dims:], + stride=stride, + padding=(1, 1, 0)[-spatial_dims:], + bias=False, + ) + self.bn2 = norm_type(planes) + self.conv3 = conv_type(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = norm_type(planes * 4) + self.relu = relu_type(inplace=True) + self.downsample = downsample + self.stride = stride + self.pool = pool_type(kernel_size=(1, 1, 2)[-spatial_dims:], stride=(1, 1, 2)[-spatial_dims:]) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + if out.size() != residual.size(): + out = self.pool(out) + + out += residual + out = 
self.relu(out) + + return out + + +class Projection(nn.Sequential): + def __init__(self, spatial_dims: int, num_input_features: int, num_output_features: int): + super().__init__() + + conv_type = Conv[Conv.CONV, spatial_dims] + norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims] + relu_type: Type[nn.ReLU] = Act[Act.RELU] + + self.add_module("norm", norm_type(num_input_features)) + self.add_module("relu", relu_type(inplace=True)) + self.add_module("conv", conv_type(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) + + +class DenseBlock(nn.Sequential): + def __init__( + self, + spatial_dims: int, + num_layers: int, + num_input_features: int, + bn_size: int, + growth_rate: int, + dropout_prob: float, + ): + super().__init__() + for i in range(num_layers): + layer = Pseudo3DLayer( + spatial_dims, num_input_features + i * growth_rate, growth_rate, bn_size, dropout_prob + ) + self.add_module("denselayer%d" % (i + 1), layer) + + +class UpTransition(nn.Sequential): + def __init__( + self, spatial_dims: int, num_input_features: int, num_output_features: int, upsample_mode: str = "transpose" + ): + super().__init__() + + conv_type = Conv[Conv.CONV, spatial_dims] + norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims] + relu_type: Type[nn.ReLU] = Act[Act.RELU] + + self.add_module("norm", norm_type(num_input_features)) + self.add_module("relu", relu_type(inplace=True)) + self.add_module("conv", conv_type(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) + if upsample_mode == "transpose": + conv_trans_type = Conv[Conv.CONVTRANS, spatial_dims] + self.add_module( + "up", conv_trans_type(num_output_features, num_output_features, kernel_size=2, stride=2, bias=False) + ) + else: + align_corners: Optional[bool] = None + if upsample_mode in ["trilinear", "bilinear"]: + align_corners = True + self.add_module("up", nn.Upsample(scale_factor=2, mode=upsample_mode, align_corners=align_corners)) + + +class Final(nn.Sequential): + def __init__( + self, spatial_dims: int, num_input_features: int, num_output_features: int, upsample_mode: str = "transpose" + ): + super().__init__() + + conv_type = Conv[Conv.CONV, spatial_dims] + norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims] + relu_type: Type[nn.ReLU] = Act[Act.RELU] + + self.add_module("norm", norm_type(num_input_features)) + self.add_module("relu", relu_type(inplace=True)) + self.add_module( + "conv", + conv_type( + num_input_features, + num_output_features, + kernel_size=(3, 3, 1)[-spatial_dims:], + stride=1, + padding=(1, 1, 0)[-spatial_dims:], + bias=False, + ), + ) + if upsample_mode == "transpose": + conv_trans_type = Conv[Conv.CONVTRANS, spatial_dims] + self.add_module( + "up", conv_trans_type(num_output_features, num_output_features, kernel_size=2, stride=2, bias=False) + ) + else: + align_corners: Optional[bool] = None + if upsample_mode in ["trilinear", "bilinear"]: + align_corners = True + self.add_module("up", nn.Upsample(scale_factor=2, mode=upsample_mode, align_corners=align_corners)) + + +class Pseudo3DLayer(nn.Module): + def __init__(self, spatial_dims: int, num_input_features: int, growth_rate: int, bn_size: int, dropout_prob: float): + super().__init__() + # 1x1x1 + + conv_type = Conv[Conv.CONV, spatial_dims] + norm_type: Type[Union[nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims] + relu_type: Type[nn.ReLU] = Act[Act.RELU] + + self.bn1 = norm_type(num_input_features) 
+ self.relu1 = relu_type(inplace=True) + self.conv1 = conv_type(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False) + # 3x3x1 + self.bn2 = norm_type(bn_size * growth_rate) + self.relu2 = relu_type(inplace=True) + self.conv2 = conv_type( + bn_size * growth_rate, + growth_rate, + kernel_size=(3, 3, 1)[-spatial_dims:], + stride=1, + padding=(1, 1, 0)[-spatial_dims:], + bias=False, + ) + # 1x1x3 + self.bn3 = norm_type(growth_rate) + self.relu3 = relu_type(inplace=True) + self.conv3 = conv_type( + growth_rate, + growth_rate, + kernel_size=(1, 1, 3)[-spatial_dims:], + stride=1, + padding=(0, 0, 1)[-spatial_dims:], + bias=False, + ) + # 1x1x1 + self.bn4 = norm_type(growth_rate) + self.relu4 = relu_type(inplace=True) + self.conv4 = conv_type(growth_rate, growth_rate, kernel_size=1, stride=1, bias=False) + self.dropout_prob = dropout_prob + + def forward(self, x): + inx = x + x = self.bn1(x) + x = self.relu1(x) + x = self.conv1(x) + + x = self.bn2(x) + x = self.relu2(x) + x3x3x1 = self.conv2(x) + + x = self.bn3(x3x3x1) + x = self.relu3(x) + x1x1x3 = self.conv3(x) + + x = x3x3x1 + x1x1x3 + x = self.bn4(x) + x = self.relu4(x) + new_features = self.conv4(x) + + self.dropout_prob = 0.0 # dropout is forced off here: it would cause trouble, + # since the train mode is used for inference + if self.dropout_prob > 0.0: + new_features = F.dropout(new_features, p=self.dropout_prob, training=self.training) + return torch.cat([inx, new_features], 1) + + +class PSP(nn.Module): + def __init__(self, spatial_dims: int, psp_block_num: int, in_ch: int, upsample_mode: str = "transpose"): + super().__init__() + self.up_modules = nn.ModuleList() + conv_type = Conv[Conv.CONV, spatial_dims] + pool_type: Type[Union[nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims] + + self.pool_modules = nn.ModuleList() + self.project_modules = nn.ModuleList() + + for i in range(psp_block_num): + size = (2 ** (i + 3), 2 ** (i + 3), 1)[-spatial_dims:] + self.pool_modules.append(pool_type(kernel_size=size, stride=size)) + self.project_modules.append( + conv_type(in_ch, 1, kernel_size=(1, 1, 1)[-spatial_dims:], stride=1, padding=(1, 1, 0)[-spatial_dims:]) + ) + + self.spatial_dims = spatial_dims + self.psp_block_num = psp_block_num + self.upsample_mode = upsample_mode + + if self.upsample_mode == "transpose": + conv_trans_type = Conv[Conv.CONVTRANS, spatial_dims] + for i in range(psp_block_num): + size = (2 ** (i + 3), 2 ** (i + 3), 1)[-spatial_dims:] + pad_size = (2 ** (i + 3), 2 ** (i + 3), 0)[-spatial_dims:] + self.up_modules.append(conv_trans_type(1, 1, kernel_size=size, stride=size, padding=pad_size)) + + def forward(self, x): + outputs = [] + if self.upsample_mode == "transpose": + for (project_module, pool_module, up_module) in zip( + self.project_modules, self.pool_modules, self.up_modules + ): + output = up_module(project_module(pool_module(x))) + outputs.append(output) + else: + for (project_module, pool_module) in zip(self.project_modules, self.pool_modules): + interpolate_size = x.shape[2:] + align_corners: Optional[bool] = None + if self.upsample_mode in ["trilinear", "bilinear"]: + align_corners = True + output = F.interpolate( + project_module(pool_module(x)), + size=interpolate_size, + mode=self.upsample_mode, + align_corners=align_corners, + ) + outputs.append(output) + x = torch.cat(outputs, dim=1) + return x + + +class AHNet(nn.Module): + """ + AHNet based on `Anisotropic Hybrid Network `_. + Adapted from `lsqshr's official code `_.
+ Unlike the original network, which supports only 3D inputs, this implementation also supports 2D inputs. + According to the `tests for deconvolutions `_, using + ``"transpose"`` rather than linear interpolations is faster. Therefore, this implementation sets ``"transpose"`` + as the default upsampling method. + + To meet the requirements of the structure, the input size for each spatial dimension + (except the last one) should be divisible by 2 ** (psp_block_num + 3) and no less than 32 in ``transpose`` mode, + and should be divisible by 32 and no less than 2 ** (psp_block_num + 3) in other upsample modes. + In addition, the input size for the last spatial dimension should be divisible by 32, and at least one spatial size + should be no less than 64. + + Args: + layers: number of residual blocks for the 4 layers of the network (layer1...layer4). Defaults to ``(3, 4, 6, 3)``. + spatial_dims: spatial dimension of the input data. Defaults to 3. + in_channels: number of input channels for the network. Defaults to 1. + out_channels: number of output channels for the network. Defaults to 1. + psp_block_num: the number of pyramid volumetric pooling modules used at the end of the network before the final + output layer for extracting multiscale features. The number should be an integer in [0, 4]. Defaults + to 4. + upsample_mode: [``"transpose"``, ``"bilinear"``, ``"trilinear"``, ``"nearest"``] + The mode of upsampling manipulations. + Using the last two modes cannot guarantee the model's reproducibility. Defaults to ``"transpose"``. + + - ``"transpose"``, uses transposed convolution layers. + - ``"bilinear"``, uses bilinear interpolation. + - ``"trilinear"``, uses trilinear interpolation. + - ``"nearest"``, uses nearest-neighbor interpolation. + pretrained: whether to load pretrained weights from ResNet50 to initialize convolution layers. Defaults to False. + progress: if True, displays a progress bar of the download of pretrained weights to stderr.
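+ + Examples (illustrative constructions; input tensors must still satisfy the size constraints above):: + + # 3D AHNet with a single input channel and two output channels + >>> net = AHNet(spatial_dims=3, in_channels=1, out_channels=2) + + # 2D AHNet with three pyramid volumetric pooling modules and bilinear upsampling + >>> net = AHNet(spatial_dims=2, psp_block_num=3, upsample_mode="bilinear")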
+ """ + + def __init__( + self, + layers: tuple = (3, 4, 6, 3), + spatial_dims: int = 3, + in_channels: int = 1, + out_channels: int = 1, + psp_block_num: int = 4, + upsample_mode: str = "transpose", + pretrained: bool = False, + progress: bool = True, + ): + self.inplanes = 64 + super().__init__() + + conv_type = Conv[Conv.CONV, spatial_dims] + conv_trans_type = Conv[Conv.CONVTRANS, spatial_dims] + norm_type = Norm[Norm.BATCH, spatial_dims] + pool_type: Type[Union[nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims] + relu_type: Type[nn.ReLU] = Act[Act.RELU] + conv2d_type: Type[nn.Conv2d] = Conv[Conv.CONV, 2] + norm2d_type: Type[nn.BatchNorm2d] = Norm[Norm.BATCH, 2] + + self.conv2d_type = conv2d_type + self.norm2d_type = norm2d_type + self.conv_type = conv_type + self.norm_type = norm_type + self.relu_type = relu_type + self.pool_type = pool_type + self.spatial_dims = spatial_dims + self.psp_block_num = psp_block_num + self.psp = None + + if spatial_dims not in [2, 3]: + raise AssertionError("spatial_dims can only be 2 or 3.") + if psp_block_num not in [0, 1, 2, 3, 4]: + raise AssertionError("psp_block_num should be an integer that belongs to [0, 4].") + + self.conv1 = conv_type( + in_channels, + 64, + kernel_size=(7, 7, 3)[-spatial_dims:], + stride=(2, 2, 1)[-spatial_dims:], + padding=(3, 3, 1)[-spatial_dims:], + bias=False, + ) + self.pool1 = pool_type(kernel_size=(1, 1, 2)[-spatial_dims:], stride=(1, 1, 2)[-spatial_dims:]) + self.bn0 = norm_type(64) + self.relu = relu_type(inplace=True) + if upsample_mode in ["transpose", "nearest"]: + # To maintain the determinism, the value of kernel_size and stride should be the same. + # (you can check this link for reference: https://github.com/Project-MONAI/MONAI/pull/815 ) + self.maxpool = pool_type(kernel_size=(2, 2, 2)[-spatial_dims:], stride=2) + else: + self.maxpool = pool_type(kernel_size=(3, 3, 3)[-spatial_dims:], stride=2, padding=1) + + self.layer1 = self._make_layer(Bottleneck3x3x1, 64, layers[0], stride=1) + self.layer2 = self._make_layer(Bottleneck3x3x1, 128, layers[1], stride=2) + self.layer3 = self._make_layer(Bottleneck3x3x1, 256, layers[2], stride=2) + self.layer4 = self._make_layer(Bottleneck3x3x1, 512, layers[3], stride=2) + + # Make the 3D dense decoder layers + densegrowth = 20 + densebn = 4 + ndenselayer = 3 + + num_init_features = 64 + noutres1 = 256 + noutres2 = 512 + noutres3 = 1024 + noutres4 = 2048 + + self.up0 = UpTransition(spatial_dims, noutres4, noutres3, upsample_mode) + self.dense0 = DenseBlock(spatial_dims, ndenselayer, noutres3, densebn, densegrowth, 0.0) + noutdense = noutres3 + ndenselayer * densegrowth + + self.up1 = UpTransition(spatial_dims, noutdense, noutres2, upsample_mode) + self.dense1 = DenseBlock(spatial_dims, ndenselayer, noutres2, densebn, densegrowth, 0.0) + noutdense1 = noutres2 + ndenselayer * densegrowth + + self.up2 = UpTransition(spatial_dims, noutdense1, noutres1, upsample_mode) + self.dense2 = DenseBlock(spatial_dims, ndenselayer, noutres1, densebn, densegrowth, 0.0) + noutdense2 = noutres1 + ndenselayer * densegrowth + + self.trans1 = Projection(spatial_dims, noutdense2, num_init_features) + self.dense3 = DenseBlock(spatial_dims, ndenselayer, num_init_features, densebn, densegrowth, 0.0) + noutdense3 = num_init_features + densegrowth * ndenselayer + + self.up3 = UpTransition(spatial_dims, noutdense3, num_init_features, upsample_mode) + self.dense4 = DenseBlock(spatial_dims, ndenselayer, num_init_features, densebn, densegrowth, 0.0) + noutdense4 = num_init_features + densegrowth * 
ndenselayer + + self.psp = PSP(spatial_dims, psp_block_num, noutdense4, upsample_mode) + self.final = Final(spatial_dims, psp_block_num + noutdense4, out_channels, upsample_mode) + + # Initialise parameters + for m in self.modules(): + if isinstance(m, (conv_type, conv_trans_type)): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2.0 / n)) + elif isinstance(m, norm_type): + m.weight.data.fill_(1) + m.bias.data.zero_() + + if pretrained: + net2d = FCN(pretrained=True, progress=progress) + self.copy_from(net2d) + + def _make_layer(self, block: Type[Bottleneck3x3x1], planes: int, blocks: int, stride: int = 1) -> nn.Sequential: + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + self.conv_type( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=(stride, stride, 1)[: self.spatial_dims], + bias=False, + ), + self.pool_type( + kernel_size=(1, 1, stride)[: self.spatial_dims], stride=(1, 1, stride)[: self.spatial_dims] + ), + self.norm_type(planes * block.expansion), + ) + + layers = [] + layers.append( + block(self.spatial_dims, self.inplanes, planes, (stride, stride, 1)[: self.spatial_dims], downsample) + ) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.spatial_dims, self.inplanes, planes)) + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.pool1(x) + x = self.bn0(x) + x = self.relu(x) + conv_x = x + x = self.maxpool(x) + pool_x = x + + fm1 = self.layer1(x) + fm2 = self.layer2(fm1) + fm3 = self.layer3(fm2) + fm4 = self.layer4(fm3) + + sum0 = self.up0(fm4) + fm3 + d0 = self.dense0(sum0) + + sum1 = self.up1(d0) + fm2 + d1 = self.dense1(sum1) + + sum2 = self.up2(d1) + fm1 + d2 = self.dense2(sum2) + + sum3 = self.trans1(d2) + pool_x + d3 = self.dense3(sum3) + + sum4 = self.up3(d3) + conv_x + d4 = self.dense4(sum4) + if self.psp_block_num > 0: + psp = self.psp(d4) + x = torch.cat((psp, d4), dim=1) + else: + x = d4 + return self.final(x) + + def copy_from(self, net): + # This method only supports the 3D AHNet; it assumes the network's input channel is 1.
+ p2d, p3d = next(net.conv1.parameters()), next(self.conv1.parameters()) + + # From 64x3x7x7 -> 64x3x7x7x1 -> 64x1x7x7x3 + weights = p2d.data.unsqueeze(dim=4).permute(0, 4, 2, 3, 1).clone() + p3d.data = weights.repeat([1, p3d.shape[1], 1, 1, 1]) + + # Copy the initial module BN0 + copy_bn_param(net.bn0, self.bn0) + + # Copy layer1 to layer4 + for i in range(1, 5): + layer_num = "layer" + str(i) + + layer_2d = [] + layer_3d = [] + for m1 in vars(net)["_modules"][layer_num].modules(): + if isinstance(m1, (self.norm2d_type, self.conv2d_type)): + layer_2d.append(m1) + for m2 in vars(self)["_modules"][layer_num].modules(): + if isinstance(m2, (self.norm_type, self.conv_type)): + layer_3d.append(m2) + + for m1, m2 in zip(layer_2d, layer_3d): + if isinstance(m1, self.conv2d_type): + copy_conv_param(m1, m2) + if isinstance(m1, self.norm2d_type): + copy_bn_param(m1, m2) + + +def copy_conv_param(module2d, module3d): + for p2d, p3d in zip(module2d.parameters(), module3d.parameters()): + p3d.data[:] = p2d.data.unsqueeze(dim=4).clone()[:] + + +def copy_bn_param(module2d, module3d): + for p2d, p3d in zip(module2d.parameters(), module3d.parameters()): + p3d.data[:] = p2d.data[:] # Two parameter gamma and beta + + +AHnet = Ahnet = AHNet diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/basic_unet.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/basic_unet.py new file mode 100644 index 0000000000000000000000000000000000000000..1e468465767912fc4bc218eb7ce268b4a515656a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/basic_unet.py @@ -0,0 +1,302 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Sequence, Union + +import torch +import torch.nn as nn + +from monai.networks.blocks import Convolution, UpSample +from monai.networks.layers.factories import Conv, Pool +from monai.utils import deprecated_arg, ensure_tuple_rep + +__all__ = ["BasicUnet", "Basicunet", "basicunet", "BasicUNet"] + + +class TwoConv(nn.Sequential): + """two convolutions.""" + + @deprecated_arg(name="dim", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead.") + def __init__( + self, + spatial_dims: int, + in_chns: int, + out_chns: int, + act: Union[str, tuple], + norm: Union[str, tuple], + bias: bool, + dropout: Union[float, tuple] = 0.0, + dim: Optional[int] = None, + ): + """ + Args: + spatial_dims: number of spatial dimensions. + in_chns: number of input channels. + out_chns: number of output channels. + act: activation type and arguments. + norm: feature normalization type and arguments. + bias: whether to have a bias term in convolution blocks. + dropout: dropout ratio. Defaults to no dropout. + + .. deprecated:: 0.6.0 + ``dim`` is deprecated, use ``spatial_dims`` instead. 
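+ + Example (a minimal sketch; the input shape and string factory names are assumptions for illustration):: + + >>> block = TwoConv(spatial_dims=2, in_chns=1, out_chns=32, act="relu", norm="batch", bias=True) + >>> block(torch.rand(1, 1, 16, 16)).shape + torch.Size([1, 32, 16, 16])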
+ """ + super().__init__() + + if dim is not None: + spatial_dims = dim + conv_0 = Convolution(spatial_dims, in_chns, out_chns, act=act, norm=norm, dropout=dropout, bias=bias, padding=1) + conv_1 = Convolution( + spatial_dims, out_chns, out_chns, act=act, norm=norm, dropout=dropout, bias=bias, padding=1 + ) + self.add_module("conv_0", conv_0) + self.add_module("conv_1", conv_1) + + +class Down(nn.Sequential): + """maxpooling downsampling and two convolutions.""" + + @deprecated_arg(name="dim", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead.") + def __init__( + self, + spatial_dims: int, + in_chns: int, + out_chns: int, + act: Union[str, tuple], + norm: Union[str, tuple], + bias: bool, + dropout: Union[float, tuple] = 0.0, + dim: Optional[int] = None, + ): + """ + Args: + spatial_dims: number of spatial dimensions. + in_chns: number of input channels. + out_chns: number of output channels. + act: activation type and arguments. + norm: feature normalization type and arguments. + bias: whether to have a bias term in convolution blocks. + dropout: dropout ratio. Defaults to no dropout. + + .. deprecated:: 0.6.0 + ``dim`` is deprecated, use ``spatial_dims`` instead. + """ + super().__init__() + if dim is not None: + spatial_dims = dim + max_pooling = Pool["MAX", spatial_dims](kernel_size=2) + convs = TwoConv(spatial_dims, in_chns, out_chns, act, norm, bias, dropout) + self.add_module("max_pooling", max_pooling) + self.add_module("convs", convs) + + +class UpCat(nn.Module): + """upsampling, concatenation with the encoder feature map, two convolutions""" + + @deprecated_arg(name="dim", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead.") + def __init__( + self, + spatial_dims: int, + in_chns: int, + cat_chns: int, + out_chns: int, + act: Union[str, tuple], + norm: Union[str, tuple], + bias: bool, + dropout: Union[float, tuple] = 0.0, + upsample: str = "deconv", + pre_conv: Optional[Union[nn.Module, str]] = "default", + interp_mode: str = "linear", + align_corners: Optional[bool] = True, + halves: bool = True, + dim: Optional[int] = None, + ): + """ + Args: + spatial_dims: number of spatial dimensions. + in_chns: number of input channels to be upsampled. + cat_chns: number of channels from the encoder feature map to be concatenated. + out_chns: number of output channels. + act: activation type and arguments. + norm: feature normalization type and arguments. + bias: whether to have a bias term in convolution blocks. + dropout: dropout ratio. Defaults to no dropout. + upsample: upsampling mode, available options are + ``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``. + pre_conv: a conv block applied before upsampling. + Only used in the "nontrainable" or "pixelshuffle" mode. + interp_mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``} + Only used in the "nontrainable" mode. + align_corners: set the align_corners parameter for upsample. Defaults to True. + Only used in the "nontrainable" mode. + halves: whether to halve the number of channels during upsampling. + This parameter has no effect in ``nontrainable`` mode if ``pre_conv`` is `None`. + + .. deprecated:: 0.6.0 + ``dim`` is deprecated, use ``spatial_dims`` instead.
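+ + Example (a minimal sketch; the feature-map shapes are assumptions for illustration):: + + >>> up = UpCat(spatial_dims=2, in_chns=64, cat_chns=32, out_chns=32, act="relu", norm="batch", bias=True) + >>> out = up(torch.rand(1, 64, 8, 8), torch.rand(1, 32, 16, 16)) + >>> out.shape + torch.Size([1, 32, 16, 16])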
+ """ + super().__init__() + if dim is not None: + spatial_dims = dim + if upsample == "nontrainable" and pre_conv is None: + up_chns = in_chns + else: + up_chns = in_chns // 2 if halves else in_chns + self.upsample = UpSample( + spatial_dims, + in_chns, + up_chns, + 2, + mode=upsample, + pre_conv=pre_conv, + interp_mode=interp_mode, + align_corners=align_corners, + ) + self.convs = TwoConv(spatial_dims, cat_chns + up_chns, out_chns, act, norm, bias, dropout) + + def forward(self, x: torch.Tensor, x_e: Optional[torch.Tensor]): + """ + + Args: + x: features to be upsampled. + x_e: features from the encoder. + """ + x_0 = self.upsample(x) + + if x_e is not None: + # handling spatial shapes due to the 2x maxpooling with odd edge lengths. + dimensions = len(x.shape) - 2 + sp = [0] * (dimensions * 2) + for i in range(dimensions): + if x_e.shape[-i - 1] != x_0.shape[-i - 1]: + sp[i * 2 + 1] = 1 + x_0 = torch.nn.functional.pad(x_0, sp, "replicate") + x = self.convs(torch.cat([x_e, x_0], dim=1)) # input channels: (cat_chns + up_chns) + else: + x = self.convs(x_0) + + return x + + +class BasicUNet(nn.Module): + @deprecated_arg( + name="dimensions", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead." + ) + def __init__( + self, + spatial_dims: int = 3, + in_channels: int = 1, + out_channels: int = 2, + features: Sequence[int] = (32, 32, 64, 128, 256, 32), + act: Union[str, tuple] = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}), + norm: Union[str, tuple] = ("instance", {"affine": True}), + bias: bool = True, + dropout: Union[float, tuple] = 0.0, + upsample: str = "deconv", + dimensions: Optional[int] = None, + ): + """ + A UNet implementation with 1D/2D/3D supports. + + Based on: + + Falk et al. "U-Net – Deep Learning for Cell Counting, Detection, and + Morphometry". Nature Methods 16, 67–70 (2019), DOI: + http://dx.doi.org/10.1038/s41592-018-0261-2 + + Args: + spatial_dims: number of spatial dimensions. Defaults to 3 for spatial 3D inputs. + in_channels: number of input channels. Defaults to 1. + out_channels: number of output channels. Defaults to 2. + features: six integers as numbers of features. + Defaults to ``(32, 32, 64, 128, 256, 32)``, + + - the first five values correspond to the five-level encoder feature sizes. + - the last value corresponds to the feature size after the last upsampling. + + act: activation type and arguments. Defaults to LeakyReLU. + norm: feature normalization type and arguments. Defaults to instance norm. + bias: whether to have a bias term in convolution blocks. Defaults to True. + According to `Performance Tuning Guide `_, + if a conv layer is directly followed by a batch norm layer, bias should be False. + dropout: dropout ratio. Defaults to no dropout. + upsample: upsampling mode, available options are + ``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``. + + .. deprecated:: 0.6.0 + ``dimensions`` is deprecated, use ``spatial_dims`` instead. 
+ + Examples:: + + # for spatial 2D + >>> net = BasicUNet(spatial_dims=2, features=(64, 128, 256, 512, 1024, 128)) + + # for spatial 2D, with group norm + >>> net = BasicUNet(spatial_dims=2, features=(64, 128, 256, 512, 1024, 128), norm=("group", {"num_groups": 4})) + + # for spatial 3D + >>> net = BasicUNet(spatial_dims=3, features=(32, 32, 64, 128, 256, 32)) + + See Also + + - :py:class:`monai.networks.nets.DynUNet` + - :py:class:`monai.networks.nets.UNet` + + """ + super().__init__() + if dimensions is not None: + spatial_dims = dimensions + + fea = ensure_tuple_rep(features, 6) + print(f"BasicUNet features: {fea}.") + + self.conv_0 = TwoConv(spatial_dims, in_channels, features[0], act, norm, bias, dropout) + self.down_1 = Down(spatial_dims, fea[0], fea[1], act, norm, bias, dropout) + self.down_2 = Down(spatial_dims, fea[1], fea[2], act, norm, bias, dropout) + self.down_3 = Down(spatial_dims, fea[2], fea[3], act, norm, bias, dropout) + self.down_4 = Down(spatial_dims, fea[3], fea[4], act, norm, bias, dropout) + + self.upcat_4 = UpCat(spatial_dims, fea[4], fea[3], fea[3], act, norm, bias, dropout, upsample) + self.upcat_3 = UpCat(spatial_dims, fea[3], fea[2], fea[2], act, norm, bias, dropout, upsample) + self.upcat_2 = UpCat(spatial_dims, fea[2], fea[1], fea[1], act, norm, bias, dropout, upsample) + self.upcat_1 = UpCat(spatial_dims, fea[1], fea[0], fea[5], act, norm, bias, dropout, upsample, halves=False) + + self.final_conv = Conv["conv", spatial_dims](fea[5], out_channels, kernel_size=1) + + def forward(self, x: torch.Tensor): + """ + Args: + x: input should have spatially N dimensions + ``(Batch, in_channels, dim_0[, dim_1, ..., dim_N])``, N is defined by `dimensions`. + It is recommended to have ``dim_n % 16 == 0`` to ensure all maxpooling inputs have + even edge lengths. + + Returns: + A torch Tensor of "raw" predictions in shape + ``(Batch, out_channels, dim_0[, dim_1, ..., dim_N])``. + """ + x0 = self.conv_0(x) + + x1 = self.down_1(x0) + x2 = self.down_2(x1) + x3 = self.down_3(x2) + x4 = self.down_4(x3) + + u4 = self.upcat_4(x4, x3) + u3 = self.upcat_3(u4, x2) + u2 = self.upcat_2(u3, x1) + u1 = self.upcat_1(u2, x0) + + logits = self.final_conv(u1) + return logits + + +BasicUnet = Basicunet = basicunet = BasicUNet diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/densenet.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/densenet.py new file mode 100644 index 0000000000000000000000000000000000000000..52bd2fa99419b09212165227cb63a10016d8e77e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/densenet.py @@ -0,0 +1,379 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import re +from collections import OrderedDict +from typing import Callable, Sequence, Type, Union + +import torch +import torch.nn as nn +from torch.hub import load_state_dict_from_url + +from monai.networks.layers.factories import Conv, Dropout, Pool +from monai.networks.layers.utils import get_act_layer, get_norm_layer +from monai.utils.module import look_up_option + +__all__ = [ + "DenseNet", + "Densenet", + "DenseNet121", + "densenet121", + "Densenet121", + "DenseNet169", + "densenet169", + "Densenet169", + "DenseNet201", + "densenet201", + "Densenet201", + "DenseNet264", + "densenet264", + "Densenet264", +] + + +class _DenseLayer(nn.Module): + def __init__( + self, + spatial_dims: int, + in_channels: int, + growth_rate: int, + bn_size: int, + dropout_prob: float, + act: Union[str, tuple] = ("relu", {"inplace": True}), + norm: Union[str, tuple] = "batch", + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions of the input image. + in_channels: number of input channels. + growth_rate: how many filters to add each layer (k in paper). + bn_size: multiplicative factor for number of bottleneck layers. + (i.e. bn_size * k features in the bottleneck layer) + dropout_prob: dropout rate after each dense layer. + act: activation type and arguments. Defaults to relu. + norm: feature normalization type and arguments. Defaults to batch norm. + """ + super().__init__() + + out_channels = bn_size * growth_rate + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + dropout_type: Callable = Dropout[Dropout.DROPOUT, spatial_dims] + + self.layers = nn.Sequential() + + self.layers.add_module("norm1", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels)) + self.layers.add_module("relu1", get_act_layer(name=act)) + self.layers.add_module("conv1", conv_type(in_channels, out_channels, kernel_size=1, bias=False)) + + self.layers.add_module("norm2", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=out_channels)) + self.layers.add_module("relu2", get_act_layer(name=act)) + self.layers.add_module("conv2", conv_type(out_channels, growth_rate, kernel_size=3, padding=1, bias=False)) + + if dropout_prob > 0: + self.layers.add_module("dropout", dropout_type(dropout_prob)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + new_features = self.layers(x) + return torch.cat([x, new_features], 1) + + +class _DenseBlock(nn.Sequential): + def __init__( + self, + spatial_dims: int, + layers: int, + in_channels: int, + bn_size: int, + growth_rate: int, + dropout_prob: float, + act: Union[str, tuple] = ("relu", {"inplace": True}), + norm: Union[str, tuple] = "batch", + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions of the input image. + layers: number of layers in the block. + in_channels: number of input channels. + bn_size: multiplicative factor for number of bottleneck layers. + (i.e. bn_size * k features in the bottleneck layer) + growth_rate: how many filters to add each layer (k in paper). + dropout_prob: dropout rate after each dense layer. + act: activation type and arguments. Defaults to relu. + norm: feature normalization type and arguments. Defaults to batch norm.
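+ + Example (illustrative; the output has in_channels + layers * growth_rate channels):: + + >>> block = _DenseBlock(spatial_dims=2, layers=4, in_channels=64, bn_size=4, growth_rate=32, dropout_prob=0.0) + >>> block(torch.rand(1, 64, 8, 8)).shape + torch.Size([1, 192, 8, 8])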
+ """ + super().__init__() + for i in range(layers): + layer = _DenseLayer(spatial_dims, in_channels, growth_rate, bn_size, dropout_prob, act=act, norm=norm) + in_channels += growth_rate + self.add_module("denselayer%d" % (i + 1), layer) + + +class _Transition(nn.Sequential): + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + act: Union[str, tuple] = ("relu", {"inplace": True}), + norm: Union[str, tuple] = "batch", + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions of the input image. + in_channels: number of input channels. + out_channels: number of output channels. + act: activation type and arguments. Defaults to relu. + norm: feature normalization type and arguments. Defaults to batch norm. + """ + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + pool_type: Callable = Pool[Pool.AVG, spatial_dims] + + self.add_module("norm", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels)) + self.add_module("relu", get_act_layer(name=act)) + self.add_module("conv", conv_type(in_channels, out_channels, kernel_size=1, bias=False)) + self.add_module("pool", pool_type(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + """ + Densenet based on: `Densely Connected Convolutional Networks `_. + Adapted from PyTorch Hub 2D version: https://pytorch.org/vision/stable/models.html#id16. + This network is non-deterministic when `spatial_dims` is 3 and CUDA is enabled. Please check the link below + for more details: + https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms + + Args: + spatial_dims: number of spatial dimensions of the input image. + in_channels: number of input channels. + out_channels: number of output classes. + init_features: number of filters in the first convolution layer. + growth_rate: how many filters to add each layer (k in paper). + block_config: how many layers in each pooling block. + bn_size: multiplicative factor for number of bottleneck layers. + (i.e. bn_size * k features in the bottleneck layer) + act: activation type and arguments. Defaults to relu. + norm: feature normalization type and arguments. Defaults to batch norm. + dropout_prob: dropout rate after each dense layer.
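+ + Examples (illustrative):: + + # 3D classification network with one input channel and 2 output classes + >>> net = DenseNet(spatial_dims=3, in_channels=1, out_channels=2) + + # 2D network with the DenseNet121 block configuration + >>> net = DenseNet(spatial_dims=2, in_channels=3, out_channels=1000, block_config=(6, 12, 24, 16))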
+ """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + init_features: int = 64, + growth_rate: int = 32, + block_config: Sequence[int] = (6, 12, 24, 16), + bn_size: int = 4, + act: Union[str, tuple] = ("relu", {"inplace": True}), + norm: Union[str, tuple] = "batch", + dropout_prob: float = 0.0, + ) -> None: + + super().__init__() + + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims] + pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims] + avg_pool_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d]] = Pool[ + Pool.ADAPTIVEAVG, spatial_dims + ] + + self.features = nn.Sequential( + OrderedDict( + [ + ("conv0", conv_type(in_channels, init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ("norm0", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=init_features)), + ("relu0", get_act_layer(name=act)), + ("pool0", pool_type(kernel_size=3, stride=2, padding=1)), + ] + ) + ) + + in_channels = init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock( + spatial_dims=spatial_dims, + layers=num_layers, + in_channels=in_channels, + bn_size=bn_size, + growth_rate=growth_rate, + dropout_prob=dropout_prob, + act=act, + norm=norm, + ) + self.features.add_module(f"denseblock{i + 1}", block) + in_channels += num_layers * growth_rate + if i == len(block_config) - 1: + self.features.add_module( + "norm5", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels) + ) + else: + _out_channels = in_channels // 2 + trans = _Transition( + spatial_dims, in_channels=in_channels, out_channels=_out_channels, act=act, norm=norm + ) + self.features.add_module(f"transition{i + 1}", trans) + in_channels = _out_channels + + # pooling and classification + self.class_layers = nn.Sequential( + OrderedDict( + [ + ("relu", get_act_layer(name=act)), + ("pool", avg_pool_type(1)), + ("flatten", nn.Flatten(1)), + ("out", nn.Linear(in_channels, out_channels)), + ] + ) + ) + + for m in self.modules(): + if isinstance(m, conv_type): + nn.init.kaiming_normal_(torch.as_tensor(m.weight)) + elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): + nn.init.constant_(torch.as_tensor(m.weight), 1) + nn.init.constant_(torch.as_tensor(m.bias), 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(torch.as_tensor(m.bias), 0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + x = self.class_layers(x) + return x + + +def _load_state_dict(model: nn.Module, arch: str, progress: bool): + """ + This function is used to load pretrained models. + Adapted from PyTorch Hub 2D version: https://pytorch.org/vision/stable/models.html#id16. + + """ + model_urls = { + "densenet121": "https://download.pytorch.org/models/densenet121-a639ec97.pth", + "densenet169": "https://download.pytorch.org/models/densenet169-b2777c0a.pth", + "densenet201": "https://download.pytorch.org/models/densenet201-c1103571.pth", + } + model_url = look_up_option(arch, model_urls, None) + if model_url is None: + raise ValueError( + "only 'densenet121', 'densenet169' and 'densenet201' are supported to load pretrained weights." 
+ ) + + pattern = re.compile( + r"^(.*denselayer\d+)(\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" + ) + + state_dict = load_state_dict_from_url(model_url, progress=progress) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + ".layers" + res.group(2) + res.group(3) + state_dict[new_key] = state_dict[key] + del state_dict[key] + + model_dict = model.state_dict() + state_dict = { + k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape) + } + model_dict.update(state_dict) + model.load_state_dict(model_dict) + + +class DenseNet121(DenseNet): + """DenseNet121 with optional pretrained support when `spatial_dims` is 2.""" + + def __init__( + self, + init_features: int = 64, + growth_rate: int = 32, + block_config: Sequence[int] = (6, 12, 24, 16), + pretrained: bool = False, + progress: bool = True, + **kwargs, + ) -> None: + super().__init__(init_features=init_features, growth_rate=growth_rate, block_config=block_config, **kwargs) + if pretrained: + if kwargs["spatial_dims"] > 2: + raise NotImplementedError( + "Parameter `spatial_dims` is > 2; currently PyTorch Hub does not " + "provide pretrained models for more than two spatial dimensions." + ) + _load_state_dict(self, "densenet121", progress) + + +class DenseNet169(DenseNet): + """DenseNet169 with optional pretrained support when `spatial_dims` is 2.""" + + def __init__( + self, + init_features: int = 64, + growth_rate: int = 32, + block_config: Sequence[int] = (6, 12, 32, 32), + pretrained: bool = False, + progress: bool = True, + **kwargs, + ) -> None: + super().__init__(init_features=init_features, growth_rate=growth_rate, block_config=block_config, **kwargs) + if pretrained: + if kwargs["spatial_dims"] > 2: + raise NotImplementedError( + "Parameter `spatial_dims` is > 2; currently PyTorch Hub does not " + "provide pretrained models for more than two spatial dimensions." + ) + _load_state_dict(self, "densenet169", progress) + + +class DenseNet201(DenseNet): + """DenseNet201 with optional pretrained support when `spatial_dims` is 2.""" + + def __init__( + self, + init_features: int = 64, + growth_rate: int = 32, + block_config: Sequence[int] = (6, 12, 48, 32), + pretrained: bool = False, + progress: bool = True, + **kwargs, + ) -> None: + super().__init__(init_features=init_features, growth_rate=growth_rate, block_config=block_config, **kwargs) + if pretrained: + if kwargs["spatial_dims"] > 2: + raise NotImplementedError( + "Parameter `spatial_dims` is > 2; currently PyTorch Hub does not " + "provide pretrained models for more than two spatial dimensions."
+ ) + _load_state_dict(self, "densenet201", progress) + + +class DenseNet264(DenseNet): + """DenseNet264""" + + def __init__( + self, + init_features: int = 64, + growth_rate: int = 32, + block_config: Sequence[int] = (6, 12, 64, 48), + pretrained: bool = False, + progress: bool = True, + **kwargs, + ) -> None: + super().__init__(init_features=init_features, growth_rate=growth_rate, block_config=block_config, **kwargs) + if pretrained: + raise NotImplementedError("Currently PyTorch Hub does not provide densenet264 pretrained models.") + + +Densenet = DenseNet +Densenet121 = densenet121 = DenseNet121 +Densenet169 = densenet169 = DenseNet169 +Densenet201 = densenet201 = DenseNet201 +Densenet264 = densenet264 = DenseNet264 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/efficientnet.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/efficientnet.py new file mode 100644 index 0000000000000000000000000000000000000000..fa5efbc4ef22ba0214196941ddc7688841b7f1d9 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/efficientnet.py @@ -0,0 +1,943 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +import operator +import re +from functools import reduce +from typing import List, NamedTuple, Optional, Tuple, Type, Union + +import torch +from torch import nn +from torch.utils import model_zoo + +from monai.networks.layers.factories import Act, Conv, Pad, Pool +from monai.networks.layers.utils import get_norm_layer +from monai.utils.module import look_up_option + +__all__ = [ + "EfficientNet", + "EfficientNetBN", + "get_efficientnet_image_size", + "drop_connect", + "EfficientNetBNFeatures", + "BlockArgs", +] + +efficientnet_params = { + # model_name: (width_mult, depth_mult, image_size, dropout_rate, dropconnect_rate) + "efficientnet-b0": (1.0, 1.0, 224, 0.2, 0.2), + "efficientnet-b1": (1.0, 1.1, 240, 0.2, 0.2), + "efficientnet-b2": (1.1, 1.2, 260, 0.3, 0.2), + "efficientnet-b3": (1.2, 1.4, 300, 0.3, 0.2), + "efficientnet-b4": (1.4, 1.8, 380, 0.4, 0.2), + "efficientnet-b5": (1.6, 2.2, 456, 0.4, 0.2), + "efficientnet-b6": (1.8, 2.6, 528, 0.5, 0.2), + "efficientnet-b7": (2.0, 3.1, 600, 0.5, 0.2), + "efficientnet-b8": (2.2, 3.6, 672, 0.5, 0.2), + "efficientnet-l2": (4.3, 5.3, 800, 0.5, 0.2), +} + +url_map = { + "efficientnet-b0": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth", + "efficientnet-b1": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth", + "efficientnet-b2": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth", + "efficientnet-b3": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth", + "efficientnet-b4": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth", + "efficientnet-b5": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth", + "efficientnet-b6": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth", + "efficientnet-b7": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth", + # trained with adversarial examples, simplify the name to decrease string length + "b0-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth", + "b1-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth", + "b2-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth", + "b3-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth", + "b4-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth", + "b5-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth", + "b6-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth", + "b7-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth", + "b8-ap": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth", +} + + +class MBConvBlock(nn.Module): + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int, + image_size: List[int], + expand_ratio: int, + se_ratio: Optional[float], + id_skip: 
Optional[bool] = True, + norm: Union[str, tuple] = ("batch", {"eps": 1e-3, "momentum": 0.01}), + drop_connect_rate: Optional[float] = 0.2, + ) -> None: + """ + Mobile Inverted Residual Bottleneck Block. + + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of output channels. + kernel_size: size of the kernel for conv ops. + stride: stride to use for conv ops. + image_size: input image resolution. + expand_ratio: expansion ratio for inverted bottleneck. + se_ratio: squeeze-excitation ratio for se layers. + id_skip: whether to use skip connection. + norm: feature normalization type and arguments. Defaults to batch norm. + drop_connect_rate: dropconnect rate for drop connection (individual weights) layers. + + References: + [1] https://arxiv.org/abs/1704.04861 (MobileNet v1) + [2] https://arxiv.org/abs/1801.04381 (MobileNet v2) + [3] https://arxiv.org/abs/1905.02244 (MobileNet v3) + """ + super().__init__() + + # select the type of N-Dimensional layers to use + # these are based on spatial dims and selected from MONAI factories + conv_type = Conv["conv", spatial_dims] + adaptivepool_type = Pool["adaptiveavg", spatial_dims] + + self.in_channels = in_channels + self.out_channels = out_channels + self.id_skip = id_skip + self.stride = stride + self.expand_ratio = expand_ratio + self.drop_connect_rate = drop_connect_rate + + if (se_ratio is not None) and (0.0 < se_ratio <= 1.0): + self.has_se = True + self.se_ratio = se_ratio + else: + self.has_se = False + + # Expansion phase (Inverted Bottleneck) + inp = in_channels # number of input channels + oup = in_channels * expand_ratio # number of output channels + if self.expand_ratio != 1: + self._expand_conv = conv_type(in_channels=inp, out_channels=oup, kernel_size=1, bias=False) + self._expand_conv_padding = _make_same_padder(self._expand_conv, image_size) + + self._bn0 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=oup) + else: + # need to have the following to fix JIT error: + # "Module 'MBConvBlock' has no attribute '_expand_conv'" + + # FIXME: find a better way to bypass JIT error + self._expand_conv = nn.Identity() + self._expand_conv_padding = nn.Identity() + self._bn0 = nn.Identity() + + # Depthwise convolution phase + self._depthwise_conv = conv_type( + in_channels=oup, + out_channels=oup, + groups=oup, # groups makes it depthwise + kernel_size=kernel_size, + stride=self.stride, + bias=False, + ) + self._depthwise_conv_padding = _make_same_padder(self._depthwise_conv, image_size) + self._bn1 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=oup) + image_size = _calculate_output_image_size(image_size, self.stride) + + # Squeeze and Excitation layer, if desired + if self.has_se: + self._se_adaptpool = adaptivepool_type(1) + num_squeezed_channels = max(1, int(in_channels * self.se_ratio)) + self._se_reduce = conv_type(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1) + self._se_reduce_padding = _make_same_padder(self._se_reduce, [1, 1]) + self._se_expand = conv_type(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1) + self._se_expand_padding = _make_same_padder(self._se_expand, [1, 1]) + + # Pointwise convolution phase + final_oup = out_channels + self._project_conv = conv_type(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False) + self._project_conv_padding = _make_same_padder(self._project_conv, image_size) + self._bn2 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=final_oup) + + 
# swish activation to use - using memory efficient swish by default + # can be switched to normal swish using self.set_swish() function call + self._swish = Act["memswish"](inplace=True) + + def forward(self, inputs: torch.Tensor): + """MBConvBlock's forward function. + + Args: + inputs: Input tensor. + + Returns: + Output of this block after processing. + """ + # Expansion and Depthwise Convolution + x = inputs + if self.expand_ratio != 1: + x = self._expand_conv(self._expand_conv_padding(x)) + x = self._bn0(x) + x = self._swish(x) + + x = self._depthwise_conv(self._depthwise_conv_padding(x)) + x = self._bn1(x) + x = self._swish(x) + + # Squeeze and Excitation + if self.has_se: + x_squeezed = self._se_adaptpool(x) + x_squeezed = self._se_reduce(self._se_reduce_padding(x_squeezed)) + x_squeezed = self._swish(x_squeezed) + x_squeezed = self._se_expand(self._se_expand_padding(x_squeezed)) + x = torch.sigmoid(x_squeezed) * x + + # Pointwise Convolution + x = self._project_conv(self._project_conv_padding(x)) + x = self._bn2(x) + + # Skip connection and drop connect + if self.id_skip and self.stride == 1 and self.in_channels == self.out_channels: + # the combination of skip connection and drop connect brings about stochastic depth. + if self.drop_connect_rate: + x = drop_connect(x, p=self.drop_connect_rate, training=self.training) + x = x + inputs # skip connection + return x + + def set_swish(self, memory_efficient: bool = True) -> None: + """Sets swish function as memory efficient (for training) or standard (for export). + + Args: + memory_efficient (bool): Whether to use memory-efficient version of swish. + """ + self._swish = Act["memswish"](inplace=True) if memory_efficient else Act["swish"](alpha=1.0) + + +class EfficientNet(nn.Module): + def __init__( + self, + blocks_args_str: List[str], + spatial_dims: int = 2, + in_channels: int = 3, + num_classes: int = 1000, + width_coefficient: float = 1.0, + depth_coefficient: float = 1.0, + dropout_rate: float = 0.2, + image_size: int = 224, + norm: Union[str, tuple] = ("batch", {"eps": 1e-3, "momentum": 0.01}), + drop_connect_rate: float = 0.2, + depth_divisor: int = 8, + ) -> None: + """ + EfficientNet based on `Rethinking Model Scaling for Convolutional Neural Networks `_. + Adapted from `EfficientNet-PyTorch `_. + + Args: + blocks_args_str: block definitions. + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + num_classes: number of output classes. + width_coefficient: width multiplier coefficient (w in paper). + depth_coefficient: depth multiplier coefficient (d in paper). + dropout_rate: dropout rate for dropout layers. + image_size: input image resolution. + norm: feature normalization type and arguments. + drop_connect_rate: dropconnect rate for drop connection (individual weights) layers. + depth_divisor: depth divisor for channel rounding.
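+ + Example (an illustrative direct construction; the block strings follow the grammar decoded by ``BlockArgs.from_string``, and ``EfficientNetBN`` below is the usual entry point):: + + >>> blocks = ["r1_k3_s11_e1_i32_o16_se0.25", "r2_k3_s22_e6_i16_o24_se0.25"] + >>> model = EfficientNet(blocks_args_str=blocks, spatial_dims=2, in_channels=3, num_classes=10)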
+ + """ + super().__init__() + + if spatial_dims not in (1, 2, 3): + raise ValueError("spatial_dims can only be 1, 2 or 3.") + + # select the type of N-Dimensional layers to use + # these are based on spatial dims and selected from MONAI factories + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv["conv", spatial_dims] + adaptivepool_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d]] = Pool[ + "adaptiveavg", spatial_dims + ] + + # decode blocks args into arguments for MBConvBlock + blocks_args = [BlockArgs.from_string(s) for s in blocks_args_str] + + # checks for successful decoding of blocks_args_str + if not isinstance(blocks_args, list): + raise ValueError("blocks_args must be a list") + + if blocks_args == []: + raise ValueError("block_args must be non-empty") + + self._blocks_args = blocks_args + self.num_classes = num_classes + self.in_channels = in_channels + self.drop_connect_rate = drop_connect_rate + + # expand input image dimensions to list + current_image_size = [image_size] * spatial_dims + + # Stem + stride = 2 + out_channels = _round_filters(32, width_coefficient, depth_divisor) # number of output channels + self._conv_stem = conv_type(self.in_channels, out_channels, kernel_size=3, stride=stride, bias=False) + self._conv_stem_padding = _make_same_padder(self._conv_stem, current_image_size) + self._bn0 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=out_channels) + current_image_size = _calculate_output_image_size(current_image_size, stride) + + # build MBConv blocks + num_blocks = 0 + self._blocks = nn.Sequential() + + self.extract_stacks = [] + + # update baseline blocks to input/output filters and number of repeats based on width and depth multipliers. + for idx, block_args in enumerate(self._blocks_args): + block_args = block_args._replace( + input_filters=_round_filters(block_args.input_filters, width_coefficient, depth_divisor), + output_filters=_round_filters(block_args.output_filters, width_coefficient, depth_divisor), + num_repeat=_round_repeats(block_args.num_repeat, depth_coefficient), + ) + self._blocks_args[idx] = block_args + + # calculate the total number of blocks - needed for drop_connect estimation + num_blocks += block_args.num_repeat + + if block_args.stride > 1: + self.extract_stacks.append(idx) + + self.extract_stacks.append(len(self._blocks_args)) + + # create and add MBConvBlocks to self._blocks + idx = 0 # block index counter + for stack_idx, block_args in enumerate(self._blocks_args): + blk_drop_connect_rate = self.drop_connect_rate + + # scale drop connect_rate + if blk_drop_connect_rate: + blk_drop_connect_rate *= float(idx) / num_blocks + + sub_stack = nn.Sequential() + # the first block needs to take care of stride and filter size increase. 
+ sub_stack.add_module( + str(idx), + MBConvBlock( + spatial_dims=spatial_dims, + in_channels=block_args.input_filters, + out_channels=block_args.output_filters, + kernel_size=block_args.kernel_size, + stride=block_args.stride, + image_size=current_image_size, + expand_ratio=block_args.expand_ratio, + se_ratio=block_args.se_ratio, + id_skip=block_args.id_skip, + norm=norm, + drop_connect_rate=blk_drop_connect_rate, + ), + ) + idx += 1 # increment blocks index counter + + current_image_size = _calculate_output_image_size(current_image_size, block_args.stride) + if block_args.num_repeat > 1: # modify block_args to keep same output size + block_args = block_args._replace(input_filters=block_args.output_filters, stride=1) + + # add remaining block repeated num_repeat times + for _ in range(block_args.num_repeat - 1): + blk_drop_connect_rate = self.drop_connect_rate + + # scale drop connect_rate + if blk_drop_connect_rate: + blk_drop_connect_rate *= float(idx) / num_blocks + + # add blocks + sub_stack.add_module( + str(idx), + MBConvBlock( + spatial_dims=spatial_dims, + in_channels=block_args.input_filters, + out_channels=block_args.output_filters, + kernel_size=block_args.kernel_size, + stride=block_args.stride, + image_size=current_image_size, + expand_ratio=block_args.expand_ratio, + se_ratio=block_args.se_ratio, + id_skip=block_args.id_skip, + norm=norm, + drop_connect_rate=blk_drop_connect_rate, + ), + ) + idx += 1 # increment blocks index counter + + self._blocks.add_module(str(stack_idx), sub_stack) + + # sanity check to see if len(self._blocks) equal expected num_blocks + if idx != num_blocks: + raise ValueError("total number of blocks created != num_blocks") + + # Head + head_in_channels = block_args.output_filters + out_channels = _round_filters(1280, width_coefficient, depth_divisor) + self._conv_head = conv_type(head_in_channels, out_channels, kernel_size=1, bias=False) + self._conv_head_padding = _make_same_padder(self._conv_head, current_image_size) + self._bn1 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=out_channels) + + # final linear layer + self._avg_pooling = adaptivepool_type(1) + self._dropout = nn.Dropout(dropout_rate) + self._fc = nn.Linear(out_channels, self.num_classes) + + # swish activation to use - using memory efficient swish by default + # can be switched to normal swish using self.set_swish() function call + self._swish = Act["memswish"]() + + # initialize weights using Tensorflow's init method from official impl. + self._initialize_weights() + + def set_swish(self, memory_efficient: bool = True) -> None: + """ + Sets swish function as memory efficient (for training) or standard (for JIT export). + + Args: + memory_efficient: whether to use memory-efficient version of swish. + + """ + self._swish = Act["memswish"]() if memory_efficient else Act["swish"](alpha=1.0) + for sub_stack in self._blocks: + for block in sub_stack: + block.set_swish(memory_efficient) + + def forward(self, inputs: torch.Tensor): + """ + Args: + inputs: input should have spatially N dimensions + ``(Batch, in_channels, dim_0[, dim_1, ..., dim_N])``, N is defined by `dimensions`. + + Returns: + a torch Tensor of classification prediction in shape ``(Batch, num_classes)``. 
+ """ + # Stem + x = self._conv_stem(self._conv_stem_padding(inputs)) + x = self._swish(self._bn0(x)) + # Blocks + x = self._blocks(x) + # Head + x = self._conv_head(self._conv_head_padding(x)) + x = self._swish(self._bn1(x)) + + # Pooling and final linear layer + x = self._avg_pooling(x) + + x = x.flatten(start_dim=1) + x = self._dropout(x) + x = self._fc(x) + return x + + def _initialize_weights(self) -> None: + """ + Initializes weights for conv/linear/batchnorm layers, + following weight init methods from the + `official Tensorflow EfficientNet implementation + `_. + Adapted from `EfficientNet-PyTorch's init method + `_. + """ + for _, m in self.named_modules(): + if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): + fan_out = reduce(operator.mul, m.kernel_size, 1) * m.out_channels + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) + fan_in = 0 # fan_in is deliberately 0, matching the reference init (the range depends on fan_out only) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + m.weight.data.uniform_(-init_range, init_range) + m.bias.data.zero_() + + +class EfficientNetBN(EfficientNet): + def __init__( + self, + model_name: str, + pretrained: bool = True, + progress: bool = True, + spatial_dims: int = 2, + in_channels: int = 3, + num_classes: int = 1000, + norm: Union[str, tuple] = ("batch", {"eps": 1e-3, "momentum": 0.01}), + adv_prop: bool = False, + ) -> None: + """ + Generic wrapper around EfficientNet, used to initialize the EfficientNet-B0 to EfficientNet-B8 models. + model_name is a mandatory argument, as there is no EfficientNetBN model itself; + the N in [0, 1, 2, 3, 4, 5, 6, 7, 8] selects the concrete model. + + Args: + model_name: name of model to initialize, can be from [efficientnet-b0, ..., efficientnet-b8, efficientnet-l2]. + pretrained: whether to initialize pretrained ImageNet weights; only available when spatial_dims=2 and batch + norm is used. + progress: whether to show download progress for pretrained weights download. + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + num_classes: number of output classes. + norm: feature normalization type and arguments. + adv_prop: whether to use weights trained with adversarial examples. + This argument only works when `pretrained` is `True`.
+ + Examples:: + + # for pretrained spatial 2D ImageNet + >>> image_size = get_efficientnet_image_size("efficientnet-b0") + >>> inputs = torch.rand(1, 3, image_size, image_size) + >>> model = EfficientNetBN("efficientnet-b0", pretrained=True) + >>> model.eval() + >>> outputs = model(inputs) + + # create spatial 2D + >>> model = EfficientNetBN("efficientnet-b0", spatial_dims=2) + + # create spatial 3D + >>> model = EfficientNetBN("efficientnet-b0", spatial_dims=3) + + # create EfficientNetB7 for spatial 2D + >>> model = EfficientNetBN("efficientnet-b7", spatial_dims=2) + + """ + # block args + blocks_args_str = [ + "r1_k3_s11_e1_i32_o16_se0.25", + "r2_k3_s22_e6_i16_o24_se0.25", + "r2_k5_s22_e6_i24_o40_se0.25", + "r3_k3_s22_e6_i40_o80_se0.25", + "r3_k5_s11_e6_i80_o112_se0.25", + "r4_k5_s22_e6_i112_o192_se0.25", + "r1_k3_s11_e6_i192_o320_se0.25", + ] + + # check if model_name is valid model + if model_name not in efficientnet_params.keys(): + raise ValueError( + "invalid model_name {} found, must be one of {} ".format( + model_name, ", ".join(efficientnet_params.keys()) + ) + ) + + # get network parameters + weight_coeff, depth_coeff, image_size, dropout_rate, dropconnect_rate = efficientnet_params[model_name] + + # create model and initialize random weights + super().__init__( + blocks_args_str=blocks_args_str, + spatial_dims=spatial_dims, + in_channels=in_channels, + num_classes=num_classes, + width_coefficient=weight_coeff, + depth_coefficient=depth_coeff, + dropout_rate=dropout_rate, + image_size=image_size, + drop_connect_rate=dropconnect_rate, + norm=norm, + ) + + # pretrained weights are only available when `spatial_dims` is 2 + if pretrained and (spatial_dims == 2): + _load_state_dict(self, model_name, progress, adv_prop) + + +class EfficientNetBNFeatures(EfficientNet): + def __init__( + self, + model_name: str, + pretrained: bool = True, + progress: bool = True, + spatial_dims: int = 2, + in_channels: int = 3, + num_classes: int = 1000, + norm: Union[str, tuple] = ("batch", {"eps": 1e-3, "momentum": 0.01}), + adv_prop: bool = False, + ) -> None: + """ + Initialize EfficientNet-B0 to EfficientNet-B7 models as a backbone; the backbone can + be used as an encoder for segmentation and object detection models. + Compared with the class `EfficientNetBN`, the only difference is the forward function. + + This class refers to `PyTorch image models `_.
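+ + Examples (illustrative):: + + # use EfficientNet-B0 as a 2D feature extractor + >>> model = EfficientNetBNFeatures("efficientnet-b0", pretrained=False) + >>> features = model(torch.rand(1, 3, 224, 224)) + >>> len(features) # one tensor per extracted stack; 5 for this configuration + 5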
+ + """ + blocks_args_str = [ + "r1_k3_s11_e1_i32_o16_se0.25", + "r2_k3_s22_e6_i16_o24_se0.25", + "r2_k5_s22_e6_i24_o40_se0.25", + "r3_k3_s22_e6_i40_o80_se0.25", + "r3_k5_s11_e6_i80_o112_se0.25", + "r4_k5_s22_e6_i112_o192_se0.25", + "r1_k3_s11_e6_i192_o320_se0.25", + ] + + # check if model_name is valid model + if model_name not in efficientnet_params.keys(): + raise ValueError( + "invalid model_name {} found, must be one of {} ".format( + model_name, ", ".join(efficientnet_params.keys()) + ) + ) + + # get network parameters + weight_coeff, depth_coeff, image_size, dropout_rate, dropconnect_rate = efficientnet_params[model_name] + + # create model and initialize random weights + super().__init__( + blocks_args_str=blocks_args_str, + spatial_dims=spatial_dims, + in_channels=in_channels, + num_classes=num_classes, + width_coefficient=weight_coeff, + depth_coefficient=depth_coeff, + dropout_rate=dropout_rate, + image_size=image_size, + drop_connect_rate=dropconnect_rate, + norm=norm, + ) + + # only pretrained for when `spatial_dims` is 2 + if pretrained and (spatial_dims == 2): + _load_state_dict(self, model_name, progress, adv_prop) + + def forward(self, inputs: torch.Tensor): + """ + Args: + inputs: input should have spatially N dimensions + ``(Batch, in_channels, dim_0[, dim_1, ..., dim_N])``, N is defined by `dimensions`. + + Returns: + a list of torch Tensors. + """ + # Stem + x = self._conv_stem(self._conv_stem_padding(inputs)) + x = self._swish(self._bn0(x)) + + features = [] + if 0 in self.extract_stacks: + features.append(x) + for i, block in enumerate(self._blocks): + x = block(x) + if i + 1 in self.extract_stacks: + features.append(x) + return features + + +def get_efficientnet_image_size(model_name: str) -> int: + """ + Get the input image size for a given efficientnet model. + + Args: + model_name: name of model to initialize, can be from [efficientnet-b0, ..., efficientnet-b7]. + + Returns: + Image size for single spatial dimension as integer. + + """ + # check if model_name is valid model + if model_name not in efficientnet_params.keys(): + raise ValueError( + "invalid model_name {} found, must be one of {} ".format(model_name, ", ".join(efficientnet_params.keys())) + ) + + # return input image size (all dims equal so only need to return for one dim) + _, _, res, _, _ = efficientnet_params[model_name] + return res + + +def drop_connect(inputs: torch.Tensor, p: float, training: bool) -> torch.Tensor: + """ + Drop connect layer that drops individual connections. + Differs from dropout as dropconnect drops connections instead of whole neurons as in dropout. + + Based on `Deep Networks with Stochastic Depth `_. + Adapted from `Official Tensorflow EfficientNet utils + `_. + + This function is generalized for MONAI's N-Dimensional spatial activations + e.g. 1D activations [B, C, H], 2D activations [B, C, H, W] and 3D activations [B, C, H, W, D] + + Args: + inputs: input tensor with [B, C, dim_1, dim_2, ..., dim_N] where N=spatial_dims. + p: probability to use for dropping connections. + training: whether in training or evaluation mode. + + Returns: + output: output tensor after applying drop connection. 
+ """ + if p < 0.0 or p > 1.0: + raise ValueError(f"p must be in range of [0, 1], found {p}") + + # eval mode: drop_connect is switched off - so return input without modifying + if not training: + return inputs + + # train mode: calculate and apply drop_connect + batch_size: int = inputs.shape[0] + keep_prob: float = 1 - p + num_dims: int = len(inputs.shape) - 2 + + # build dimensions for random tensor, use num_dims to populate appropriate spatial dims + random_tensor_shape: List[int] = [batch_size, 1] + [1] * num_dims + + # generate binary_tensor mask according to probability (p for 0, 1-p for 1) + random_tensor: torch.Tensor = torch.rand(random_tensor_shape, dtype=inputs.dtype, device=inputs.device) + random_tensor += keep_prob + + # round to form binary tensor + binary_tensor: torch.Tensor = torch.floor(random_tensor) + + # drop connect using binary tensor + output: torch.Tensor = inputs / keep_prob * binary_tensor + return output + + +def _load_state_dict(model: nn.Module, arch: str, progress: bool, adv_prop: bool) -> None: + if adv_prop: + arch = arch.split("efficientnet-")[-1] + "-ap" + model_url = look_up_option(arch, url_map, None) + if model_url is None: + print(f"pretrained weights of {arch} is not provided") + else: + # load state dict from url + model_url = url_map[arch] + pretrain_state_dict = model_zoo.load_url(model_url, progress=progress) + model_state_dict = model.state_dict() + + pattern = re.compile(r"(.+)\.\d+(\.\d+\..+)") + for key, value in model_state_dict.items(): + pretrain_key = re.sub(pattern, r"\1\2", key) + if pretrain_key in pretrain_state_dict and value.shape == pretrain_state_dict[pretrain_key].shape: + model_state_dict[key] = pretrain_state_dict[pretrain_key] + + model.load_state_dict(model_state_dict) + + +def _get_same_padding_conv_nd( + image_size: List[int], kernel_size: Tuple[int, ...], dilation: Tuple[int, ...], stride: Tuple[int, ...] +) -> List[int]: + """ + Helper for getting padding (nn.ConstantPadNd) to be used to get SAME padding + conv operations similar to Tensorflow's SAME padding. + + This function is generalized for MONAI's N-Dimensional spatial operations (e.g. Conv1D, Conv2D, Conv3D) + + Args: + image_size: input image/feature spatial size. + kernel_size: conv kernel's spatial size. + dilation: conv dilation rate for Atrous conv. + stride: stride for conv operation. + + Returns: + paddings for ConstantPadNd padder to be used on input tensor to conv op. 
+ """ + # get number of spatial dimensions, corresponds to kernel size length + num_dims = len(kernel_size) + + # additional checks to populate dilation and stride (in case they are single entry tuples) + if len(dilation) == 1: + dilation = dilation * num_dims + + if len(stride) == 1: + stride = stride * num_dims + + # equation to calculate (pad^+ + pad^-) size + _pad_size: List[int] = [ + max((math.ceil(_i_s / _s) - 1) * _s + (_k_s - 1) * _d + 1 - _i_s, 0) + for _i_s, _k_s, _d, _s in zip(image_size, kernel_size, dilation, stride) + ] + # distribute paddings into pad^+ and pad^- following Tensorflow's same padding strategy + _paddings: List[Tuple[int, int]] = [(_p // 2, _p - _p // 2) for _p in _pad_size] + + # unroll list of tuples to tuples, and then to list + # reversed as nn.ConstantPadNd expects paddings starting with last dimension + _paddings_ret: List[int] = [outer for inner in reversed(_paddings) for outer in inner] + return _paddings_ret + + +def _make_same_padder(conv_op: Union[nn.Conv1d, nn.Conv2d, nn.Conv3d], image_size: List[int]): + """ + Helper for initializing ConstantPadNd with SAME padding similar to Tensorflow. + Uses output of _get_same_padding_conv_nd() to get the padding size. + + This function is generalized for MONAI's N-Dimensional spatial operations (e.g. Conv1D, Conv2D, Conv3D) + + Args: + conv_op: nn.ConvNd operation to extract parameters for op from + image_size: input image/feature spatial size + + Returns: + If padding required then nn.ConstandNd() padder initialized to paddings otherwise nn.Identity() + """ + # calculate padding required + padding: List[int] = _get_same_padding_conv_nd(image_size, conv_op.kernel_size, conv_op.dilation, conv_op.stride) + + # initialize and return padder + padder = Pad["constantpad", len(padding) // 2] + if sum(padding) > 0: + return padder(padding=padding, value=0.0) + return nn.Identity() + + +def _round_filters(filters: int, width_coefficient: Optional[float], depth_divisor: float) -> int: + """ + Calculate and round number of filters based on width coefficient multiplier and depth divisor. + + Args: + filters: number of input filters. + width_coefficient: width coefficient for model. + depth_divisor: depth divisor to use. + + Returns: + new_filters: new number of filters after calculation. + """ + + if not width_coefficient: + return filters + + multiplier: float = width_coefficient + divisor: float = depth_divisor + filters_float: float = filters * multiplier + + # follow the formula transferred from official TensorFlow implementation + new_filters: float = max(divisor, int(filters_float + divisor / 2) // divisor * divisor) + if new_filters < 0.9 * filters_float: # prevent rounding by more than 10% + new_filters += divisor + return int(new_filters) + + +def _round_repeats(repeats: int, depth_coefficient: Optional[float]) -> int: + """ + Re-calculate module's repeat number of a block based on depth coefficient multiplier. + + Args: + repeats: number of original repeats. + depth_coefficient: depth coefficient for model. + + Returns: + new repeat: new number of repeat after calculating. + """ + if not depth_coefficient: + return repeats + + # follow the formula transferred from official TensorFlow impl. + return int(math.ceil(depth_coefficient * repeats)) + + +def _calculate_output_image_size(input_image_size: List[int], stride: Union[int, Tuple[int]]): + """ + Calculates the output image size when using _make_same_padder with a stride. + Required for static padding. 
+ + Args: + input_image_size: input image/feature spatial size. + stride: Conv2d operation"s stride. + + Returns: + output_image_size: output image/feature spatial size. + """ + + # checks to extract integer stride in case tuple was received + if isinstance(stride, tuple): + all_strides_equal = all(stride[0] == s for s in stride) + if not all_strides_equal: + raise ValueError(f"unequal strides are not possible, got {stride}") + + stride = stride[0] + + # return output image size + return [int(math.ceil(im_sz / stride)) for im_sz in input_image_size] + + +class BlockArgs(NamedTuple): + """ + BlockArgs object to assist in decoding string notation + of arguments for MBConvBlock definition. + """ + + num_repeat: int + kernel_size: int + stride: int + expand_ratio: int + input_filters: int + output_filters: int + id_skip: bool + se_ratio: Optional[float] = None + + @staticmethod + def from_string(block_string: str): + """ + Get a BlockArgs object from a string notation of arguments. + + Args: + block_string (str): A string notation of arguments. + Examples: "r1_k3_s11_e1_i32_o16_se0.25". + + Returns: + BlockArgs: namedtuple defined at the top of this function. + """ + ops = block_string.split("_") + options = {} + for op in ops: + splits = re.split(r"(\d.*)", op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # check stride + stride_check = ( + ("s" in options and len(options["s"]) == 1) + or (len(options["s"]) == 2 and options["s"][0] == options["s"][1]) + or (len(options["s"]) == 3 and options["s"][0] == options["s"][1] and options["s"][0] == options["s"][2]) + ) + if not stride_check: + raise ValueError("invalid stride option received") + + return BlockArgs( + num_repeat=int(options["r"]), + kernel_size=int(options["k"]), + stride=int(options["s"][0]), + expand_ratio=int(options["e"]), + input_filters=int(options["i"]), + output_filters=int(options["o"]), + id_skip=("noskip" not in block_string), + se_ratio=float(options["se"]) if "se" in options else None, + ) + + def to_string(self): + """ + Return a block string notation for current BlockArgs object + + Returns: + A string notation of BlockArgs object arguments. + Example: "r1_k3_s11_e1_i32_o16_se0.25_noskip". + """ + string = "r{}_k{}_s{}{}_e{}_i{}_o{}_se{}".format( + self.num_repeat, + self.kernel_size, + self.stride, + self.stride, + self.expand_ratio, + self.input_filters, + self.output_filters, + self.se_ratio, + ) + + if not self.id_skip: + string += "_noskip" + return string diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/resnet.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..c8be9f0e89a087fde17946774524512b54b49e0b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/resnet.py @@ -0,0 +1,407 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
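Closing out efficientnet.py: a round trip through the block-args notation that `BlockArgs` above decodes (values follow the parsing rules shown):

    args = BlockArgs.from_string("r2_k5_s22_e6_i16_o24_se0.25")
    # BlockArgs(num_repeat=2, kernel_size=5, stride=2, expand_ratio=6,
    #           input_filters=16, output_filters=24, id_skip=True, se_ratio=0.25)
    args.to_string()  # 'r2_k5_s22_e6_i16_o24_se0.25'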
+ +from functools import partial +from typing import Any, Callable, List, Optional, Tuple, Type, Union + +import torch +import torch.nn as nn + +from monai.networks.layers.factories import Conv, Norm, Pool +from monai.networks.layers.utils import get_pool_layer +from monai.utils import ensure_tuple_rep +from monai.utils.module import look_up_option + +__all__ = ["ResNet", "resnet10", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnet200"] + +from monai.utils import deprecated_arg + + +def get_inplanes(): + return [64, 128, 256, 512] + + +def get_avgpool(): + return [0, 1, (1, 1), (1, 1, 1)] + + +class ResNetBlock(nn.Module): + expansion = 1 + + def __init__( + self, + in_planes: int, + planes: int, + spatial_dims: int = 3, + stride: int = 1, + downsample: Union[nn.Module, partial, None] = None, + ) -> None: + """ + Args: + in_planes: number of input channels. + planes: number of output channels. + spatial_dims: number of spatial dimensions of the input image. + stride: stride to use for first conv layer. + downsample: which downsample layer to use. + """ + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + self.conv1 = conv_type(in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False) + self.bn1 = norm_type(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False) + self.bn2 = norm_type(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + out: torch.Tensor = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNetBottleneck(nn.Module): + expansion = 4 + + def __init__( + self, + in_planes: int, + planes: int, + spatial_dims: int = 3, + stride: int = 1, + downsample: Union[nn.Module, partial, None] = None, + ) -> None: + """ + Args: + in_planes: number of input channels. + planes: number of output channels (taking expansion into account). + spatial_dims: number of spatial dimensions of the input image. + stride: stride to use for second conv layer. + downsample: which downsample layer to use. 
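Once the class definition below is complete, the bottleneck's channel bookkeeping can be sanity-checked like this (a sketch; conv1 reduces to `planes`, conv3 expands to `expansion * planes`):

    import torch

    block = ResNetBottleneck(in_planes=256, planes=64, spatial_dims=2)
    y = block(torch.rand(1, 256, 32, 32))  # torch.Size([1, 256, 32, 32]); 64 * expansion = 256 channels out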
+ """ + + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False) + self.bn1 = norm_type(planes) + self.conv2 = conv_type(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = norm_type(planes) + self.conv3 = conv_type(planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = norm_type(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + out: torch.Tensor = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + """ + ResNet based on: `Deep Residual Learning for Image Recognition `_ + and `Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet? `_. + Adapted from ``_. + + Args: + block: which ResNet block to use, either Basic or Bottleneck. + layers: how many layers to use. + block_inplanes: determine the size of planes at each step. Also tunable with widen_factor. + spatial_dims: number of spatial dimensions of the input image. + n_input_channels: number of input channels for first convolutional layer. + conv1_t_size: size of first convolution layer, determines kernel and padding. + conv1_t_stride: stride of first convolution layer. + no_max_pool: bool argument to determine if to use maxpool layer. + shortcut_type: which downsample block to use. Options are 'A', 'B', default to 'B'. + - 'A': using `self._downsample_basic_block`. + - 'B': kernel_size 1 conv + norm. + widen_factor: widen output for each layer. + num_classes: number of output (classifications). + feed_forward: whether to add the FC layer for the output, default to `True`. + + .. deprecated:: 0.6.0 + ``n_classes`` is deprecated, use ``num_classes`` instead. 
+ + """ + + @deprecated_arg("n_classes", since="0.6") + def __init__( + self, + block: Type[Union[ResNetBlock, ResNetBottleneck]], + layers: List[int], + block_inplanes: List[int], + spatial_dims: int = 3, + n_input_channels: int = 3, + conv1_t_size: Union[Tuple[int], int] = 7, + conv1_t_stride: Union[Tuple[int], int] = 1, + no_max_pool: bool = False, + shortcut_type: str = "B", + widen_factor: float = 1.0, + num_classes: int = 400, + feed_forward: bool = True, + n_classes: Optional[int] = None, + ) -> None: + + super().__init__() + # in case the new num_classes is default but you still call deprecated n_classes + if n_classes is not None and num_classes == 400: + num_classes = n_classes + + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims] + norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims] + pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims] + avgp_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d]] = Pool[ + Pool.ADAPTIVEAVG, spatial_dims + ] + + block_avgpool = get_avgpool() + block_inplanes = [int(x * widen_factor) for x in block_inplanes] + + self.in_planes = block_inplanes[0] + self.no_max_pool = no_max_pool + + conv1_kernel_size = ensure_tuple_rep(conv1_t_size, spatial_dims) + conv1_stride = ensure_tuple_rep(conv1_t_stride, spatial_dims) + + self.conv1 = conv_type( + n_input_channels, + self.in_planes, + kernel_size=conv1_kernel_size, # type: ignore + stride=conv1_stride, # type: ignore + padding=tuple(k // 2 for k in conv1_kernel_size), # type: ignore + bias=False, + ) + self.bn1 = norm_type(self.in_planes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = pool_type(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], spatial_dims, shortcut_type) + self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=2) + self.layer3 = self._make_layer(block, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=2) + self.layer4 = self._make_layer(block, block_inplanes[3], layers[3], spatial_dims, shortcut_type, stride=2) + self.avgpool = avgp_type(block_avgpool[spatial_dims]) + self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes) if feed_forward else None + + for m in self.modules(): + if isinstance(m, conv_type): + nn.init.kaiming_normal_(torch.as_tensor(m.weight), mode="fan_out", nonlinearity="relu") + elif isinstance(m, norm_type): + nn.init.constant_(torch.as_tensor(m.weight), 1) + nn.init.constant_(torch.as_tensor(m.bias), 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(torch.as_tensor(m.bias), 0) + + def _downsample_basic_block(self, x: torch.Tensor, planes: int, stride: int, spatial_dims: int = 3) -> torch.Tensor: + out: torch.Tensor = get_pool_layer(("avg", {"kernel_size": 1, "stride": stride}), spatial_dims=spatial_dims)(x) + zero_pads = torch.zeros(out.size(0), planes - out.size(1), *out.shape[2:], dtype=out.dtype, device=out.device) + out = torch.cat([out.data, zero_pads], dim=1) + return out + + def _make_layer( + self, + block: Type[Union[ResNetBlock, ResNetBottleneck]], + planes: int, + blocks: int, + spatial_dims: int, + shortcut_type: str, + stride: int = 1, + ) -> nn.Sequential: + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + downsample: Union[nn.Module, partial, None] = None + if stride != 1 or 
self.in_planes != planes * block.expansion: + if look_up_option(shortcut_type, {"A", "B"}) == "A": + downsample = partial( + self._downsample_basic_block, + planes=planes * block.expansion, + stride=stride, + spatial_dims=spatial_dims, + ) + else: + downsample = nn.Sequential( + conv_type(self.in_planes, planes * block.expansion, kernel_size=1, stride=stride), + norm_type(planes * block.expansion), + ) + + layers = [ + block( + in_planes=self.in_planes, planes=planes, spatial_dims=spatial_dims, stride=stride, downsample=downsample + ) + ] + + self.in_planes = planes * block.expansion + for _i in range(1, blocks): + layers.append(block(self.in_planes, planes, spatial_dims=spatial_dims)) + + return nn.Sequential(*layers) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + if not self.no_max_pool: + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + + x = x.view(x.size(0), -1) + if self.fc is not None: + x = self.fc(x) + + return x + + +def _resnet( + arch: str, + block: Type[Union[ResNetBlock, ResNetBottleneck]], + layers: List[int], + block_inplanes: List[int], + pretrained: bool, + progress: bool, + **kwargs: Any, +) -> ResNet: + model: ResNet = ResNet(block, layers, block_inplanes, **kwargs) + if pretrained: + # Author of paper zipped the state_dict on googledrive, + # so would need to download, unzip and read (2.8gb file for a ~150mb state dict). + # Would like to load dict from url but need somewhere to save the state dicts. + raise NotImplementedError( + "Currently not implemented. You need to manually download weights provided by the paper's author" + " and load then to the model with `state_dict`. See https://github.com/Tencent/MedicalNet" + ) + return model + + +def resnet10(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + """ResNet-10 with optional pretrained support when `spatial_dims` is 3. + + Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis `_. + + Args: + pretrained (bool): If True, returns a model pre-trained on 23 medical datasets + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet("resnet10", ResNetBlock, [1, 1, 1, 1], get_inplanes(), pretrained, progress, **kwargs) + + +def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + """ResNet-18 with optional pretrained support when `spatial_dims` is 3. + + Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis `_. + + Args: + pretrained (bool): If True, returns a model pre-trained on 23 medical datasets + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet("resnet18", ResNetBlock, [2, 2, 2, 2], get_inplanes(), pretrained, progress, **kwargs) + + +def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + """ResNet-34 with optional pretrained support when `spatial_dims` is 3. + + Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis `_. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on 23 medical datasets + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet("resnet34", ResNetBlock, [3, 4, 6, 3], get_inplanes(), pretrained, progress, **kwargs) + + +def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + """ResNet-50 with optional pretrained support when `spatial_dims` is 3. + + Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis `_. + + Args: + pretrained (bool): If True, returns a model pre-trained on 23 medical datasets + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet("resnet50", ResNetBottleneck, [3, 4, 6, 3], get_inplanes(), pretrained, progress, **kwargs) + + +def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + """ResNet-101 with optional pretrained support when `spatial_dims` is 3. + + Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis `_. + + Args: + pretrained (bool): If True, returns a model pre-trained on 8 medical datasets + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet("resnet101", ResNetBottleneck, [3, 4, 23, 3], get_inplanes(), pretrained, progress, **kwargs) + + +def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + """ResNet-152 with optional pretrained support when `spatial_dims` is 3. + + Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis `_. + + Args: + pretrained (bool): If True, returns a model pre-trained on 8 medical datasets + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet("resnet152", ResNetBottleneck, [3, 8, 36, 3], get_inplanes(), pretrained, progress, **kwargs) + + +def resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + """ResNet-200 with optional pretrained support when `spatial_dims` is 3. + + Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis `_. + + Args: + pretrained (bool): If True, returns a model pre-trained on 8 medical datasets + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet("resnet200", ResNetBottleneck, [3, 24, 36, 3], get_inplanes(), pretrained, progress, **kwargs) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/swin_unetr.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/swin_unetr.py new file mode 100644 index 0000000000000000000000000000000000000000..8e900788730a023e27419c2eb93e212955f9cb4d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/networks/nets/swin_unetr.py @@ -0,0 +1,979 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
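Before moving into swin_unetr.py: the resnet factory functions above forward their kwargs to `ResNet`, so a 3D single-channel classifier is one call (a sketch; weights stay randomly initialized, since pretrained loading is not implemented here):

    import torch

    model = resnet50(spatial_dims=3, n_input_channels=1, num_classes=2)
    logits = model(torch.rand(1, 1, 64, 64, 64))  # torch.Size([1, 2])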
+ +from typing import Sequence, Tuple, Type, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from torch.nn import LayerNorm + +from monai.networks.blocks import MLPBlock as Mlp +from monai.networks.blocks import PatchEmbed, UnetOutBlock, UnetrBasicBlock, UnetrUpBlock +from monai.networks.layers import DropPath, trunc_normal_ +from monai.utils import ensure_tuple_rep, optional_import + +rearrange, _ = optional_import("einops", name="rearrange") + + +class SwinUNETR(nn.Module): + """ + Swin UNETR based on: "Hatamizadeh et al., + Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images + " + """ + + def __init__( + self, + img_size: Union[Sequence[int], int], + in_channels: int, + out_channels: int, + depths: Sequence[int] = (2, 2, 2, 2), + num_heads: Sequence[int] = (3, 6, 12, 24), + feature_size: int = 24, + norm_name: Union[Tuple, str] = "instance", + drop_rate: float = 0.0, + attn_drop_rate: float = 0.0, + dropout_path_rate: float = 0.0, + normalize: bool = True, + use_checkpoint: bool = False, + spatial_dims: int = 3, + ) -> None: + """ + Args: + img_size: dimension of input image. + in_channels: dimension of input channels. + out_channels: dimension of output channels. + feature_size: dimension of network feature size. + depths: number of layers in each stage. + num_heads: number of attention heads. + norm_name: feature normalization type and arguments. + drop_rate: dropout rate. + attn_drop_rate: attention dropout rate. + dropout_path_rate: drop path rate. + normalize: normalize output intermediate features in each stage. + use_checkpoint: use gradient checkpointing for reduced memory usage. + spatial_dims: number of spatial dims. + + Examples:: + + # for 3D single channel input with size (96,96,96), 4-channel output and feature size of 48. + >>> net = SwinUNETR(img_size=(96,96,96), in_channels=1, out_channels=4, feature_size=48) + + # for 3D 4-channel input with size (128,128,128), 3-channel output and (2,4,2,2) layers in each stage. + >>> net = SwinUNETR(img_size=(128,128,128), in_channels=4, out_channels=3, depths=(2,4,2,2)) + + # for 2D single channel input with size (96,96), 2-channel output and gradient checkpointing. 
+ >>> net = SwinUNETR(img_size=(96,96), in_channels=3, out_channels=2, use_checkpoint=True, spatial_dims=2) + + """ + + super().__init__() + + img_size = ensure_tuple_rep(img_size, spatial_dims) + patch_size = ensure_tuple_rep(2, spatial_dims) + window_size = ensure_tuple_rep(7, spatial_dims) + + if not (spatial_dims == 2 or spatial_dims == 3): + raise ValueError("spatial dimension should be 2 or 3.") + + for m, p in zip(img_size, patch_size): + for i in range(5): + if m % np.power(p, i + 1) != 0: + raise ValueError("input image size (img_size) should be divisible by stage-wise image resolution.") + + if not (0 <= drop_rate <= 1): + raise ValueError("dropout rate should be between 0 and 1.") + + if not (0 <= attn_drop_rate <= 1): + raise ValueError("attention dropout rate should be between 0 and 1.") + + if not (0 <= dropout_path_rate <= 1): + raise ValueError("drop path rate should be between 0 and 1.") + + if feature_size % 12 != 0: + raise ValueError("feature_size should be divisible by 12.") + + self.normalize = normalize + + self.swinViT = SwinTransformer( + in_chans=in_channels, + embed_dim=feature_size, + window_size=window_size, + patch_size=patch_size, + depths=depths, + num_heads=num_heads, + mlp_ratio=4.0, + qkv_bias=True, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dropout_path_rate, + norm_layer=nn.LayerNorm, + use_checkpoint=use_checkpoint, + spatial_dims=spatial_dims, + ) + + self.encoder1 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=feature_size, + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=True, + ) + + self.encoder2 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=feature_size, + out_channels=feature_size, + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=True, + ) + + self.encoder3 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=2 * feature_size, + out_channels=2 * feature_size, + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=True, + ) + + self.encoder4 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=4 * feature_size, + out_channels=4 * feature_size, + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=True, + ) + + self.encoder10 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=16 * feature_size, + out_channels=16 * feature_size, + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=True, + ) + + self.decoder5 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=16 * feature_size, + out_channels=8 * feature_size, + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=True, + ) + + self.decoder4 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=feature_size * 8, + out_channels=feature_size * 4, + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=True, + ) + + self.decoder3 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=feature_size * 4, + out_channels=feature_size * 2, + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=True, + ) + self.decoder2 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=feature_size * 2, + out_channels=feature_size, + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=True, + ) + + self.decoder1 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=feature_size, + out_channels=feature_size, + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=True, + ) + + self.out = UnetOutBlock( + 
spatial_dims=spatial_dims, in_channels=feature_size, out_channels=out_channels + ) # type: ignore + + def load_from(self, weights): + + with torch.no_grad(): + self.swinViT.patch_embed.proj.weight.copy_(weights["state_dict"]["module.patch_embed.proj.weight"]) + self.swinViT.patch_embed.proj.bias.copy_(weights["state_dict"]["module.patch_embed.proj.bias"]) + for bname, block in self.swinViT.layers1[0].blocks.named_children(): + block.load_from(weights, n_block=bname, layer="layers1") + self.swinViT.layers1[0].downsample.reduction.weight.copy_( + weights["state_dict"]["module.layers1.0.downsample.reduction.weight"] + ) + self.swinViT.layers1[0].downsample.norm.weight.copy_( + weights["state_dict"]["module.layers1.0.downsample.norm.weight"] + ) + self.swinViT.layers1[0].downsample.norm.bias.copy_( + weights["state_dict"]["module.layers1.0.downsample.norm.bias"] + ) + for bname, block in self.swinViT.layers2[0].blocks.named_children(): + block.load_from(weights, n_block=bname, layer="layers2") + self.swinViT.layers2[0].downsample.reduction.weight.copy_( + weights["state_dict"]["module.layers2.0.downsample.reduction.weight"] + ) + self.swinViT.layers2[0].downsample.norm.weight.copy_( + weights["state_dict"]["module.layers2.0.downsample.norm.weight"] + ) + self.swinViT.layers2[0].downsample.norm.bias.copy_( + weights["state_dict"]["module.layers2.0.downsample.norm.bias"] + ) + for bname, block in self.swinViT.layers3[0].blocks.named_children(): + block.load_from(weights, n_block=bname, layer="layers3") + self.swinViT.layers3[0].downsample.reduction.weight.copy_( + weights["state_dict"]["module.layers3.0.downsample.reduction.weight"] + ) + self.swinViT.layers3[0].downsample.norm.weight.copy_( + weights["state_dict"]["module.layers3.0.downsample.norm.weight"] + ) + self.swinViT.layers3[0].downsample.norm.bias.copy_( + weights["state_dict"]["module.layers3.0.downsample.norm.bias"] + ) + for bname, block in self.swinViT.layers4[0].blocks.named_children(): + block.load_from(weights, n_block=bname, layer="layers4") + self.swinViT.layers4[0].downsample.reduction.weight.copy_( + weights["state_dict"]["module.layers4.0.downsample.reduction.weight"] + ) + self.swinViT.layers4[0].downsample.norm.weight.copy_( + weights["state_dict"]["module.layers4.0.downsample.norm.weight"] + ) + self.swinViT.layers4[0].downsample.norm.bias.copy_( + weights["state_dict"]["module.layers4.0.downsample.norm.bias"] + ) + + def forward(self, x_in): + hidden_states_out = self.swinViT(x_in, self.normalize) + enc0 = self.encoder1(x_in) + enc1 = self.encoder2(hidden_states_out[0]) + enc2 = self.encoder3(hidden_states_out[1]) + enc3 = self.encoder4(hidden_states_out[2]) + dec4 = self.encoder10(hidden_states_out[4]) + dec3 = self.decoder5(dec4, hidden_states_out[3]) + dec2 = self.decoder4(dec3, enc3) + dec1 = self.decoder3(dec2, enc2) + dec0 = self.decoder2(dec1, enc1) + out = self.decoder1(dec0, enc0) + logits = self.out(out) + return logits + + +def window_partition(x, window_size): + """window partition operation based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + + Args: + x: input tensor. + window_size: local window size. 
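A shape walk-through for the 3D branch implemented below, using the channel-last layout the Swin code works in:

    import torch

    x = torch.rand(1, 14, 14, 14, 96)                 # (b, d, h, w, c)
    win = window_partition(x, window_size=(7, 7, 7))  # (1 * 2 * 2 * 2, 7 * 7 * 7, 96) = (8, 343, 96)
    back = window_reverse(win, (7, 7, 7), dims=[1, 14, 14, 14])
    assert back.shape == x.shape                      # window_reverse undoes window_partition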
+ """ + x_shape = x.size() + if len(x_shape) == 5: + b, d, h, w, c = x_shape + x = x.view( + b, + d // window_size[0], + window_size[0], + h // window_size[1], + window_size[1], + w // window_size[2], + window_size[2], + c, + ) + windows = ( + x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, window_size[0] * window_size[1] * window_size[2], c) + ) + elif len(x_shape) == 4: + b, h, w, c = x.shape + x = x.view(b, h // window_size[0], window_size[0], w // window_size[1], window_size[1], c) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0] * window_size[1], c) + return windows + + +def window_reverse(windows, window_size, dims): + """window reverse operation based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + + Args: + windows: windows tensor. + window_size: local window size. + dims: dimension values. + """ + if len(dims) == 4: + b, d, h, w = dims + x = windows.view( + b, + d // window_size[0], + h // window_size[1], + w // window_size[2], + window_size[0], + window_size[1], + window_size[2], + -1, + ) + x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(b, d, h, w, -1) + + elif len(dims) == 3: + b, h, w = dims + x = windows.view(b, h // window_size[0], w // window_size[0], window_size[0], window_size[1], -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1) + return x + + +def get_window_size(x_size, window_size, shift_size=None): + """Computing window size based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + + Args: + x_size: input size. + window_size: local window size. + shift_size: window shifting size. + """ + + use_window_size = list(window_size) + if shift_size is not None: + use_shift_size = list(shift_size) + for i in range(len(x_size)): + if x_size[i] <= window_size[i]: + use_window_size[i] = x_size[i] + if shift_size is not None: + use_shift_size[i] = 0 + + if shift_size is None: + return tuple(use_window_size) + else: + return tuple(use_window_size), tuple(use_shift_size) + + +class WindowAttention(nn.Module): + """ + Window based multi-head self attention module with relative position bias based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + """ + + def __init__( + self, + dim: int, + num_heads: int, + window_size: Sequence[int], + qkv_bias: bool = False, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ) -> None: + """ + Args: + dim: number of feature channels. + num_heads: number of attention heads. + window_size: local window size. + qkv_bias: add a learnable bias to query, key, value. + attn_drop: attention dropout rate. + proj_drop: dropout rate of output. 
+ """ + + super().__init__() + self.dim = dim + self.window_size = window_size + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + mesh_args = torch.meshgrid.__kwdefaults__ + + if len(self.window_size) == 3: + self.relative_position_bias_table = nn.Parameter( + torch.zeros( + (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1), + num_heads, + ) + ) + coords_d = torch.arange(self.window_size[0]) + coords_h = torch.arange(self.window_size[1]) + coords_w = torch.arange(self.window_size[2]) + if mesh_args is not None: + coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w, indexing="ij")) + else: + coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w)) + coords_flatten = torch.flatten(coords, 1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += self.window_size[0] - 1 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 2] += self.window_size[2] - 1 + relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1) + relative_coords[:, :, 1] *= 2 * self.window_size[2] - 1 + elif len(self.window_size) == 2: + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) + ) + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + if mesh_args is not None: + coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij")) + else: + coords = torch.stack(torch.meshgrid(coords_h, coords_w)) + coords_flatten = torch.flatten(coords, 1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += self.window_size[0] - 1 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + + relative_position_index = relative_coords.sum(-1) + self.register_buffer("relative_position_index", relative_position_index) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + trunc_normal_(self.relative_position_bias_table, std=0.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask): + b, n, c = x.shape + qkv = self.qkv(x).reshape(b, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + q = q * self.scale + attn = q @ k.transpose(-2, -1) + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.clone()[:n, :n].reshape(-1) + ].reshape(n, n, -1) + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() + attn = attn + relative_position_bias.unsqueeze(0) + if mask is not None: + nw = mask.shape[0] + attn = attn.view(b // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, n, n) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + x = (attn @ v).transpose(1, 2).reshape(b, n, c) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + """ + Swin Transformer block based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + 
""" + + def __init__( + self, + dim: int, + num_heads: int, + window_size: Sequence[int], + shift_size: Sequence[int], + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + drop: float = 0.0, + attn_drop: float = 0.0, + drop_path: float = 0.0, + act_layer: str = "GELU", + norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore + use_checkpoint: bool = False, + ) -> None: + """ + Args: + dim: number of feature channels. + num_heads: number of attention heads. + window_size: local window size. + shift_size: window shift size. + mlp_ratio: ratio of mlp hidden dim to embedding dim. + qkv_bias: add a learnable bias to query, key, value. + drop: dropout rate. + attn_drop: attention dropout rate. + drop_path: stochastic depth rate. + act_layer: activation layer. + norm_layer: normalization layer. + use_checkpoint: use gradient checkpointing for reduced memory usage. + """ + + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + self.use_checkpoint = use_checkpoint + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + window_size=self.window_size, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop, + ) + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(hidden_size=dim, mlp_dim=mlp_hidden_dim, act=act_layer, dropout_rate=drop, dropout_mode="swin") + + def forward_part1(self, x, mask_matrix): + x_shape = x.size() + x = self.norm1(x) + if len(x_shape) == 5: + b, d, h, w, c = x.shape + window_size, shift_size = get_window_size((d, h, w), self.window_size, self.shift_size) + pad_l = pad_t = pad_d0 = 0 + pad_d1 = (window_size[0] - d % window_size[0]) % window_size[0] + pad_b = (window_size[1] - h % window_size[1]) % window_size[1] + pad_r = (window_size[2] - w % window_size[2]) % window_size[2] + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1)) + _, dp, hp, wp, _ = x.shape + dims = [b, dp, hp, wp] + + elif len(x_shape) == 4: + b, h, w, c = x.shape + window_size, shift_size = get_window_size((h, w), self.window_size, self.shift_size) + pad_l = pad_t = 0 + pad_r = (window_size[0] - h % window_size[0]) % window_size[0] + pad_b = (window_size[1] - w % window_size[1]) % window_size[1] + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, hp, wp, _ = x.shape + dims = [b, hp, wp] + + if any(i > 0 for i in shift_size): + if len(x_shape) == 5: + shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3)) + elif len(x_shape) == 4: + shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1]), dims=(1, 2)) + attn_mask = mask_matrix + else: + shifted_x = x + attn_mask = None + x_windows = window_partition(shifted_x, window_size) + attn_windows = self.attn(x_windows, mask=attn_mask) + attn_windows = attn_windows.view(-1, *(window_size + (c,))) + shifted_x = window_reverse(attn_windows, window_size, dims) + if any(i > 0 for i in shift_size): + if len(x_shape) == 5: + x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3)) + elif len(x_shape) == 4: + x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1]), dims=(1, 2)) + else: + x = shifted_x + + if len(x_shape) == 5: + if pad_d1 > 0 or pad_r > 0 or pad_b > 0: + x = x[:, :d, :h, :w, :].contiguous() + elif len(x_shape) == 4: + if pad_r > 0 or pad_b > 0: + x = x[:, :h, :w, :].contiguous() + 
+ return x + + def forward_part2(self, x): + return self.drop_path(self.mlp(self.norm2(x))) + + def load_from(self, weights, n_block, layer): + root = f"module.{layer}.0.blocks.{n_block}." + block_names = [ + "norm1.weight", + "norm1.bias", + "attn.relative_position_bias_table", + "attn.relative_position_index", + "attn.qkv.weight", + "attn.qkv.bias", + "attn.proj.weight", + "attn.proj.bias", + "norm2.weight", + "norm2.bias", + "mlp.fc1.weight", + "mlp.fc1.bias", + "mlp.fc2.weight", + "mlp.fc2.bias", + ] + with torch.no_grad(): + self.norm1.weight.copy_(weights["state_dict"][root + block_names[0]]) + self.norm1.bias.copy_(weights["state_dict"][root + block_names[1]]) + self.attn.relative_position_bias_table.copy_(weights["state_dict"][root + block_names[2]]) + self.attn.relative_position_index.copy_(weights["state_dict"][root + block_names[3]]) + self.attn.qkv.weight.copy_(weights["state_dict"][root + block_names[4]]) + self.attn.qkv.bias.copy_(weights["state_dict"][root + block_names[5]]) + self.attn.proj.weight.copy_(weights["state_dict"][root + block_names[6]]) + self.attn.proj.bias.copy_(weights["state_dict"][root + block_names[7]]) + self.norm2.weight.copy_(weights["state_dict"][root + block_names[8]]) + self.norm2.bias.copy_(weights["state_dict"][root + block_names[9]]) + self.mlp.linear1.weight.copy_(weights["state_dict"][root + block_names[10]]) + self.mlp.linear1.bias.copy_(weights["state_dict"][root + block_names[11]]) + self.mlp.linear2.weight.copy_(weights["state_dict"][root + block_names[12]]) + self.mlp.linear2.bias.copy_(weights["state_dict"][root + block_names[13]]) + + def forward(self, x, mask_matrix): + shortcut = x + if self.use_checkpoint: + x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix) + else: + x = self.forward_part1(x, mask_matrix) + x = shortcut + self.drop_path(x) + if self.use_checkpoint: + x = x + checkpoint.checkpoint(self.forward_part2, x) + else: + x = x + self.forward_part2(x) + return x + + +class PatchMerging(nn.Module): + """ + Patch merging layer based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + """ + + def __init__( + self, dim: int, norm_layer: Type[LayerNorm] = nn.LayerNorm, spatial_dims: int = 3 + ) -> None: # type: ignore + """ + Args: + dim: number of feature channels. + norm_layer: normalization layer. + spatial_dims: number of spatial dims. 
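Shape-wise, the 3D merge below concatenates strided sub-grids into 8 * dim channels and projects them to 2 * dim, halving each spatial dimension. As written, though, the eight slices repeat two offsets (x5 duplicates x2, x6 duplicates x3) and never take the (1, 1, 0) or (0, 1, 1) sub-grids; a sketch of an exhaustive enumeration over all 2 x 2 x 2 offsets (channel-last input assumed):

    import itertools

    import torch

    def merge_slices_3d(x: torch.Tensor) -> torch.Tensor:
        # x: (b, d, h, w, c) with even d, h, w  ->  (b, d/2, h/2, w/2, 8 * c)
        parts = [x[:, i::2, j::2, k::2, :] for i, j, k in itertools.product((0, 1), repeat=3)]
        return torch.cat(parts, dim=-1)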
+ """ + + super().__init__() + self.dim = dim + if spatial_dims == 3: + self.reduction = nn.Linear(8 * dim, 2 * dim, bias=False) + self.norm = norm_layer(8 * dim) + elif spatial_dims == 2: + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + + x_shape = x.size() + if len(x_shape) == 5: + b, d, h, w, c = x_shape + pad_input = (h % 2 == 1) or (w % 2 == 1) or (d % 2 == 1) + if pad_input: + x = F.pad(x, (0, 0, 0, d % 2, 0, w % 2, 0, h % 2)) + x0 = x[:, 0::2, 0::2, 0::2, :] + x1 = x[:, 1::2, 0::2, 0::2, :] + x2 = x[:, 0::2, 1::2, 0::2, :] + x3 = x[:, 0::2, 0::2, 1::2, :] + x4 = x[:, 1::2, 0::2, 1::2, :] + x5 = x[:, 0::2, 1::2, 0::2, :] + x6 = x[:, 0::2, 0::2, 1::2, :] + x7 = x[:, 1::2, 1::2, 1::2, :] + x = torch.cat([x0, x1, x2, x3, x4, x5, x6, x7], -1) + + elif len(x_shape) == 4: + b, h, w, c = x_shape + pad_input = (h % 2 == 1) or (w % 2 == 1) + if pad_input: + x = F.pad(x, (0, 0, 0, w % 2, 0, h % 2)) + x0 = x[:, 0::2, 0::2, :] + x1 = x[:, 1::2, 0::2, :] + x2 = x[:, 0::2, 1::2, :] + x3 = x[:, 1::2, 1::2, :] + x = torch.cat([x0, x1, x2, x3], -1) + + x = self.norm(x) + x = self.reduction(x) + return x + + +def compute_mask(dims, window_size, shift_size, device): + """Computing region masks based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + + Args: + dims: dimension values. + window_size: local window size. + shift_size: shift size. + device: device. + """ + + cnt = 0 + + if len(dims) == 3: + d, h, w = dims + img_mask = torch.zeros((1, d, h, w, 1), device=device) + for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None): + for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None): + for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2], None): + img_mask[:, d, h, w, :] = cnt + cnt += 1 + + elif len(dims) == 2: + h, w = dims + img_mask = torch.zeros((1, h, w, 1), device=device) + for h in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None): + for w in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None): + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, window_size) + mask_windows = mask_windows.squeeze(-1) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + return attn_mask + + +class BasicLayer(nn.Module): + """ + Basic Swin Transformer layer in one stage based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + """ + + def __init__( + self, + dim: int, + depth: int, + num_heads: int, + window_size: Sequence[int], + drop_path: list, + mlp_ratio: float = 4.0, + qkv_bias: bool = False, + drop: float = 0.0, + attn_drop: float = 0.0, + norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore + downsample: isinstance = None, # type: ignore + use_checkpoint: bool = False, + ) -> None: + """ + Args: + dim: number of feature channels. + depths: number of layers in each stage. + num_heads: number of attention heads. + window_size: local window size. + drop_path: stochastic depth rate. + mlp_ratio: ratio of mlp hidden dim to embedding dim. 
+ qkv_bias: add a learnable bias to query, key, value. + drop: dropout rate. + attn_drop: attention dropout rate. + norm_layer: normalization layer. + downsample: downsample layer at the end of the layer. + use_checkpoint: use gradient checkpointing for reduced memory usage. + """ + + super().__init__() + self.window_size = window_size + self.shift_size = tuple(i // 2 for i in window_size) + self.no_shift = tuple(0 for i in window_size) + self.depth = depth + self.use_checkpoint = use_checkpoint + self.blocks = nn.ModuleList( + [ + SwinTransformerBlock( + dim=dim, + num_heads=num_heads, + window_size=self.window_size, + shift_size=self.no_shift if (i % 2 == 0) else self.shift_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + use_checkpoint=use_checkpoint, + ) + for i in range(depth) + ] + ) + self.downsample = downsample + if self.downsample is not None: + self.downsample = downsample(dim=dim, norm_layer=norm_layer, spatial_dims=len(self.window_size)) + + def forward(self, x): + x_shape = x.size() + if len(x_shape) == 5: + b, c, d, h, w = x_shape + window_size, shift_size = get_window_size((d, h, w), self.window_size, self.shift_size) + x = rearrange(x, "b c d h w -> b d h w c") + dp = int(np.ceil(d / window_size[0])) * window_size[0] + hp = int(np.ceil(h / window_size[1])) * window_size[1] + wp = int(np.ceil(w / window_size[2])) * window_size[2] + attn_mask = compute_mask([dp, hp, wp], window_size, shift_size, x.device) + for blk in self.blocks: + x = blk(x, attn_mask) + x = x.view(b, d, h, w, -1) + if self.downsample is not None: + x = self.downsample(x) + x = rearrange(x, "b d h w c -> b c d h w") + + elif len(x_shape) == 4: + b, c, h, w = x_shape + window_size, shift_size = get_window_size((h, w), self.window_size, self.shift_size) + x = rearrange(x, "b c h w -> b h w c") + hp = int(np.ceil(h / window_size[0])) * window_size[0] + wp = int(np.ceil(w / window_size[1])) * window_size[1] + attn_mask = compute_mask([hp, wp], window_size, shift_size, x.device) + for blk in self.blocks: + x = blk(x, attn_mask) + x = x.view(b, h, w, -1) + if self.downsample is not None: + x = self.downsample(x) + x = rearrange(x, "b h w c -> b c h w") + return x + + +class SwinTransformer(nn.Module): + """ + Swin Transformer based on: "Liu et al., + Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + " + https://github.com/microsoft/Swin-Transformer + """ + + def __init__( + self, + in_chans: int, + embed_dim: int, + window_size: Sequence[int], + patch_size: Sequence[int], + depths: Sequence[int], + num_heads: Sequence[int], + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + drop_rate: float = 0.0, + attn_drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore + patch_norm: bool = False, + use_checkpoint: bool = False, + spatial_dims: int = 3, + ) -> None: + """ + Args: + in_chans: dimension of input channels. + embed_dim: number of linear projection output channels. + window_size: local window size. + patch_size: patch size. + depths: number of layers in each stage. + num_heads: number of attention heads. + mlp_ratio: ratio of mlp hidden dim to embedding dim. + qkv_bias: add a learnable bias to query, key, value. + drop_rate: dropout rate. + attn_drop_rate: attention dropout rate. + drop_path_rate: stochastic depth rate. + norm_layer: normalization layer. 
+ patch_norm: add normalization after patch embedding. + use_checkpoint: use gradient checkpointing for reduced memory usage. + spatial_dims: spatial dimension. + """ + + super().__init__() + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.patch_norm = patch_norm + self.window_size = window_size + self.patch_size = patch_size + self.patch_embed = PatchEmbed( + patch_size=self.patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None, # type: ignore + spatial_dims=spatial_dims, + ) + self.pos_drop = nn.Dropout(p=drop_rate) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] + self.layers1 = nn.ModuleList() + self.layers2 = nn.ModuleList() + self.layers3 = nn.ModuleList() + self.layers4 = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2**i_layer), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=self.window_size, + drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + norm_layer=norm_layer, + downsample=PatchMerging, + use_checkpoint=use_checkpoint, + ) + if i_layer == 0: + self.layers1.append(layer) + elif i_layer == 1: + self.layers2.append(layer) + elif i_layer == 2: + self.layers3.append(layer) + elif i_layer == 3: + self.layers4.append(layer) + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + + def proj_out(self, x, normalize=False): + if normalize: + x_shape = x.size() + if len(x_shape) == 5: + n, ch, d, h, w = x_shape + x = rearrange(x, "n c d h w -> n d h w c") + x = F.layer_norm(x, [ch]) + x = rearrange(x, "n d h w c -> n c d h w") + elif len(x_shape) == 4: + n, ch, h, w = x_shape + x = rearrange(x, "n c h w -> n h w c") + x = F.layer_norm(x, [ch]) + x = rearrange(x, "n h w c -> n c h w") + return x + + def forward(self, x, normalize=True): + x0 = self.patch_embed(x) + x0 = self.pos_drop(x0) + x0_out = self.proj_out(x0, normalize) + x1 = self.layers1[0](x0.contiguous()) + x1_out = self.proj_out(x1, normalize) + x2 = self.layers2[0](x1.contiguous()) + x2_out = self.proj_out(x2, normalize) + x3 = self.layers3[0](x2.contiguous()) + x3_out = self.proj_out(x3, normalize) + x4 = self.layers4[0](x3.contiguous()) + x4_out = self.proj_out(x4, normalize) + return [x0_out, x1_out, x2_out, x3_out, x4_out] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/dist.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/dist.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8f777ec400125123606f479f83921e58aa11b11 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/dist.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/enums.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/enums.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2a717a9bedd5dbbd2e020552e731f08f20c375f Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/enums.cpython-38.pyc differ diff --git 
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/misc.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/misc.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8add9d9f4c3bd44354305afa0fb38d810d3fce19
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/misc.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/state_cacher.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/state_cacher.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..add0e341ac19ef4407ebc2a2f20b2ca3e4b91fbe
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/utils/__pycache__/state_cacher.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be6cc1b80d84d42434ba1b83e9f466daf4df0cbb
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/__init__.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/class_activation_maps.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/class_activation_maps.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f9519e5021f1628e1c7b4ce74e2285fab65b878
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/class_activation_maps.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/gradient_based.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/gradient_based.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd45cdd47a4ba8229049d55fbfae431f81b490df
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/gradient_based.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/img2tensorboard.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/img2tensorboard.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04e86d2ed8b16fd2a7f8d6817add2df91a39bfb5
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/img2tensorboard.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/occlusion_sensitivity.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/occlusion_sensitivity.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62bbda8f2ca062be73622110dbe82c4d4b8be3c2
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/occlusion_sensitivity.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/utils.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..803690aa4bffa401bb65369a3d49e5ff9c78d72b
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/utils.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/visualizer.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/visualizer.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90fe3d6e4177a140ef5ce810c238c73fc7e1fcdf
Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/visualize/__pycache__/visualizer.cpython-38.pyc differ
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python.libs/libxkbcommon-71ae2972.so.0.0.0 b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python.libs/libxkbcommon-71ae2972.so.0.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..759c61366d79558033b72390fe29da5fc4ae53d6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python.libs/libxkbcommon-71ae2972.so.0.0.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fcb38a646bd1ce1daaf682ad29b72e65bfdf67a06335b278f8e99d0b7530212
+size 269865
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/sklearn/_isotonic.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/sklearn/_isotonic.cpython-38-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b3a9894f38e9215bcd265f676f31fdd8b29351b6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/sklearn/_isotonic.cpython-38-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ef797c3c3e978ee14b6de73ea1cb5a1ca5683f8f4b106569d93d37e0b8e7ffd
+size 1838800
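
Editor's note (not part of the patch): the two `.so` entries above are checked in as Git LFS pointer files rather than raw binaries — the three `+` lines of each hunk record the LFS spec version, the sha256 `oid` of the real object, and its exact byte `size`. A minimal sketch for verifying a checked-out file against its pointer, using the `_isotonic` values from this diff:

    # Illustrative sketch only: check that a smudged (checked-out) file matches
    # its Git LFS pointer (oid = sha256 of the content, size = byte count).
    import hashlib
    import os

    path = "my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/sklearn/_isotonic.cpython-38-x86_64-linux-gnu.so"
    expected_oid = "7ef797c3c3e978ee14b6de73ea1cb5a1ca5683f8f4b106569d93d37e0b8e7ffd"
    expected_size = 1838800

    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)

    assert os.path.getsize(path) == expected_size, "size mismatch"
    assert h.hexdigest() == expected_oid, "oid mismatch"
    print("LFS pointer matches file content")
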