repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_noMirroring.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_noMirroring(nnUNetTrainerV2):
    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        """Validate with deep supervision disabled and mirroring forced off.

        This trainer was trained without mirror augmentation, so test-time
        mirroring must not be used; a request for it is overridden with a
        warning. ``self.network.do_ds`` is switched off for prediction and
        restored afterwards.
        """
        previous_ds = self.network.do_ds
        if do_mirroring:
            print("WARNING! do_mirroring was True but we cannot do that because we trained without mirroring. "
                  "do_mirroring was set to False")
            do_mirroring = False
        self.network.do_ds = False
        result = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window,
                                  step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian,
                                  overwrite=overwrite, validation_folder_name=validation_folder_name,
                                  debug=debug, all_in_gpu=all_in_gpu,
                                  segmentation_export_kwargs=segmentation_export_kwargs)
        self.network.do_ds = previous_ds
        return result

    def setup_DA_params(self):
        """Set up augmentation parameters, then disable mirroring."""
        super().setup_DA_params()
        self.data_aug_params["do_mirror"] = False
| 2,489
| 45.111111
| 118
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_lReLU_biasInSegOutput.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_lReLU_biasInSegOutput(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet with leaky ReLU activations and a bias term in
        the segmentation output convolution (``seg_output_use_bias=True``)."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True},
            True, False, lambda x: x, InitWeights_He(0),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True, seg_output_use_bias=True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 2,288
| 47.702128
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_allConv3x3.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_allConv3x3(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet in which every convolution kernel is forced to
        size 3 along every axis, regardless of what the plans suggested.

        Otherwise identical to the nnUNetTrainerV2 network setup (leaky ReLU,
        instance norm, deep supervision enabled).
        """
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        # overwrite the planned kernel sizes in place: every conv becomes 3x3(x3)
        for stage_kernels in self.net_conv_kernel_sizes:
            stage_kernels[:] = [3] * len(stage_kernels)

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True},
            True, False, lambda x: x, InitWeights_He(1e-2),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 2,727
| 43.721311
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_lReLU_convlReLUIN.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet, ConvDropoutNonlinNorm
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_lReLU_convReLUIN(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet with leaky ReLU activations where the basic
        block order is conv -> dropout -> nonlin -> norm
        (``basic_block=ConvDropoutNonlinNorm``) instead of the default
        norm-before-nonlin ordering."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'inplace': True, 'negative_slope': 1e-2},
            True, False, lambda x: x, InitWeights_He(1e-2),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True, basic_block=ConvDropoutNonlinNorm)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 2,318
| 48.340426
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_NoNormalization.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.network_architecture.custom_modules.helperModules import Identity
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_NoNormalization(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet with normalization disabled: the norm layers
        are replaced by ``Identity`` (constructed with empty kwargs)."""
        conv_op = nn.Conv3d if self.threeD else nn.Conv2d
        dropout_op = nn.Dropout3d if self.threeD else nn.Dropout2d
        norm_op = Identity  # no-op stand-in for the normalization layer

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True},
            True, False, lambda x: x, InitWeights_He(1e-2),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 2,256
| 47.021277
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_BN.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_BN(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet that uses batch normalization
        (nn.BatchNorm3d / nn.BatchNorm2d) instead of instance normalization."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.BatchNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.BatchNorm2d

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True},
            True, False, lambda x: x, InitWeights_He(1e-2),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
# Aliases exposing the same trainer under several names — presumably so that
# multiple identical configurations can be launched side by side (each gets
# its own output folder keyed on the trainer class name); confirm against the
# experiment launcher.
nnUNetTrainerV2_BN_copy1 = nnUNetTrainerV2_BN
nnUNetTrainerV2_BN_copy2 = nnUNetTrainerV2_BN
nnUNetTrainerV2_BN_copy3 = nnUNetTrainerV2_BN
nnUNetTrainerV2_BN_copy4 = nnUNetTrainerV2_BN
| 2,473
| 43.178571
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_ReLU(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet with plain ReLU activations instead of the
        default leaky ReLU (InitWeights_He uses neg_slope 0 accordingly)."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.ReLU, {'inplace': True},
            True, False, lambda x: x, InitWeights_He(0),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 2,180
| 46.413043
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
import torch
from nnunet.network_architecture.generic_modular_residual_UNet import FabiansUNet, get_default_network_config
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
class nnUNetTrainerV2_ResencUNet(nnUNetTrainerV2):
    # Trainer variant that replaces Generic_UNet with FabiansUNet (residual
    # encoder). Deep supervision lives on the decoder
    # (self.network.decoder.deep_supervision) and is toggled around
    # validation/prediction/training below.

    def initialize_network(self):
        # Build the residual-encoder U-Net from the per-stage entries of the
        # experiment plans (kernel sizes, block counts, pooling kernels).
        if self.threeD:
            cfg = get_default_network_config(3, None, norm_type="in")
        else:
            cfg = get_default_network_config(1, None, norm_type="in")

        stage_plans = self.plans['plans_per_stage'][self.stage]
        conv_kernel_sizes = stage_plans['conv_kernel_sizes']
        blocks_per_stage_encoder = stage_plans['num_blocks_encoder']
        blocks_per_stage_decoder = stage_plans['num_blocks_decoder']
        pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']

        self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2,
                                   pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes,
                                   blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2))

        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper

    def setup_DA_params(self):
        """
        net_num_pool_op_kernel_sizes is different in resunet
        """
        super().setup_DA_params()
        # Derive per-output-resolution scales from the cumulative pooling
        # factors; the highest resolution gets [1, 1, 1] and the lowest
        # resolution output is dropped ([:-1]).
        self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
            np.vstack(self.net_num_pool_op_kernel_sizes[1:]), axis=0))[:-1]

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0,
                 segmentation_export_kwargs: dict = None):
        # Deep supervision must be off for inference; restore it afterwards.
        ds = self.network.decoder.deep_supervision
        self.network.decoder.deep_supervision = False
        # NOTE(review): all arguments are forwarded positionally and
        # segmentation_export_kwargs is passed as the next positional argument
        # after all_in_gpu — verify against nnUNetTrainer.validate's signature
        # that it lands on the intended parameter (force_separate_z /
        # interpolation_order* are not forwarded at all).
        ret = nnUNetTrainer.validate(self, do_mirroring, use_sliding_window, step_size, save_softmax, use_gaussian,
                                     overwrite, validation_folder_name, debug, all_in_gpu,
                                     segmentation_export_kwargs)
        self.network.decoder.deep_supervision = ds
        return ret

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        # Same pattern as validate: disable decoder deep supervision for
        # prediction, delegate to the grandparent implementation, restore.
        ds = self.network.decoder.deep_supervision
        self.network.decoder.deep_supervision = False
        ret = nnUNetTrainer.predict_preprocessed_data_return_seg_and_softmax(self, data, do_mirroring=do_mirroring,
                                                                             mirror_axes=mirror_axes,
                                                                             use_sliding_window=use_sliding_window,
                                                                             step_size=step_size,
                                                                             use_gaussian=use_gaussian,
                                                                             pad_border_mode=pad_border_mode,
                                                                             pad_kwargs=pad_kwargs,
                                                                             all_in_gpu=all_in_gpu,
                                                                             verbose=verbose,
                                                                             mixed_precision=mixed_precision)
        self.network.decoder.deep_supervision = ds
        return ret

    def run_training(self):
        # Training requires decoder deep supervision ON; the previous value is
        # restored when training finishes.
        self.maybe_update_lr(self.epoch)  # if we dont overwrite epoch then self.epoch+1 is used which is not what we
        # want at the start of the training
        ds = self.network.decoder.deep_supervision
        self.network.decoder.deep_supervision = True
        ret = nnUNetTrainer.run_training(self)
        self.network.decoder.deep_supervision = ds
        return ret
# Aliases exposing the same trainer under several names — presumably so that
# multiple identical configurations can be launched side by side; confirm
# against the experiment launcher.
nnUNetTrainerV2_ResencUNet_copy1 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy2 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy3 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy4 = nnUNetTrainerV2_ResencUNet
| 5,977
| 57.038835
| 134
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU_convReLUIN.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet, ConvDropoutNonlinNorm
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_ReLU_convReLUIN(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet with plain ReLU activations and the
        conv -> dropout -> nonlin -> norm block ordering
        (``basic_block=ConvDropoutNonlinNorm``)."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.ReLU, {'inplace': True},
            True, False, lambda x: x, InitWeights_He(0),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True, basic_block=ConvDropoutNonlinNorm)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 2,285
| 47.638298
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_NoNormalization_lr1en3.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_NoNormalization import \
nnUNetTrainerV2_NoNormalization
class nnUNetTrainerV2_NoNormalization_lr1en3(nnUNetTrainerV2_NoNormalization):
    """Same as nnUNetTrainerV2_NoNormalization, but trained with an initial
    learning rate of 1e-3 instead of the parent's default."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        # override the parent's learning rate
        self.initial_lr = 1e-3
| 1,293
| 48.769231
| 117
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_GN.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.network_architecture.custom_modules.helperModules import MyGroupNorm
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_GN(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet that normalizes with MyGroupNorm (8 groups)
        instead of instance normalization."""
        if self.threeD:
            conv_op, dropout_op = nn.Conv3d, nn.Dropout3d
        else:
            conv_op, dropout_op = nn.Conv2d, nn.Dropout2d
        norm_op = MyGroupNorm  # same op for 2D and 3D

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {'eps': 1e-5, 'affine': True, 'num_groups': 8},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True},
            True, False, lambda x: x, InitWeights_He(1e-2),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 2,379
| 45.666667
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_softDeepSupervision.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p
try:
from meddec.model_training.ablation_studies.new_nnUNet_candidates.nnUNetTrainerCandidate23_softDeepSupervision4 import \
MyDSLoss4
except ImportError:
MyDSLoss4 = None
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch import nn
import numpy as np
class nnUNetTrainerV2_softDeepSupervision(nnUNetTrainerV2):
    # Trainer variant that uses soft deep supervision via the external
    # MyDSLoss4 loss (optional meddec dependency; None when not installed).

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.loss = None  # we take care of that later (set in initialize())

    def initialize(self, training=True, force_load_plans=False):
        """
        - replaced get_default_augmentation with get_moreDA_augmentation
        - only run this code once
        - loss function wrapper for deep supervision
        :param training: when True, also build data loaders and augmenters
        :param force_load_plans: re-read the plans file even if already loaded
        :return: None
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)

            if force_load_plans or (self.plans is None):
                self.load_plans_file()

            self.process_plans(self.plans)

            self.setup_DA_params()

            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)

            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])

            # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
            mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()

            # now wrap the loss
            if MyDSLoss4 is None:
                raise RuntimeError("This aint ready for prime time yet")
            self.loss = MyDSLoss4(self.batch_dice, weights)
            #self.loss = MultipleOutputLoss2(self.loss, weights)
            ################# END ###################

            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")

                # soft_ds=True plus explicit class list enables the soft deep
                # supervision targets in the augmentation pipeline
                self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
                                                                    self.data_aug_params[
                                                                        'patch_size_for_spatialtransform'],
                                                                    self.data_aug_params,
                                                                    deep_supervision_scales=self.deep_supervision_scales,
                                                                    soft_ds=True, classes=[0] + list(self.classes),
                                                                    pin_memory=self.pin_memory)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass

            self.initialize_network()
            self.initialize_optimizer_and_scheduler()

            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    def run_online_evaluation(self, output, target):
        """
        due to deep supervision the return value and the reference are now lists of tensors. We only need the full
        resolution output because this is what we are interested in in the end. The others are ignored
        :param output: list of network outputs; only the full-resolution entry [0] is used
        :param target: list of reference tensors; only entry [0] is used
        :return: result of nnUNetTrainer.run_online_evaluation on the full-resolution pair
        """
        target = target[0][:,
                 None]  # we need to restore color channel dimension here to be compatible with previous code
        output = output[0]
        return nnUNetTrainer.run_online_evaluation(self, output, target)
| 6,237
| 47.734375
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_GeLU.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
try:
from torch.nn.functional import gelu
except ImportError:
gelu = None
class GeLU(nn.Module):
    """Gaussian Error Linear Unit activation, wrapping
    ``torch.nn.functional.gelu`` as an ``nn.Module``.

    Raises ImportError at construction time when the functional ``gelu`` was
    not importable (torch < 1.7.0, in which case the module-level name is
    None).
    """

    def __init__(self):
        super().__init__()
        if gelu is None:
            raise ImportError('You need to have at least torch==1.7.0 to use GeLUs')

    def forward(self, x):
        return gelu(x)
class nnUNetTrainerV2_GeLU(nnUNetTrainerV2):
    def initialize_network(self):
        """Build a Generic_UNet that uses the GeLU activation (no kwargs)
        instead of leaky ReLU; weights initialized with InitWeights_He()
        defaults. Otherwise identical to the nnUNetTrainerV2 network setup
        (instance norm, deep supervision enabled)."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op,
            norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            GeLU, {},
            True, False, lambda x: x, InitWeights_He(),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 2,829
| 37.767123
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU_biasInSegOutput.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_ReLU_biasInSegOutput(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant using plain ReLU and a bias term in the segmentation output layer."""

    def initialize_network(self):
        """
        Same Generic_UNet configuration as nnUNetTrainerV2 but with nn.ReLU as the
        nonlinearity, InitWeights_He(0) initialization and seg_output_use_bias=True.
        """
        # dimensionality-specific ops
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    self.conv_per_stage, 2, conv_op, norm_op, {'eps': 1e-5, 'affine': True},
                                    dropout_op, {'p': 0, 'inplace': True},
                                    nn.ReLU, {'inplace': True}, True, False, lambda x: x, InitWeights_He(0),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True,
                                    seg_output_use_bias=True)
        if torch.cuda.is_available():
            self.network.cuda()
        # softmax is only applied at inference time
        self.network.inference_apply_nonlin = softmax_helper
| 2,258
| 47.06383
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_3ConvPerStage.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_3ConvPerStage(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant with three conv blocks per stage and fewer base features."""

    def initialize_network(self):
        """
        Build a Generic_UNet with 3 (instead of self.conv_per_stage) convolutions per stage.
        base_num_features is reduced to 24 to keep the larger network within GPU memory.
        """
        self.base_num_features = 24  # otherwise we run out of VRAM
        # dimensionality-specific ops
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    3, 2, conv_op, norm_op, {'eps': 1e-5, 'affine': True},
                                    dropout_op, {'p': 0, 'inplace': True},
                                    nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True}, True, False,
                                    lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        # softmax is only applied at inference time
        self.network.inference_apply_nonlin = softmax_helper
| 2,271
| 47.340426
| 117
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_noDeepSupervision.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
default_2D_augmentation_params, get_patch_size, get_moreDA_augmentation
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
import torch
class nnUNetTrainerV2_noDeepSupervision(nnUNetTrainerV2):
    """
    nnUNetTrainerV2 variant trained WITHOUT deep supervision: a single DC+CE loss on the
    full-resolution output, no deep_supervision_scales, and a Generic_UNet constructed
    with deep supervision disabled.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # plain Dice + cross-entropy loss; replaces the deep-supervision-wrapped loss of the parent
        self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})

    def setup_DA_params(self):
        """
        we leave out the creation of self.deep_supervision_scales, so it remains None
        :return:
        """
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            # +/- 30 degrees rotation around each axis (converted to radians)
            self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            if self.do_dummy_2D_aug:
                # anisotropic data: do spatial augmentation slice-wise with 2D parameters
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            # NOTE(review): this mutates the module-level default dict, not a copy
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            # generator patch size is computed for the in-plane dims only, then the
            # through-plane dim is prepended unchanged
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                             self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size
        self.data_aug_params["scale_range"] = (0.7, 1.4)
        self.data_aug_params["do_elastic"] = False
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform

    def initialize(self, training=True, force_load_plans=False):
        """
        removed deep supervision
        :return:
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                # deep supervision must be disabled in this trainer (see setup_DA_params)
                assert self.deep_supervision_scales is None
                self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
                                                                    self.data_aug_params[
                                                                        'patch_size_for_spatialtransform'],
                                                                    self.data_aug_params,
                                                                    deep_supervision_scales=self.deep_supervision_scales,
                                                                    classes=None,
                                                                    pin_memory=self.pin_memory)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                # inference-only initialization: no data loaders required
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    def initialize_network(self):
        """
        changed deep supervision to False
        :return:
        """
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d
        norm_op_kwargs = {'eps': 1e-5, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        # note the False after net_nonlin_kwargs: deep supervision is OFF here
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
                                    net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper

    def run_online_evaluation(self, output, target):
        # output/target are single tensors here (no deep supervision), so the base-class
        # implementation can be used directly
        return nnUNetTrainer.run_online_evaluation(self, output, target)
| 8,908
| 52.668675
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_LReLU_slope_2en1.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_LReLU_slope_2en1(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant with LeakyReLU negative_slope 0.2 instead of the default 0.01."""

    def initialize_network(self):
        """
        Same Generic_UNet configuration as nnUNetTrainerV2 but with a LeakyReLU slope of
        2e-1 and InitWeights_He(0) initialization.
        """
        # dimensionality-specific ops
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    self.conv_per_stage, 2, conv_op, norm_op, {'eps': 1e-5, 'affine': True},
                                    dropout_op, {'p': 0, 'inplace': True},
                                    nn.LeakyReLU, {'inplace': True, 'negative_slope': 2e-1}, True, False,
                                    lambda x: x, InitWeights_He(0),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        # softmax is only applied at inference time
        self.network.inference_apply_nonlin = softmax_helper
| 2,221
| 47.304348
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_Mish.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
from nnunet.network_architecture.custom_modules.mish import Mish
class nnUNetTrainerV2_Mish(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant using the Mish activation instead of LeakyReLU."""

    def initialize_network(self):
        """
        Same Generic_UNet configuration as nnUNetTrainerV2 but with Mish (no kwargs) as
        the nonlinearity and InitWeights_He(0) initialization.
        """
        # dimensionality-specific ops
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    self.conv_per_stage, 2, conv_op, norm_op, {'eps': 1e-5, 'affine': True},
                                    dropout_op, {'p': 0, 'inplace': True},
                                    Mish, {}, True, False, lambda x: x, InitWeights_He(0),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        # softmax is only applied at inference time
        self.network.inference_apply_nonlin = softmax_helper
| 2,228
| 45.4375
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_3ConvPerStage_samefilters.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_3ConvPerStageSameFilters(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant with three convs per stage while keeping the default base_num_features."""

    def initialize_network(self):
        """
        Build a Generic_UNet with 3 convolutions per stage (instead of self.conv_per_stage);
        unlike nnUNetTrainerV2_3ConvPerStage, base_num_features is left unchanged.
        """
        # dimensionality-specific ops
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d

        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    3, 2, conv_op, norm_op, {'eps': 1e-5, 'affine': True},
                                    dropout_op, {'p': 0, 'inplace': True},
                                    nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True}, True, False,
                                    lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        # softmax is only applied at inference time
        self.network.inference_apply_nonlin = softmax_helper
| 2,214
| 47.152174
| 117
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_FRN.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.network_architecture.custom_modules.feature_response_normalization import FRN3D
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
from nnunet.network_architecture.custom_modules.helperModules import Identity
import torch
class nnUNetTrainerV2_FRN(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant using Filter Response Normalization (3D only) with an Identity nonlinearity."""

    def initialize_network(self):
        """
        Build a Generic_UNet with FRN3D as the normalization op and Identity as the
        nonlinearity (FRN comes with its own learned thresholded activation).
        Only the 3D configuration is implemented.
        """
        if not self.threeD:
            # 2D FRN is not implemented for this trainer
            raise NotImplementedError
        conv_op = nn.Conv3d
        dropout_op = nn.Dropout3d
        norm_op = FRN3D

        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    self.conv_per_stage, 2, conv_op, norm_op, {'eps': 1e-6},
                                    dropout_op, {'p': 0, 'inplace': True},
                                    Identity, {}, True, False, lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        # softmax is only applied at inference time
        self.network.inference_apply_nonlin = softmax_helper
| 2,430
| 43.2
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/miscellaneous/nnUNetTrainerV2_fullEvals.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing.pool import Pool
from time import time
import numpy as np
import torch
from nnunet.configuration import default_num_threads
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.evaluation.region_based_evaluation import evaluate_regions, get_brats_regions
class nnUNetTrainerV2_fullEvals(nnUNetTrainerV2):
    """
    this trainer only works for brats and nothing else

    Runs a full region-based validation (whole tumor / tumor core / enhancing tumor)
    after every epoch and stops training early once 97% of a reference nnU-Net's mean
    Dice has been reached.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # run the full validation after every epoch
        self.validate_every = 1
        # BraTS-specific evaluation regions (whole tumor, tumor core, enhancing tumor)
        self.evaluation_regions = get_brats_regions()
        self.num_val_batches_per_epoch = 0  # we dont need this because this does not evaluate on full images

    def finish_online_evaluation(self):
        # online (patch-based) evaluation is disabled; full-image validation is used instead
        pass

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0):
        """
        disable nnunet postprocessing. this would just waste computation time and does not benefit brats

        !!!We run this with use_sliding_window=False per default (see on_epoch_end). This triggers fully convolutional
        inference. THIS ONLY MAKES SENSE WHEN TRAINING ON FULL IMAGES! Make sure use_sliding_window=True when running
        with default patch size (128x128x128)!!!

        per default this does not use test time data augmentation (mirroring). The reference implementation, however,
        does. I disabled it here because this eats up a lot of computation time

        :return: (whole, core, enhancing) mean Dice values read from the evaluation csv
        """
        validation_start = time()

        # remember train/eval state so it can be restored at the end
        current_mode = self.network.training
        self.network.eval()

        assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)"
        if self.dataset_val is None:
            self.load_dataset()
            self.do_split()

        # predictions as they come from the network go here
        output_folder = join(self.output_folder, validation_folder_name)
        maybe_mkdir_p(output_folder)

        # this is for debug purposes
        my_input_args = {'do_mirroring': do_mirroring,
                         'use_sliding_window': use_sliding_window,
                         'step_size': step_size,
                         'save_softmax': save_softmax,
                         'use_gaussian': use_gaussian,
                         'overwrite': overwrite,
                         'validation_folder_name': validation_folder_name,
                         'debug': debug,
                         'all_in_gpu': all_in_gpu,
                         'force_separate_z': force_separate_z,
                         'interpolation_order': interpolation_order,
                         'interpolation_order_z': interpolation_order_z,
                         }
        save_json(my_input_args, join(output_folder, "validation_args.json"))

        if do_mirroring:
            if not self.data_aug_params['do_mirror']:
                raise RuntimeError("We did not train with mirroring so you cannot do inference with mirroring enabled")
            mirror_axes = self.data_aug_params['mirror_axes']
        else:
            mirror_axes = ()

        # niftis are exported asynchronously in background workers
        export_pool = Pool(default_num_threads)
        results = []

        for k in self.dataset_val.keys():
            properties = load_pickle(self.dataset[k]['properties_file'])
            # NOTE(review): assumes '/' path separator and a fixed-length suffix
            # (e.g. '_0000.nii.gz') on the first data file — confirm on non-POSIX systems
            fname = properties['list_of_data_files'][0].split("/")[-1][:-12]
            if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \
                    (save_softmax and not isfile(join(output_folder, fname + ".npz"))):
                data = np.load(self.dataset[k]['data_file'])['data']

                # print(k, data.shape)

                # data[:-1] drops the last channel, which holds the ground-truth segmentation
                softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1],
                                                                                     do_mirroring=do_mirroring,
                                                                                     mirror_axes=mirror_axes,
                                                                                     use_sliding_window=use_sliding_window,
                                                                                     step_size=step_size,
                                                                                     use_gaussian=use_gaussian,
                                                                                     all_in_gpu=all_in_gpu,
                                                                                     verbose=False,
                                                                                     mixed_precision=self.fp16)[1]

                # this does not do anything in brats -> remove this line
                # softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward])

                if save_softmax:
                    softmax_fname = join(output_folder, fname + ".npz")
                else:
                    softmax_fname = None

                results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,
                                                         ((softmax_pred, join(output_folder, fname + ".nii.gz"),
                                                           properties, interpolation_order, None, None, None,
                                                           softmax_fname, None, force_separate_z,
                                                           interpolation_order_z, False),
                                                          )
                                                         )
                               )

        # block until all asynchronous exports have finished
        _ = [i.get() for i in results]
        self.print_to_log_file("finished prediction")

        # evaluate raw predictions
        self.print_to_log_file("evaluation of raw predictions")
        # this writes a csv file into output_folder
        evaluate_regions(output_folder, self.gt_niftis_folder, self.evaluation_regions)
        csv_file = np.loadtxt(join(output_folder, 'summary.csv'), skiprows=1, dtype=str, delimiter=',')[:, 1:]

        # these are the values that are compute with np.nanmean aggregation
        # NOTE(review): row index -4 is assumed to be the nanmean aggregate row of
        # summary.csv — confirm against the evaluate_regions output format
        whole, core, enhancing = csv_file[-4, :].astype(float)

        # do some cleanup
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        self.network.train(current_mode)
        validation_end = time()
        self.print_to_log_file('Running the validation took %f seconds' % (validation_end - validation_start))
        self.print_to_log_file('(the time needed for validation is included in the total epoch time!)')

        return whole, core, enhancing

    def on_epoch_end(self):
        """
        Runs the full validation every self.validate_every epochs and triggers early
        stopping (returns False) once 97% of the reference mean Dice is reached.
        """
        return_value = True

        # on epoch end is called before the epoch counter is incremented, so we need to do that here to get the correct epoch number
        if (self.epoch + 1) % self.validate_every == 0:
            whole, core, enhancing = self.validate(do_mirroring=False, use_sliding_window=True,
                                                   step_size=0.5,
                                                   save_softmax=False,
                                                   use_gaussian=True, overwrite=True,
                                                   validation_folder_name='validation_after_ep_%04.0d' % self.epoch,
                                                   debug=False, all_in_gpu=True)
            here = np.mean((whole, core, enhancing))
            self.print_to_log_file("After epoch %d: whole %0.4f core %0.4f enhancing: %0.4f" %
                                   (self.epoch, whole, core, enhancing))
            self.print_to_log_file("Mean: %0.4f" % here)

            # now we need to figure out if we are done
            # reference Dice values of a fully trained nnU-Net on BraTS (whole, core, enhancing)
            fully_trained_nnunet = (0.911, 0.8739, 0.7848)
            mean_dice = np.mean(fully_trained_nnunet)
            target = 0.97 * mean_dice
            self.all_val_eval_metrics.append(here)
            self.print_to_log_file("Target mean: %0.4f" % target)
            if here >= target:
                self.print_to_log_file("I am done!")
                self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model"))
                return_value = False  # this triggers early stopping

        ret_old = super().on_epoch_end()
        # if we do not achieve the target accuracy in 1000 epochs then we need to stop the training. This is not built
        # to run longer than 1000 epochs
        if not ret_old:
            return_value = ret_old

        return return_value
| 9,982
| 49.933673
| 132
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_shorter.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
class nnUNetTrainerV2CascadeFullRes_shorter(nnUNetTrainerV2CascadeFullRes):
    """Cascade full-resolution trainer that trains for 500 epochs instead of the default."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
                         unpack_data, deterministic, previous_trainer, fp16)
        # halve the default training length
        self.max_num_epochs = 500
| 1,324
| 49.961538
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_shorter_lowerLR.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
class nnUNetTrainerV2CascadeFullRes_shorter_lowerLR(nnUNetTrainerV2CascadeFullRes):
    """Cascade full-resolution trainer: 500 epochs and a lower initial learning rate of 1e-3."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
                         unpack_data, deterministic, previous_trainer, fp16)
        # shorter schedule combined with a reduced starting LR
        self.max_num_epochs = 500
        self.initial_lr = 1e-3
| 1,363
| 49.518519
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_lowerLR.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
class nnUNetTrainerV2CascadeFullRes_lowerLR(nnUNetTrainerV2CascadeFullRes):
    """Cascade full-resolution trainer with a lower initial learning rate of 1e-3."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
                         unpack_data, deterministic, previous_trainer, fp16)
        # only change vs the parent: reduced starting LR
        self.initial_lr = 1e-3
| 1,321
| 49.846154
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_DAVariants.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
class nnUNetTrainerV2CascadeFullRes_noConnComp(nnUNetTrainerV2CascadeFullRes):
    """Cascade DA variant: keeps the random binary transforms on the previous-stage segmentation
    but disables connected-component removal (p=0)."""

    def setup_DA_params(self):
        super().setup_DA_params()
        self.data_aug_params.update({
            'cascade_do_cascade_augmentations': True,
            'cascade_random_binary_transform_p': 0.4,
            'cascade_random_binary_transform_p_per_label': 1,
            'cascade_random_binary_transform_size': (1, 8),
            'cascade_remove_conn_comp_p': 0.0,
            'cascade_remove_conn_comp_max_size_percent_threshold': 0.15,
            'cascade_remove_conn_comp_fill_with_other_class_p': 0.0,
        })
class nnUNetTrainerV2CascadeFullRes_smallerBinStrel(nnUNetTrainerV2CascadeFullRes):
    """Cascade DA variant: uses a smaller structuring-element size range (1, 5) for the random
    binary transforms applied to the previous-stage segmentation."""

    def setup_DA_params(self):
        super().setup_DA_params()
        self.data_aug_params.update({
            'cascade_do_cascade_augmentations': True,
            'cascade_random_binary_transform_p': 0.4,
            'cascade_random_binary_transform_p_per_label': 1,
            'cascade_random_binary_transform_size': (1, 5),
            'cascade_remove_conn_comp_p': 0.2,
            'cascade_remove_conn_comp_max_size_percent_threshold': 0.15,
            'cascade_remove_conn_comp_fill_with_other_class_p': 0.0,
        })
class nnUNetTrainerV2CascadeFullRes_EducatedGuess(nnUNetTrainerV2CascadeFullRes):
    """Cascade DA variant: hand-tuned augmentation probabilities (p=0.5 per sample and per label,
    small structuring elements, conn-comp removal at p=0.2)."""

    def setup_DA_params(self):
        super().setup_DA_params()
        self.data_aug_params.update({
            'cascade_do_cascade_augmentations': True,
            'cascade_random_binary_transform_p': 0.5,
            'cascade_random_binary_transform_p_per_label': 0.5,
            'cascade_random_binary_transform_size': (1, 5),
            'cascade_remove_conn_comp_p': 0.2,
            'cascade_remove_conn_comp_max_size_percent_threshold': 0.10,
            'cascade_remove_conn_comp_fill_with_other_class_p': 0.0,
        })
class nnUNetTrainerV2CascadeFullRes_EducatedGuess2(nnUNetTrainerV2CascadeFullRes):
    """Cascade DA variant: like EducatedGuess but with connected-component removal disabled."""

    def setup_DA_params(self):
        super().setup_DA_params()
        self.data_aug_params.update({
            'cascade_do_cascade_augmentations': True,
            'cascade_random_binary_transform_p': 0.5,
            'cascade_random_binary_transform_p_per_label': 0.5,
            'cascade_random_binary_transform_size': (1, 5),
            'cascade_remove_conn_comp_p': 0.0,
            'cascade_remove_conn_comp_max_size_percent_threshold': 0.10,
            'cascade_remove_conn_comp_fill_with_other_class_p': 0.0,
        })
class nnUNetTrainerV2CascadeFullRes_EducatedGuess3(nnUNetTrainerV2CascadeFullRes):
    """Cascade DA variant: binary transform always applied per sample (p=1) with a lower per-label
    probability (0.33); connected-component removal disabled."""

    def setup_DA_params(self):
        super().setup_DA_params()
        self.data_aug_params.update({
            'cascade_do_cascade_augmentations': True,
            'cascade_random_binary_transform_p': 1,
            'cascade_random_binary_transform_p_per_label': 0.33,
            'cascade_random_binary_transform_size': (1, 5),
            'cascade_remove_conn_comp_p': 0.0,
            'cascade_remove_conn_comp_max_size_percent_threshold': 0.10,
            'cascade_remove_conn_comp_fill_with_other_class_p': 0.0,
        })
| 4,283
| 47.681818
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_ForceBD.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_ForceBD(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that always computes the dice loss over the whole batch
    (batch dice), regardless of what the caller passed."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        # ignore the incoming batch_dice flag and force it on
        super().__init__(plans_file, fold, output_folder, dataset_directory, True, stage, unpack_data,
                         deterministic, fp16)
| 1,180
| 46.24
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_CEGDL.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.loss_functions.dice_loss import GDL_and_CE_loss
class nnUNetTrainerV2_Loss_CEGDL(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with generalized Dice + cross-entropy instead of the
    default soft Dice + CE."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        gdl_kwargs = {'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}
        self.loss = GDL_and_CE_loss(gdl_kwargs, {})
| 1,331
| 50.230769
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.loss_functions.dice_loss import SoftDiceLoss
from nnunet.utilities.nd_softmax import softmax_helper
class nnUNetTrainerV2_Loss_Dice(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with pure soft Dice loss (background channel excluded)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.loss = SoftDiceLoss(apply_nonlin=softmax_helper, batch_dice=self.batch_dice,
                                 smooth=1e-5, do_bg=False)
class nnUNetTrainerV2_Loss_DicewithBG(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with pure soft Dice loss including the background channel."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.loss = SoftDiceLoss(apply_nonlin=softmax_helper, batch_dice=self.batch_dice,
                                 smooth=1e-5, do_bg=True)
| 1,936
| 52.805556
| 131
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice_squared.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.loss_functions.dice_loss import SoftDiceLossSquared
from nnunet.utilities.nd_softmax import softmax_helper
class nnUNetTrainerV2_Loss_Dice_squared(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with the squared-denominator soft Dice loss and a
    lower initial learning rate (1e-3)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.initial_lr = 1e-3
        self.loss = SoftDiceLossSquared(apply_nonlin=softmax_helper, batch_dice=self.batch_dice,
                                        smooth=1e-5, do_bg=False)
| 1,462
| 51.25
| 138
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_DiceTopK10.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.loss_functions.dice_loss import DC_and_topk_loss
class nnUNetTrainerV2_Loss_DiceTopK10(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with soft Dice combined with top-k (k=10) cross-entropy."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        dice_kwargs = {'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}
        self.loss = DC_and_topk_loss(dice_kwargs, {'k': 10})
| 1,382
| 50.222222
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_graduallyTransitionFromCEToDice.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_graduallyTransitionFromCEToDice(nnUNetTrainerV2):
    """Trainer that starts with pure cross-entropy and linearly blends over to pure Dice:
    epochs 0-500 use CE only, epochs 500-750 interpolate the weights linearly, and the
    remaining epochs use Dice only."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # start with CE only; update_loss() rebalances the weights as training progresses
        self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {},
                                   weight_ce=2, weight_dice=0)

    def update_loss(self):
        # piecewise schedule: CE only -> linear blend -> Dice only
        if self.epoch <= 500:
            weight_ce, weight_dice = 2, 0
        elif self.epoch <= 750:
            weight_ce = 2 - 2 / 250 * (self.epoch - 500)
            weight_dice = 0 + 2 / 250 * (self.epoch - 500)
        elif self.epoch <= self.max_num_epochs:
            weight_ce, weight_dice = 0, 2
        else:
            raise RuntimeError("Invalid epoch: %d" % self.epoch)
        self.print_to_log_file("weight ce", weight_ce, "weight dice", weight_dice)
        # rebuild the loss with the new weights and re-wrap it for deep supervision
        self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {},
                                   weight_ce=weight_ce, weight_dice=weight_dice)
        self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)

    def on_epoch_end(self):
        # keep the loss weights in sync with the epoch counter
        ret = super().on_epoch_end()
        self.update_loss()
        return ret

    def load_checkpoint_ram(self, checkpoint, train=True):
        # restore the loss weights matching the checkpoint's epoch
        ret = super().load_checkpoint_ram(checkpoint, train)
        self.update_loss()
        return ret
| 2,667
| 44.220339
| 131
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_TopK10.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.loss_functions.TopK_loss import TopKLoss
class nnUNetTrainerV2_Loss_TopK10(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with a pure top-k (k=10) cross-entropy loss."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.loss = TopKLoss(k=10)


# aliases so the identical configuration can be launched several times under distinct trainer names
nnUNetTrainerV2_Loss_TopK10_copy1 = nnUNetTrainerV2_Loss_TopK10
nnUNetTrainerV2_Loss_TopK10_copy2 = nnUNetTrainerV2_Loss_TopK10
nnUNetTrainerV2_Loss_TopK10_copy3 = nnUNetTrainerV2_Loss_TopK10
nnUNetTrainerV2_Loss_TopK10_copy4 = nnUNetTrainerV2_Loss_TopK10
| 1,514
| 44.909091
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_ForceSD.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_ForceSD(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that always computes the dice loss per sample
    (sample dice), regardless of what the caller passed."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        # ignore the incoming batch_dice flag and force it off
        super().__init__(plans_file, fold, output_folder, dataset_directory, False, stage, unpack_data,
                         deterministic, fp16)
| 1,181
| 46.28
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice_lr1en3.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNet_variants.loss_function.nnUNetTrainerV2_Loss_Dice import \
nnUNetTrainerV2_Loss_Dice, nnUNetTrainerV2_Loss_DicewithBG
class nnUNetTrainerV2_Loss_Dice_LR1en3(nnUNetTrainerV2_Loss_Dice):
    """Pure soft Dice trainer with a lower initial learning rate (1e-3)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # only change vs. the parent: a smaller starting LR
        self.initial_lr = 1e-3
class nnUNetTrainerV2_Loss_DicewithBG_LR1en3(nnUNetTrainerV2_Loss_DicewithBG):
    """Soft Dice (with background) trainer with a lower initial learning rate (1e-3)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # only change vs. the parent: a smaller starting LR
        self.initial_lr = 1e-3
| 1,743
| 48.828571
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_CE.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_Loss_CE(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with plain cross-entropy only (no Dice term)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # replaces the default combined loss with CE alone
        self.loss = RobustCrossEntropyLoss()
| 1,276
| 52.208333
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_MCC.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.loss_functions.dice_loss import MCCLoss
from nnunet.utilities.nd_softmax import softmax_helper
class nnUNetTrainerV2_Loss_MCC(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with the Matthews correlation coefficient loss
    (background included) and a lower initial learning rate (1e-3)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.initial_lr = 1e-3
        self.loss = MCCLoss(apply_nonlin=softmax_helper, batch_mcc=self.batch_dice,
                            do_bg=True, smooth=0.0)
class nnUNetTrainerV2_Loss_MCCnoBG(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with the Matthews correlation coefficient loss
    (background excluded) and a lower initial learning rate (1e-3)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.initial_lr = 1e-3
        self.loss = MCCLoss(apply_nonlin=softmax_helper, batch_mcc=self.batch_dice,
                            do_bg=False, smooth=0.0)
| 1,943
| 50.157895
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_focalLoss.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from functools import partial
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
def sigmoid_focal_loss(
        outputs: torch.Tensor,
        targets: torch.Tensor,
        gamma: float = 2.0,
        alpha: float = 0.25,
        reduction: str = "mean"
):
    """
    Binary focal loss between target and output logits.

    Source https://github.com/BloodAxe/pytorch-toolbelt
    See :class:`~pytorch_toolbelt.losses` for details.

    Args:
        outputs: logits tensor of arbitrary shape
        targets: tensor of the same shape as ``outputs``
        gamma: focusing exponent applied to (1 - p_t)
        alpha: class-balance factor (``None`` disables balancing)
        reduction: "none" | "mean" | "sum" | "batchwise_mean"

    See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/loss/losses.py  # noqa: E501
    """
    targets = targets.type(outputs.type())
    # log(p_t) per element: the negated BCE-with-logits
    logpt = -F.binary_cross_entropy_with_logits(
        outputs, targets, reduction="none"
    )
    pt = torch.exp(logpt)
    # focal modulation: down-weight well-classified elements
    loss = -((1 - pt).pow(gamma)) * logpt
    if alpha is not None:
        # alpha for positives, (1 - alpha) for negatives
        loss = loss * (alpha * targets + (1 - alpha) * (1 - targets))
    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    if reduction == "batchwise_mean":
        return loss.sum(0)
    return loss
def reduced_focal_loss(
        outputs: torch.Tensor,
        targets: torch.Tensor,
        threshold: float = 0.5,
        gamma: float = 2.0,
        reduction="mean"
):
    """
    Reduced focal loss between target and output logits.

    Source https://github.com/BloodAxe/pytorch-toolbelt
    See :class:`~pytorch_toolbelt.losses` for details.

    Args:
        outputs: logits tensor of arbitrary shape
        targets: tensor of the same shape as ``outputs``
        threshold: probability below which no down-weighting is applied
        gamma: exponent of the reduction factor
        reduction: "none" | "mean" | "sum" | "batchwise_mean"
            ("batchwise_mean" computes the summed loss per sample in the batch)

    See https://arxiv.org/abs/1903.01347
    """
    targets = targets.type(outputs.type())
    # log(p_t) per element: the negated BCE-with-logits
    logpt = -F.binary_cross_entropy_with_logits(
        outputs, targets, reduction="none"
    )
    pt = torch.exp(logpt)
    # scale only confident predictions (p_t >= threshold); others keep full weight
    focal_reduction = ((1. - pt) / threshold).pow(gamma)
    focal_reduction[pt < threshold] = 1
    loss = -focal_reduction * logpt
    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    if reduction == "batchwise_mean":
        return loss.sum(0)
    return loss
class FocalLossBinary(_Loss):
    """Binary focal loss module; optionally uses the 'reduced' variant and can skip an ignore label."""

    def __init__(
        self,
        ignore: int = None,
        reduced: bool = False,
        gamma: float = 2.0,
        alpha: float = 0.25,
        threshold: float = 0.5,
        reduction: str = "mean",
    ):
        """
        Compute focal loss for binary classification problem.
        """
        super().__init__()
        self.ignore = ignore
        # bind the chosen focal-loss flavor with its hyperparameters up front
        if reduced:
            self.loss_fn = partial(
                reduced_focal_loss,
                gamma=gamma,
                threshold=threshold,
                reduction=reduction,
            )
        else:
            self.loss_fn = partial(
                sigmoid_focal_loss,
                gamma=gamma,
                alpha=alpha,
                reduction=reduction,
            )

    def forward(self, logits, targets):
        """
        Args:
            logits: [bs; ...]
            targets: [bs; ...]
        """
        targets = targets.view(-1)
        logits = logits.view(-1)
        if self.ignore is not None:
            # drop positions labeled with the ignore value before computing the loss
            keep = targets != self.ignore
            logits = logits[keep]
            targets = targets[keep]
        return self.loss_fn(logits, targets)
class FocalLossMultiClass(FocalLossBinary):
    """
    Multi-class focal loss computed one-vs-rest: the binary focal loss is summed over all
    class channels. Positions labeled with the ignore value are excluded.
    """

    def forward(self, logits, targets):
        """
        Args:
            logits: [bs; num_classes; ...]
            targets: [bs; ...]
        """
        num_classes = logits.size(1)
        targets = targets.view(-1)
        logits = logits.view(-1, num_classes)
        # precompute the keep-mask once; reused for every class channel
        keep = targets != self.ignore if self.ignore is not None else None
        total = 0
        for cls_idx in range(num_classes):
            cls_target = (targets == cls_idx).long()
            cls_input = logits[..., cls_idx]
            if keep is not None:
                cls_target = cls_target[keep]
                cls_input = cls_input[keep]
            total = total + self.loss_fn(cls_input, cls_target)
        return total
class nnUNetTrainerV2_focalLoss(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with the multi-class focal loss defined above."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # replaces the default combined loss with focal loss (default gamma/alpha)
        self.loss = FocalLossMultiClass()
| 6,748
| 30.834906
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/learning_rate/poly_lr.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
    """Polynomial learning-rate decay: initial_lr * (1 - epoch/max_epochs)**exponent."""
    remaining_fraction = 1 - epoch / max_epochs
    return initial_lr * remaining_fraction ** exponent
| 807
| 43.888889
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/data_augmentation/custom_transforms.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.transforms import AbstractTransform
class RemoveKeyTransform(AbstractTransform):
    """Deletes one key from the batch dict; silently does nothing if the key is absent."""

    def __init__(self, key_to_remove):
        self.key_to_remove = key_to_remove

    def __call__(self, **data_dict):
        data_dict.pop(self.key_to_remove, None)
        return data_dict
class MaskTransform(AbstractTransform):
    def __init__(self, dct_for_where_it_was_used, mask_idx_in_seg=1, set_outside_to=0, data_key="data", seg_key="seg"):
        """
        data[mask < 0] = set_outside_to
        Sets everything outside the mask to `set_outside_to`. CAREFUL! outside is defined as < 0,
        not == 0 (in the mask channel)!

        :param dct_for_where_it_was_used: per-channel mapping; only data channels whose entry is
            truthy get masked
        :param mask_idx_in_seg: index of the seg channel that carries the mask
        :param set_outside_to: value written into data outside the mask
        :param data_key: key of the data array in the batch dict
        :param seg_key: key of the seg array in the batch dict
        """
        self.dct_for_where_it_was_used = dct_for_where_it_was_used
        self.seg_key = seg_key
        self.data_key = data_key
        self.set_outside_to = set_outside_to
        self.mask_idx_in_seg = mask_idx_in_seg

    def __call__(self, **data_dict):
        seg = data_dict.get(self.seg_key)
        # bugfix: the original check used '<', which let seg.shape[1] == mask_idx_in_seg slip
        # through and then crash with an IndexError at seg[b, mask_idx_in_seg]. '<=' rejects
        # every seg that lacks the mask channel.
        if seg is None or seg.shape[1] <= self.mask_idx_in_seg:
            raise Warning("mask not found, seg may be missing or seg[:, mask_idx_in_seg] may not exist")
        data = data_dict.get(self.data_key)
        for b in range(data.shape[0]):
            mask = seg[b, self.mask_idx_in_seg]
            for c in range(data.shape[1]):
                if self.dct_for_where_it_was_used[c]:
                    # zero out (or overwrite) everything strictly outside the mask
                    data[b, c][mask < 0] = self.set_outside_to
        data_dict[self.data_key] = data
        return data_dict
def convert_3d_to_2d_generator(data_dict):
    """Folds the depth axis of 'data' and 'seg' into the channel axis
    ((b, c, z, x, y) -> (b, c*z, x, y)) and stores the original shapes under
    'orig_shape_data' / 'orig_shape_seg' so the operation can be undone."""
    for key in ('data', 'seg'):
        full_shape = data_dict[key].shape
        data_dict[key] = data_dict[key].reshape(
            (full_shape[0], full_shape[1] * full_shape[2], full_shape[3], full_shape[4]))
        data_dict['orig_shape_' + key] = full_shape
    return data_dict
def convert_2d_to_3d_generator(data_dict):
    """Inverse of convert_3d_to_2d_generator: restores (b, c, z) from the stored original
    shapes while keeping the (possibly changed) trailing spatial dimensions."""
    for key in ('data', 'seg'):
        orig = data_dict['orig_shape_' + key]
        flat = data_dict[key].shape
        data_dict[key] = data_dict[key].reshape(
            (orig[0], orig[1], orig[2], flat[-2], flat[-1]))
    return data_dict
class Convert3DTo2DTransform(AbstractTransform):
    """Stateless transform wrapper around convert_3d_to_2d_generator."""

    def __init__(self):
        pass

    def __call__(self, **data_dict):
        return convert_3d_to_2d_generator(data_dict)
class Convert2DTo3DTransform(AbstractTransform):
    """Stateless transform wrapper around convert_2d_to_3d_generator."""

    def __init__(self):
        pass

    def __call__(self, **data_dict):
        return convert_2d_to_3d_generator(data_dict)
class ConvertSegmentationToRegionsTransform(AbstractTransform):
    def __init__(self, regions: dict, seg_key: str = "seg", output_key: str = "seg", seg_channel: int = 0):
        """
        Converts an integer label map into a multi-channel region encoding.

        `regions` is a dict mapping each region key to the collection of class labels merged into
        that region, e.g. {'a': (1, 2), 'b': (2,)} produces 2 output channels: one covering the
        area of labels 1&2 and one covering just label 2.

        :param regions: dict of {region_key: iterable of class labels}
        :param seg_key: key of the input segmentation in the batch dict
        :param output_key: key under which the region encoding is stored (may equal seg_key)
        :param seg_channel: channel of the input segmentation to read labels from
        """
        self.seg_channel = seg_channel
        self.output_key = output_key
        self.seg_key = seg_key
        self.regions = regions

    def __call__(self, **data_dict):
        seg = data_dict.get(self.seg_key)
        if seg is not None:
            in_shape = seg.shape
            out_shape = list(in_shape)
            out_shape[1] = len(self.regions)
            region_output = np.zeros(out_shape, dtype=seg.dtype)
            for b in range(in_shape[0]):
                labels = seg[b, self.seg_channel]
                for r, region_key in enumerate(self.regions.keys()):
                    # a voxel belongs to the region if it carries any of the region's labels
                    for lbl in self.regions[region_key]:
                        region_output[b, r][labels == lbl] = 1
            data_dict[self.output_key] = region_output
        return data_dict
| 4,821
| 37.887097
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/data_augmentation/pyramid_augmentations.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from skimage.morphology import label, ball
from skimage.morphology.binary import binary_erosion, binary_dilation, binary_closing, binary_opening
import numpy as np
from batchgenerators.transforms import AbstractTransform
class RemoveRandomConnectedComponentFromOneHotEncodingTransform(AbstractTransform):
    """Randomly zeroes out one connected component in one-hot encoded channels.

    Used for cascade training: the previous stage's one-hot prediction (carried in
    extra data channels) is corrupted so the next stage cannot blindly trust it.
    """
    def __init__(self, channel_idx, key="data", p_per_sample=0.2, fill_with_other_class_p=0.25,
                 dont_do_if_covers_more_than_X_percent=0.25, p_per_label=1):
        """
        :param channel_idx: channel(s) to operate on; can be list or int
        :param key: dict key of the array to modify
        :param p_per_sample: probability of applying the transform to a sample
        :param fill_with_other_class_p: probability of assigning the removed component to another channel
        :param dont_do_if_covers_more_than_X_percent: components covering more than this fraction of
            the image are never removed; dont_do_if_covers_more_than_X_percent=0.25 is 25\%!
        :param p_per_label: probability of applying the transform to each selected channel
        """
        self.p_per_label = p_per_label
        self.dont_do_if_covers_more_than_X_percent = dont_do_if_covers_more_than_X_percent
        self.fill_with_other_class_p = fill_with_other_class_p
        self.p_per_sample = p_per_sample
        self.key = key
        if not isinstance(channel_idx, (list, tuple)):
            channel_idx = [channel_idx]
        self.channel_idx = channel_idx
    def __call__(self, **data_dict):
        data = data_dict.get(self.key)
        for b in range(data.shape[0]):
            if np.random.uniform() < self.p_per_sample:
                for c in self.channel_idx:
                    if np.random.uniform() < self.p_per_label:
                        workon = np.copy(data[b, c])
                        num_voxels = np.prod(workon.shape, dtype=np.uint64)
                        # connected component labeling of the binary channel
                        lab, num_comp = label(workon, return_num=True)
                        if num_comp > 0:
                            component_ids = []
                            component_sizes = []
                            for i in range(1, num_comp + 1):
                                component_ids.append(i)
                                component_sizes.append(np.sum(lab == i))
                            # only components below the size threshold are removal candidates
                            component_ids = [i for i, j in zip(component_ids, component_sizes) if j < num_voxels*self.dont_do_if_covers_more_than_X_percent]
                            #_ = component_ids.pop(np.argmax(component_sizes))
                            #else:
                            #    component_ids = list(range(1, num_comp + 1))
                            if len(component_ids) > 0:
                                random_component = np.random.choice(component_ids)
                                data[b, c][lab == random_component] = 0
                                # optionally hand the removed region to a different label channel
                                if np.random.uniform() < self.fill_with_other_class_p:
                                    other_ch = [i for i in self.channel_idx if i != c]
                                    if len(other_ch) > 0:
                                        other_class = np.random.choice(other_ch)
                                        data[b, other_class][lab == random_component] = 1
        data_dict[self.key] = data
        return data_dict
class MoveSegAsOneHotToData(AbstractTransform):
    """One-hot encodes one segmentation channel and appends it to the data array.

    Used in the cascade so that the previous stage's prediction travels with the
    image data and receives the same spatial transforms.
    """
    def __init__(self, channel_id, all_seg_labels, key_origin="seg", key_target="data", remove_from_origin=True):
        self.remove_from_origin = remove_from_origin
        self.all_seg_labels = all_seg_labels
        self.key_target = key_target
        self.key_origin = key_origin
        self.channel_id = channel_id

    def __call__(self, **data_dict):
        source = data_dict.get(self.key_origin)
        destination = data_dict.get(self.key_target)
        # keep the channel axis when slicing out the segmentation channel
        selected = source[:, self.channel_id:self.channel_id + 1]
        onehot = np.zeros((selected.shape[0], len(self.all_seg_labels), *selected.shape[2:]),
                          dtype=selected.dtype)
        for channel, lbl in enumerate(self.all_seg_labels):
            onehot[:, channel][selected[:, 0] == lbl] = 1
        data_dict[self.key_target] = np.concatenate((destination, onehot), 1)
        if self.remove_from_origin:
            # drop the moved channel from the origin array
            keep = [i for i in range(source.shape[1]) if i != self.channel_id]
            source = source[:, keep]
        data_dict[self.key_origin] = source
        return data_dict
class ApplyRandomBinaryOperatorTransform(AbstractTransform):
    """Applies a random binary morphological operation to one-hot encoded channels.

    Used for cascade training: the previous stage's one-hot prediction is distorted
    (dilated/eroded/opened/closed) so the next stage cannot blindly trust it.
    """
    def __init__(self, channel_idx, p_per_sample=0.3, any_of_these=(binary_dilation, binary_erosion, binary_closing,
                                                                    binary_opening),
                 key="data", strel_size=(1, 10), p_per_label=1):
        """
        :param channel_idx: channel(s) to operate on; int or list (tuples are rejected)
        :param p_per_sample: probability of applying the transform to a sample
        :param any_of_these: pool of binary operations to draw from
        :param key: dict key of the array to modify
        :param strel_size: range from which the ball structuring element radius is drawn
        :param p_per_label: probability of applying the transform to each channel
        """
        self.p_per_label = p_per_label
        self.strel_size = strel_size
        self.key = key
        self.any_of_these = any_of_these
        self.p_per_sample = p_per_sample
        assert not isinstance(channel_idx, tuple), "bäh"
        if not isinstance(channel_idx, list):
            channel_idx = [channel_idx]
        self.channel_idx = channel_idx
    def __call__(self, **data_dict):
        data = data_dict.get(self.key)
        for b in range(data.shape[0]):
            if np.random.uniform() < self.p_per_sample:
                # channels are processed in random order
                ch = deepcopy(self.channel_idx)
                np.random.shuffle(ch)
                for c in ch:
                    if np.random.uniform() < self.p_per_label:
                        operation = np.random.choice(self.any_of_these)
                        selem = ball(np.random.uniform(*self.strel_size))
                        workon = np.copy(data[b, c]).astype(int)
                        res = operation(workon, selem).astype(workon.dtype)
                        data[b, c] = res
                        # if class was added, we need to remove it in ALL other channels to keep one hot encoding
                        # properties
                        # we modify data
                        other_ch = [i for i in ch if i != c]
                        if len(other_ch) > 0:
                            was_added_mask = (res - workon) > 0
                            for oc in other_ch:
                                data[b, oc][was_added_mask] = 0
                            # if class was removed, leave it at background
        data_dict[self.key] = data
        return data_dict
class ApplyRandomBinaryOperatorTransform2(AbstractTransform):
    """Variant of ApplyRandomBinaryOperatorTransform restricted to expanding
    operations (dilation/closing); body is otherwise identical."""
    def __init__(self, channel_idx, p_per_sample=0.3, p_per_label=0.3, any_of_these=(binary_dilation, binary_closing),
                 key="data", strel_size=(1, 10)):
        """
        2019_11_22: I have no idea what the purpose of this was...
        the same as above but here we should use only expanding operations. Expansions will replace other labels
        :param channel_idx: can be list or int
        :param p_per_sample:
        :param any_of_these:
        :param fill_diff_with_other_class:
        :param key:
        :param strel_size:
        """
        self.strel_size = strel_size
        self.key = key
        self.any_of_these = any_of_these
        self.p_per_sample = p_per_sample
        self.p_per_label = p_per_label
        assert not isinstance(channel_idx, tuple), "bäh"
        if not isinstance(channel_idx, list):
            channel_idx = [channel_idx]
        self.channel_idx = channel_idx
    def __call__(self, **data_dict):
        data = data_dict.get(self.key)
        for b in range(data.shape[0]):
            if np.random.uniform() < self.p_per_sample:
                # channels are processed in random order
                ch = deepcopy(self.channel_idx)
                np.random.shuffle(ch)
                for c in ch:
                    if np.random.uniform() < self.p_per_label:
                        operation = np.random.choice(self.any_of_these)
                        selem = ball(np.random.uniform(*self.strel_size))
                        workon = np.copy(data[b, c]).astype(int)
                        res = operation(workon, selem).astype(workon.dtype)
                        data[b, c] = res
                        # if class was added, we need to remove it in ALL other channels to keep one hot encoding
                        # properties
                        # we modify data
                        other_ch = [i for i in ch if i != c]
                        if len(other_ch) > 0:
                            was_added_mask = (res - workon) > 0
                            for oc in other_ch:
                                data[b, oc][was_added_mask] = 0
                            # if class was removed, leave it at background
        data_dict[self.key] = data
        return data_dict
| 9,007
| 46.661376
| 156
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/data_augmentation/default_data_augmentation.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from batchgenerators.dataloading import MultiThreadedAugmenter, SingleThreadedAugmenter
from batchgenerators.transforms import DataChannelSelectionTransform, SegChannelSelectionTransform, SpatialTransform, \
GammaTransform, MirrorTransform, Compose
from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, \
ContrastAugmentationTransform, BrightnessTransform
from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
from batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform
from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor
from nnunet.training.data_augmentation.custom_transforms import Convert3DTo2DTransform, Convert2DTo3DTransform, \
MaskTransform, ConvertSegmentationToRegionsTransform, RemoveKeyTransform
from nnunet.training.data_augmentation.downsampling import DownsampleSegForDSTransform3, DownsampleSegForDSTransform2
from nnunet.training.data_augmentation.pyramid_augmentations import MoveSegAsOneHotToData, \
ApplyRandomBinaryOperatorTransform, \
RemoveRandomConnectedComponentFromOneHotEncodingTransform
import os
# Default augmentation hyperparameters for 3D training. Probabilities are
# per-sample application probabilities; angles are in radians.
default_3D_augmentation_params = {
    "selected_data_channels": None,
    "selected_seg_channels": None,
    # elastic deformation
    "do_elastic": True,
    "elastic_deform_alpha": (0., 900.),
    "elastic_deform_sigma": (9., 13.),
    "p_eldef": 0.2,
    # scaling
    "do_scaling": True,
    "scale_range": (0.85, 1.25),
    "independent_scale_factor_for_each_axis": False,
    "p_independent_scale_per_axis": 1,
    "p_scale": 0.2,
    # rotation (+-15 degrees around each axis)
    "do_rotation": True,
    "rotation_x": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),
    "rotation_y": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),
    "rotation_z": (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),
    "rotation_p_per_axis": 1,
    "p_rot": 0.2,
    "random_crop": False,
    "random_crop_dist_to_border": None,
    # gamma augmentation
    "do_gamma": True,
    "gamma_retain_stats": True,
    "gamma_range": (0.7, 1.5),
    "p_gamma": 0.3,
    # mirroring along all three spatial axes
    "do_mirror": True,
    "mirror_axes": (0, 1, 2),
    "dummy_2D": False,
    "mask_was_used_for_normalization": False,
    "border_mode_data": "constant",
    "all_segmentation_labels": None,  # used for cascade
    "move_last_seg_chanel_to_data": False,  # used for cascade
    "cascade_do_cascade_augmentations": False,  # used for cascade
    "cascade_random_binary_transform_p": 0.4,
    "cascade_random_binary_transform_p_per_label": 1,
    "cascade_random_binary_transform_size": (1, 8),
    "cascade_remove_conn_comp_p": 0.2,
    "cascade_remove_conn_comp_max_size_percent_threshold": 0.15,
    "cascade_remove_conn_comp_fill_with_other_class_p": 0.0,
    # additive brightness augmentation
    "do_additive_brightness": False,
    "additive_brightness_p_per_sample": 0.15,
    "additive_brightness_p_per_channel": 0.5,
    "additive_brightness_mu": 0.0,
    "additive_brightness_sigma": 0.1,
    # number of background workers; overridable via the nnUNet_n_proc_DA env variable
    "num_threads": 12 if 'nnUNet_n_proc_DA' not in os.environ else int(os.environ['nnUNet_n_proc_DA']),
    "num_cached_per_thread": 1,
}
# 2D defaults are derived from the 3D ones: milder elastic deformation and
# rotation/mirroring only within the 2D plane.
default_2D_augmentation_params = deepcopy(default_3D_augmentation_params)
default_2D_augmentation_params["elastic_deform_alpha"] = (0., 200.)
default_2D_augmentation_params["elastic_deform_sigma"] = (9., 13.)
# in-plane rotation only (y and z rotation ranges collapse to 0)
default_2D_augmentation_params["rotation_x"] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi)
default_2D_augmentation_params["rotation_y"] = (-0. / 360 * 2. * np.pi, 0. / 360 * 2. * np.pi)
default_2D_augmentation_params["rotation_z"] = (-0. / 360 * 2. * np.pi, 0. / 360 * 2. * np.pi)
# sometimes you have 3d data and a 3d net but cannot augment them properly in 3d due to anisotropy (which is currently
# not supported in batchgenerators). In that case you can 'cheat' and transfer your 3d data into 2d data and
# transform them back after augmentation
default_2D_augmentation_params["dummy_2D"] = False
default_2D_augmentation_params["mirror_axes"] = (0, 1)  # this can be (0, 1, 2) if dummy_2D=True
def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):
    """
    Compute the enlarged patch size needed so that rotating/scaling the patch
    never samples outside the cropped region.

    :param final_patch_size: target patch size after augmentation
    :param rot_x, rot_y, rot_z: rotation angle (radians) or (min, max) range per axis
    :param scale_range: (min, max) scaling factors; min enlarges the patch most
    :return: required pre-augmentation patch size as an int array
    """
    from batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d
    # reduce each angle spec to a single magnitude, capped at 90 degrees
    angle_cap = 90 / 360 * 2. * np.pi
    capped = []
    for angle in (rot_x, rot_y, rot_z):
        if isinstance(angle, (tuple, list)):
            angle = max(np.abs(angle))
        capped.append(min(angle_cap, angle))
    rot_x, rot_y, rot_z = capped
    coords = np.array(final_patch_size)
    final_shape = np.copy(coords)
    if len(coords) == 3:
        # take the elementwise max of the patch extent under each single-axis rotation
        for rotated in (rotate_coords_3d(coords, rot_x, 0, 0),
                        rotate_coords_3d(coords, 0, rot_y, 0),
                        rotate_coords_3d(coords, 0, 0, rot_z)):
            final_shape = np.max(np.vstack((np.abs(rotated), final_shape)), 0)
    elif len(coords) == 2:
        final_shape = np.max(np.vstack((np.abs(rotate_coords_2d(coords, rot_x)), final_shape)), 0)
    # the smallest scale factor zooms in the most and thus needs the largest patch
    final_shape /= min(scale_range)
    return final_shape.astype(int)
def get_default_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,
                             border_val_seg=-1, pin_memory=True,
                             seeds_train=None, seeds_val=None, regions=None):
    """
    Build the default train/val augmentation pipelines, each wrapped in a
    MultiThreadedAugmenter.

    :param dataloader_train: data loader providing training batches
    :param dataloader_val: data loader providing validation batches
    :param patch_size: spatial size produced by the SpatialTransform
    :param params: augmentation parameter dict (see default_3D_augmentation_params)
    :param border_val_seg: fill value for segmentation borders after spatial transforms
    :param pin_memory: whether the augmenters pin memory
    :param seeds_train: per-worker seeds for the training augmenter
    :param seeds_val: per-worker seeds for the validation augmenter
    :param regions: if not None, 'target' is converted to region channels
    :return: (train augmenter, val augmenter)
    """
    assert params.get('mirror') is None, "old version of params, use new keyword do_mirror"
    tr_transforms = []
    if params.get("selected_data_channels") is not None:
        tr_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        tr_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
    if params.get("dummy_2D") is not None and params.get("dummy_2D"):
        tr_transforms.append(Convert3DTo2DTransform())
    tr_transforms.append(SpatialTransform(
        patch_size, patch_center_dist_from_border=None, do_elastic_deform=params.get("do_elastic"),
        alpha=params.get("elastic_deform_alpha"), sigma=params.get("elastic_deform_sigma"),
        do_rotation=params.get("do_rotation"), angle_x=params.get("rotation_x"), angle_y=params.get("rotation_y"),
        angle_z=params.get("rotation_z"), do_scale=params.get("do_scaling"), scale=params.get("scale_range"),
        border_mode_data=params.get("border_mode_data"), border_cval_data=0, order_data=3, border_mode_seg="constant",
        border_cval_seg=border_val_seg,
        order_seg=1, random_crop=params.get("random_crop"), p_el_per_sample=params.get("p_eldef"),
        p_scale_per_sample=params.get("p_scale"), p_rot_per_sample=params.get("p_rot"),
        independent_scale_for_each_axis=params.get("independent_scale_factor_for_each_axis")
    ))
    if params.get("dummy_2D") is not None and params.get("dummy_2D"):
        tr_transforms.append(Convert2DTo3DTransform())
    if params.get("do_gamma"):
        tr_transforms.append(
            GammaTransform(params.get("gamma_range"), False, True, retain_stats=params.get("gamma_retain_stats"),
                           p_per_sample=params["p_gamma"]))
    if params.get("do_mirror"):
        tr_transforms.append(MirrorTransform(params.get("mirror_axes")))
    if params.get("mask_was_used_for_normalization") is not None:
        mask_was_used_for_normalization = params.get("mask_was_used_for_normalization")
        tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))
    tr_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
        tr_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
        # BUGFIX: this condition used to read `... and not None and ...`, a typo for `is not None`
        if params.get("cascade_do_cascade_augmentations") is not None and params.get(
                "cascade_do_cascade_augmentations"):
            tr_transforms.append(ApplyRandomBinaryOperatorTransform(
                channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
                p_per_sample=params.get("cascade_random_binary_transform_p"),
                key="data",
                strel_size=params.get("cascade_random_binary_transform_size")))
            # BUGFIX: fill_with_other_class_p and dont_do_if_covers_more_than_X_percent used to
            # receive each other's values (swapped keyword arguments)
            tr_transforms.append(RemoveRandomConnectedComponentFromOneHotEncodingTransform(
                channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
                key="data",
                p_per_sample=params.get("cascade_remove_conn_comp_p"),
                fill_with_other_class_p=params.get("cascade_remove_conn_comp_fill_with_other_class_p"),
                dont_do_if_covers_more_than_X_percent=params.get(
                    "cascade_remove_conn_comp_max_size_percent_threshold")))
    tr_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    tr_transforms = Compose(tr_transforms)
    batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),
                                                  params.get("num_cached_per_thread"), seeds=seeds_train,
                                                  pin_memory=pin_memory)
    # validation only needs the bookkeeping transforms, no augmentation
    val_transforms = []
    val_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("selected_data_channels") is not None:
        val_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        val_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
        val_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
    val_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    val_transforms = Compose(val_transforms)
    batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1),
                                                params.get("num_cached_per_thread"), seeds=seeds_val,
                                                pin_memory=pin_memory)
    return batchgenerator_train, batchgenerator_val
def get_no_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,
                        border_val_seg=-1,
                        seeds_train=None, seeds_val=None, order_seg=1, order_data=3, deep_supervision_scales=None,
                        soft_ds=False,
                        classes=None, pin_memory=True, regions=None):
    """
    Drop-in replacement for get_default_augmentation that turns off all data augmentation.

    Only bookkeeping transforms remain: channel selection, label cleanup, renaming
    'seg' to 'target', optional region conversion and deep supervision downsampling,
    and conversion to torch tensors.

    :param dataloader_train: data loader providing training batches
    :param dataloader_val: data loader providing validation batches
    :param patch_size: unused; accepted for drop-in signature compatibility
    :param params: augmentation parameter dict (only channel selection and threading entries are used)
    :param border_val_seg: unused; accepted for drop-in signature compatibility
    :param seeds_train: per-worker seeds for the train augmenter (default: range(num_threads))
    :param seeds_val: per-worker seeds for the val augmenter (default: range(num_threads // 2))
    :param order_seg: unused; accepted for drop-in signature compatibility
    :param order_data: unused; accepted for drop-in signature compatibility
    :param deep_supervision_scales: if not None, 'target' is downsampled for deep supervision
    :param soft_ds: use soft (one hot) downsampling; requires classes
    :param classes: class labels, required when soft_ds is True
    :param pin_memory: whether the augmenters pin memory
    :param regions: if not None, 'target' is converted to region channels
    :return: (train augmenter, val augmenter)
    """
    tr_transforms = []
    if params.get("selected_data_channels") is not None:
        tr_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        tr_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    tr_transforms.append(RemoveLabelTransform(-1, 0))
    tr_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
        else:
            tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target',
                                                              output_key='target'))
    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    tr_transforms = Compose(tr_transforms)
    # BUGFIX: seeds_train/seeds_val and pin_memory used to be silently ignored
    # (seeds hardcoded to range(...), pin_memory hardcoded to True); the defaults
    # below preserve the old behavior when the caller passes nothing
    if seeds_train is None:
        seeds_train = range(params.get('num_threads'))
    batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),
                                                  params.get("num_cached_per_thread"),
                                                  seeds=seeds_train, pin_memory=pin_memory)
    batchgenerator_train.restart()
    val_transforms = []
    val_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("selected_data_channels") is not None:
        val_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        val_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    val_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
        else:
            val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target',
                                                               output_key='target'))
    val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    val_transforms = Compose(val_transforms)
    if seeds_val is None:
        seeds_val = range(max(params.get('num_threads') // 2, 1))
    batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1),
                                                params.get("num_cached_per_thread"),
                                                seeds=seeds_val, pin_memory=pin_memory)
    batchgenerator_val.restart()
    return batchgenerator_train, batchgenerator_val
def get_moreDA_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,
                            border_val_seg=-1,
                            seeds_train=None, seeds_val=None, order_seg=1, order_data=3, deep_supervision_scales=None,
                            soft_ds=False,
                            classes=None, pin_memory=True, regions=None):
    """
    nnU-Net v2 augmentation pipeline: the default spatial/gamma/mirror augmentations
    plus Gaussian noise/blur, brightness, contrast, simulated low resolution and
    inverted gamma, with optional deep supervision downsampling.

    :param dataloader_train: data loader providing training batches
    :param dataloader_val: data loader providing validation batches
    :param patch_size: spatial size produced by the SpatialTransform
    :param params: augmentation parameter dict (see default_3D_augmentation_params)
    :param border_val_seg: fill value for segmentation borders after spatial transforms
    :param seeds_train: per-worker seeds for the training augmenter
    :param seeds_val: per-worker seeds for the validation augmenter
    :param order_seg: interpolation order for segmentations in the spatial transform
    :param order_data: interpolation order for image data in the spatial transform
    :param deep_supervision_scales: if not None, 'target' is downsampled for deep supervision
    :param soft_ds: use soft (one hot) downsampling; requires classes
    :param classes: class labels, required when soft_ds is True
    :param pin_memory: whether the augmenters pin memory
    :param regions: if not None, 'target' is converted to region channels
    :return: (train augmenter, val augmenter)
    """
    assert params.get('mirror') is None, "old version of params, use new keyword do_mirror"
    tr_transforms = []
    if params.get("selected_data_channels") is not None:
        tr_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        tr_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
    if params.get("dummy_2D") is not None and params.get("dummy_2D"):
        ignore_axes = (0,)
        tr_transforms.append(Convert3DTo2DTransform())
    else:
        ignore_axes = None
    tr_transforms.append(SpatialTransform(
        patch_size, patch_center_dist_from_border=None,
        do_elastic_deform=params.get("do_elastic"), alpha=params.get("elastic_deform_alpha"),
        sigma=params.get("elastic_deform_sigma"),
        do_rotation=params.get("do_rotation"), angle_x=params.get("rotation_x"), angle_y=params.get("rotation_y"),
        angle_z=params.get("rotation_z"), p_rot_per_axis=params.get("rotation_p_per_axis"),
        do_scale=params.get("do_scaling"), scale=params.get("scale_range"),
        border_mode_data=params.get("border_mode_data"), border_cval_data=0, order_data=order_data,
        border_mode_seg="constant", border_cval_seg=border_val_seg,
        order_seg=order_seg, random_crop=params.get("random_crop"), p_el_per_sample=params.get("p_eldef"),
        p_scale_per_sample=params.get("p_scale"), p_rot_per_sample=params.get("p_rot"),
        independent_scale_for_each_axis=params.get("independent_scale_factor_for_each_axis")
    ))
    if params.get("dummy_2D"):
        tr_transforms.append(Convert2DTo3DTransform())
    # we need to put the color augmentations after the dummy 2d part (if applicable). Otherwise the overloaded color
    # channel gets in the way
    tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
    tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
                                               p_per_channel=0.5))
    tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
    if params.get("do_additive_brightness"):
        tr_transforms.append(BrightnessTransform(params.get("additive_brightness_mu"),
                                                 params.get("additive_brightness_sigma"),
                                                 True, p_per_sample=params.get("additive_brightness_p_per_sample"),
                                                 p_per_channel=params.get("additive_brightness_p_per_channel")))
    tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
    tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
                                                        p_per_channel=0.5,
                                                        order_downsample=0, order_upsample=3, p_per_sample=0.25,
                                                        ignore_axes=ignore_axes))
    tr_transforms.append(
        GammaTransform(params.get("gamma_range"), True, True, retain_stats=params.get("gamma_retain_stats"),
                       p_per_sample=0.1))  # inverted gamma
    if params.get("do_gamma"):
        tr_transforms.append(
            GammaTransform(params.get("gamma_range"), False, True, retain_stats=params.get("gamma_retain_stats"),
                           p_per_sample=params["p_gamma"]))
    if params.get("do_mirror") or params.get("mirror"):
        tr_transforms.append(MirrorTransform(params.get("mirror_axes")))
    if params.get("mask_was_used_for_normalization") is not None:
        mask_was_used_for_normalization = params.get("mask_was_used_for_normalization")
        tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))
    tr_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
        tr_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
        if params.get("cascade_do_cascade_augmentations") is not None and params.get(
                "cascade_do_cascade_augmentations"):
            if params.get("cascade_random_binary_transform_p") > 0:
                tr_transforms.append(ApplyRandomBinaryOperatorTransform(
                    channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
                    p_per_sample=params.get("cascade_random_binary_transform_p"),
                    key="data",
                    strel_size=params.get("cascade_random_binary_transform_size"),
                    p_per_label=params.get("cascade_random_binary_transform_p_per_label")))
            if params.get("cascade_remove_conn_comp_p") > 0:
                # BUGFIX: fill_with_other_class_p and dont_do_if_covers_more_than_X_percent used to
                # receive each other's values (swapped keyword arguments)
                tr_transforms.append(
                    RemoveRandomConnectedComponentFromOneHotEncodingTransform(
                        channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
                        key="data",
                        p_per_sample=params.get("cascade_remove_conn_comp_p"),
                        fill_with_other_class_p=params.get("cascade_remove_conn_comp_fill_with_other_class_p"),
                        dont_do_if_covers_more_than_X_percent=params.get(
                            "cascade_remove_conn_comp_max_size_percent_threshold")))
    tr_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
        else:
            tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target',
                                                              output_key='target'))
    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    tr_transforms = Compose(tr_transforms)
    batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),
                                                  params.get("num_cached_per_thread"),
                                                  seeds=seeds_train, pin_memory=pin_memory)
    # validation only needs the bookkeeping transforms, no augmentation
    val_transforms = []
    val_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("selected_data_channels") is not None:
        val_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        val_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
        val_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
    val_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
        else:
            val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target',
                                                               output_key='target'))
    val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    val_transforms = Compose(val_transforms)
    batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1),
                                                params.get("num_cached_per_thread"),
                                                seeds=seeds_val, pin_memory=pin_memory)
    return batchgenerator_train, batchgenerator_val
def get_insaneDA_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,
                              border_val_seg=-1,
                              seeds_train=None, seeds_val=None, order_seg=1, order_data=3, deep_supervision_scales=None,
                              soft_ds=False,
                              classes=None, pin_memory=True, regions=None):
    """
    More aggressive variant of get_moreDA_augmentation: higher noise/blur/contrast
    probabilities and wider ranges. Signature and return are identical.

    :param dataloader_train: data loader providing training batches
    :param dataloader_val: data loader providing validation batches
    :param patch_size: spatial size produced by the SpatialTransform
    :param params: augmentation parameter dict (see default_3D_augmentation_params)
    :param border_val_seg: fill value for segmentation borders after spatial transforms
    :param seeds_train: per-worker seeds for the training augmenter
    :param seeds_val: per-worker seeds for the validation augmenter
    :param order_seg: interpolation order for segmentations in the spatial transform
    :param order_data: interpolation order for image data in the spatial transform
    :param deep_supervision_scales: if not None, 'target' is downsampled for deep supervision
    :param soft_ds: use soft (one hot) downsampling; requires classes
    :param classes: class labels, required when soft_ds is True
    :param pin_memory: whether the augmenters pin memory
    :param regions: if not None, 'target' is converted to region channels
    :return: (train augmenter, val augmenter)
    """
    assert params.get('mirror') is None, "old version of params, use new keyword do_mirror"
    tr_transforms = []
    if params.get("selected_data_channels") is not None:
        tr_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        tr_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
    if params.get("dummy_2D") is not None and params.get("dummy_2D"):
        ignore_axes = (0,)
        tr_transforms.append(Convert3DTo2DTransform())
    else:
        ignore_axes = None
    tr_transforms.append(SpatialTransform(
        patch_size, patch_center_dist_from_border=None, do_elastic_deform=params.get("do_elastic"),
        alpha=params.get("elastic_deform_alpha"), sigma=params.get("elastic_deform_sigma"),
        do_rotation=params.get("do_rotation"), angle_x=params.get("rotation_x"), angle_y=params.get("rotation_y"),
        angle_z=params.get("rotation_z"), do_scale=params.get("do_scaling"), scale=params.get("scale_range"),
        border_mode_data=params.get("border_mode_data"), border_cval_data=0, order_data=order_data,
        border_mode_seg="constant", border_cval_seg=border_val_seg,
        order_seg=order_seg, random_crop=params.get("random_crop"), p_el_per_sample=params.get("p_eldef"),
        p_scale_per_sample=params.get("p_scale"), p_rot_per_sample=params.get("p_rot"),
        independent_scale_for_each_axis=params.get("independent_scale_factor_for_each_axis"),
        p_independent_scale_per_axis=params.get("p_independent_scale_per_axis")
    ))
    if params.get("dummy_2D"):
        tr_transforms.append(Convert2DTo3DTransform())
    # we need to put the color augmentations after the dummy 2d part (if applicable). Otherwise the overloaded color
    # channel gets in the way
    tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.15))
    tr_transforms.append(GaussianBlurTransform((0.5, 1.5), different_sigma_per_channel=True, p_per_sample=0.2,
                                               p_per_channel=0.5))
    tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.70, 1.3), p_per_sample=0.15))
    tr_transforms.append(ContrastAugmentationTransform(contrast_range=(0.65, 1.5), p_per_sample=0.15))
    tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
                                                        p_per_channel=0.5,
                                                        order_downsample=0, order_upsample=3, p_per_sample=0.25,
                                                        ignore_axes=ignore_axes))
    tr_transforms.append(
        GammaTransform(params.get("gamma_range"), True, True, retain_stats=params.get("gamma_retain_stats"),
                       p_per_sample=0.15))  # inverted gamma
    if params.get("do_additive_brightness"):
        tr_transforms.append(BrightnessTransform(params.get("additive_brightness_mu"),
                                                 params.get("additive_brightness_sigma"),
                                                 True, p_per_sample=params.get("additive_brightness_p_per_sample"),
                                                 p_per_channel=params.get("additive_brightness_p_per_channel")))
    if params.get("do_gamma"):
        tr_transforms.append(
            GammaTransform(params.get("gamma_range"), False, True, retain_stats=params.get("gamma_retain_stats"),
                           p_per_sample=params["p_gamma"]))
    if params.get("do_mirror") or params.get("mirror"):
        tr_transforms.append(MirrorTransform(params.get("mirror_axes")))
    if params.get("mask_was_used_for_normalization") is not None:
        mask_was_used_for_normalization = params.get("mask_was_used_for_normalization")
        tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))
    tr_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
        tr_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
        # BUGFIX: this condition used to read `... and not None and ...`, a typo for `is not None`
        if params.get("cascade_do_cascade_augmentations") is not None and params.get(
                "cascade_do_cascade_augmentations"):
            if params.get("cascade_random_binary_transform_p") > 0:
                tr_transforms.append(ApplyRandomBinaryOperatorTransform(
                    channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
                    p_per_sample=params.get("cascade_random_binary_transform_p"),
                    key="data",
                    strel_size=params.get("cascade_random_binary_transform_size")))
            if params.get("cascade_remove_conn_comp_p") > 0:
                # BUGFIX: fill_with_other_class_p and dont_do_if_covers_more_than_X_percent used to
                # receive each other's values (swapped keyword arguments)
                tr_transforms.append(
                    RemoveRandomConnectedComponentFromOneHotEncodingTransform(
                        channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
                        key="data",
                        p_per_sample=params.get("cascade_remove_conn_comp_p"),
                        fill_with_other_class_p=params.get("cascade_remove_conn_comp_fill_with_other_class_p"),
                        dont_do_if_covers_more_than_X_percent=params.get(
                            "cascade_remove_conn_comp_max_size_percent_threshold")))
    tr_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
        else:
            tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target',
                                                              output_key='target'))
    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    tr_transforms = Compose(tr_transforms)
    batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),
                                                  params.get("num_cached_per_thread"),
                                                  seeds=seeds_train, pin_memory=pin_memory)
    # validation only needs the bookkeeping transforms, no augmentation
    val_transforms = []
    val_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("selected_data_channels") is not None:
        val_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        val_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
        val_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
    val_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
        else:
            val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target',
                                                               output_key='target'))
    val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    val_transforms = Compose(val_transforms)
    batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1),
                                                params.get("num_cached_per_thread"),
                                                seeds=seeds_val, pin_memory=pin_memory)
    return batchgenerator_train, batchgenerator_val
if __name__ == "__main__":
    # smoke test: build a default 3D augmentation pipeline for the Heart task
    from nnunet.training.dataloading.dataset_loading import DataLoader3D, load_dataset
    from nnunet.paths import preprocessing_output_dir
    import os
    import pickle
    t = "Task002_Heart"
    p = os.path.join(preprocessing_output_dir, t)
    dataset = load_dataset(p, 0)
    with open(os.path.join(p, "plans.pkl"), 'rb') as f:
        plans = pickle.load(f)
    # enlarge the sampled patch so rotation/scaling cannot introduce border artifacts
    basic_patch_size = get_patch_size(np.array(plans['stage_properties'][0].patch_size),
                                      default_3D_augmentation_params['rotation_x'],
                                      default_3D_augmentation_params['rotation_y'],
                                      default_3D_augmentation_params['rotation_z'],
                                      default_3D_augmentation_params['scale_range'])
    dl = DataLoader3D(dataset, basic_patch_size, np.array(plans['stage_properties'][0].patch_size).astype(int), 1)
    tr, val = get_default_augmentation(dl, dl, np.array(plans['stage_properties'][0].patch_size).astype(int))
| 34,196
| 53.7152
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/data_augmentation/downsampling.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from batchgenerators.augmentations.utils import convert_seg_image_to_one_hot_encoding_batched, resize_segmentation
from batchgenerators.transforms import AbstractTransform
from torch.nn.functional import avg_pool2d, avg_pool3d
import numpy as np
class DownsampleSegForDSTransform3(AbstractTransform):
    """Produce (soft) one-hot targets for deep supervision.

    For every downsampling scale the segmentation is one-hot encoded and
    average-pooled, so the downsampled maps are smooth rather than 0/1. The
    full-resolution entry stays a plain label map. Only seg channel 0 is used
    and the result is a list of torch tensors, not numpy arrays. Always pass
    ``classes`` -- otherwise weird stuff may happen.
    """

    def __init__(self, ds_scales=(1, 0.5, 0.25), input_key="seg", output_key="seg", classes=None):
        self.ds_scales = ds_scales
        self.input_key = input_key
        self.output_key = output_key
        self.classes = classes

    def __call__(self, **data_dict):
        # only channel 0 of the segmentation is used
        seg = data_dict[self.input_key][:, 0]
        data_dict[self.output_key] = downsample_seg_for_ds_transform3(seg, self.ds_scales, self.classes)
        return data_dict
def downsample_seg_for_ds_transform3(seg, ds_scales=((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25)), classes=None):
    """Create one (soft) segmentation target per deep-supervision scale.

    :param seg: label map as a numpy array of shape (b, x, y(, z))
    :param ds_scales: per-output downsampling factors; all ones = full resolution
    :param classes: class labels used for the one-hot encoding
    :return: list of torch tensors, one per scale. The full-resolution entry is
        the plain label map; downsampled entries are average-pooled one-hot
        encodings (smooth values, not hard 0/1).
    """
    output = []
    # one-hot encode once, then average-pool down to each coarser resolution
    one_hot = torch.from_numpy(convert_seg_image_to_one_hot_encoding_batched(seg, classes))  # b, c,
    for s in ds_scales:
        if all([i == 1 for i in s]):
            # no downsampling at this scale -> keep the plain label map
            output.append(torch.from_numpy(seg))
        else:
            # a scale factor of 1/k corresponds to pooling with kernel size k
            kernel_size = tuple(int(1 / i) for i in s)
            stride = kernel_size
            pad = tuple((i-1) // 2 for i in kernel_size)

            if len(s) == 2:
                pool_op = avg_pool2d
            elif len(s) == 3:
                pool_op = avg_pool3d
            else:
                raise RuntimeError()

            pooled = pool_op(one_hot, kernel_size, stride, pad, count_include_pad=False, ceil_mode=False)

            output.append(pooled)
    return output
class DownsampleSegForDSTransform2(AbstractTransform):
    """Resample the segmentation once per deep-supervision scale.

    data_dict[output_key] becomes a list of segmentations, one per entry in
    ds_scales (scales of all ones are passed through unchanged).
    """

    def __init__(self, ds_scales=(1, 0.5, 0.25), order=0, cval=0, input_key="seg", output_key="seg", axes=None):
        self.ds_scales = ds_scales
        self.order = order
        self.cval = cval
        self.input_key = input_key
        self.output_key = output_key
        self.axes = axes

    def __call__(self, **data_dict):
        downsampled = downsample_seg_for_ds_transform2(data_dict[self.input_key], self.ds_scales,
                                                       self.order, self.cval, self.axes)
        data_dict[self.output_key] = downsampled
        return data_dict
def downsample_seg_for_ds_transform2(seg, ds_scales=((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25)), order=0, cval=0, axes=None):
    """Return one segmentation per scale in ds_scales.

    :param seg: numpy array of shape (b, c, x, y(, z))
    :param ds_scales: per-output downsampling factors along `axes`
    :param order: interpolation order passed to resize_segmentation
    :param cval: fill value passed to resize_segmentation
    :param axes: axes to rescale; defaults to the spatial axes (2..ndim-1)
    :return: list of numpy arrays; a scale of all ones returns `seg` unchanged
    """
    if axes is None:
        axes = list(range(2, len(seg.shape)))

    result = []
    for scale in ds_scales:
        if all(factor == 1 for factor in scale):
            # full resolution -> pass through
            result.append(seg)
            continue

        # compute the rounded target shape for this scale
        target_shape = np.array(seg.shape).astype(float)
        for idx, axis in enumerate(axes):
            target_shape[axis] *= scale[idx]
        target_shape = np.round(target_shape).astype(int)

        resized = np.zeros(target_shape, dtype=seg.dtype)
        for b in range(seg.shape[0]):
            for c in range(seg.shape[1]):
                resized[b, c] = resize_segmentation(seg[b, c], target_shape[2:], order, cval)
        result.append(resized)
    return result
| 4,164
| 38.292453
| 132
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/data_augmentation/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/cascade_stuff/predict_next_stage.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
import argparse
from nnunet.preprocessing.preprocessing import resample_data_or_seg
from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p
import nnunet
from nnunet.run.default_configuration import get_default_configuration
from multiprocessing import Pool
from nnunet.training.model_restore import recursive_find_python_class
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
def resample_and_save(predicted, target_shape, output_file, force_separate_z=False,
                      interpolation_order=1, interpolation_order_z=0):
    """Resample a softmax prediction to target_shape and save the argmax as a
    compressed .npz (key 'data', uint8).

    :param predicted: softmax array with channels first (argmax is taken over
        axis 0), or a path to an .npy file containing it; if a path is given,
        the file is loaded and then deleted from disk
    :param target_shape: spatial shape to resample to
    :param output_file: destination .npz path
    :param force_separate_z: forwarded to resample_data_or_seg (do_separate_z)
    :param interpolation_order: interpolation order for the resampling
    :param interpolation_order_z: interpolation order along z when resampled separately
    """
    if isinstance(predicted, str):
        assert isfile(predicted), "If isinstance(segmentation_softmax, str) then " \
                                  "isfile(segmentation_softmax) must be True"
        del_file = deepcopy(predicted)
        predicted = np.load(predicted)
        # the npy file was only a temporary spill to disk -> remove it
        os.remove(del_file)

    # resample the softmax (not the argmax) so class boundaries stay smooth
    predicted_new_shape = resample_data_or_seg(predicted, target_shape, False, order=interpolation_order,
                                               do_separate_z=force_separate_z, cval=0, order_z=interpolation_order_z)
    seg_new_shape = predicted_new_shape.argmax(0)
    np.savez_compressed(output_file, data=seg_new_shape.astype(np.uint8))
def predict_next_stage(trainer, stage_to_be_predicted_folder):
    """Predict the trainer's validation cases and export the segmentations,
    resampled to the next cascade stage's resolution.

    Results are written as <case>_segFromPrevStage.npz files into a
    "pred_next_stage" folder next to the trainer's output folder.

    :param trainer: initialized nnUNetTrainer with dataset_val populated
    :param stage_to_be_predicted_folder: preprocessed data folder of the next
        stage; provides the target shapes for resampling
    """
    output_folder = join(pardir(trainer.output_folder), "pred_next_stage")
    maybe_mkdir_p(output_folder)

    # use export parameters from the plans if present, otherwise defaults
    if 'segmentation_export_params' in trainer.plans.keys():
        force_separate_z = trainer.plans['segmentation_export_params']['force_separate_z']
        interpolation_order = trainer.plans['segmentation_export_params']['interpolation_order']
        interpolation_order_z = trainer.plans['segmentation_export_params']['interpolation_order_z']
    else:
        force_separate_z = None
        interpolation_order = 1
        interpolation_order_z = 0

    # resampling + saving runs asynchronously in a small worker pool
    export_pool = Pool(2)
    results = []

    for pat in trainer.dataset_val.keys():
        print(pat)
        data_file = trainer.dataset_val[pat]['data_file']
        # [:-1]: drops the last channel (presumably the stored segmentation) -- confirm
        data_preprocessed = np.load(data_file)['data'][:-1]

        # [1] = softmax output of (seg, softmax)
        predicted_probabilities = trainer.predict_preprocessed_data_return_seg_and_softmax(
            data_preprocessed, do_mirroring=trainer.data_aug_params["do_mirror"],
            mirror_axes=trainer.data_aug_params['mirror_axes'], mixed_precision=trainer.fp16)[1]

        data_file_nofolder = data_file.split("/")[-1]
        data_file_nextstage = join(stage_to_be_predicted_folder, data_file_nofolder)
        data_nextstage = np.load(data_file_nextstage)['data']
        target_shp = data_nextstage.shape[1:]
        output_file = join(output_folder, data_file_nextstage.split("/")[-1][:-4] + "_segFromPrevStage.npz")

        # very large arrays are spilled to disk and passed as a path
        # (presumably to avoid multiprocessing's pickle size limit -- confirm)
        if np.prod(predicted_probabilities.shape) > (2e9 / 4 * 0.85):  # *0.85 just to be save
            np.save(output_file[:-4] + ".npy", predicted_probabilities)
            predicted_probabilities = output_file[:-4] + ".npy"

        results.append(export_pool.starmap_async(resample_and_save, [(predicted_probabilities, target_shp, output_file,
                                                                      force_separate_z, interpolation_order,
                                                                      interpolation_order_z)]))

    # wait for all async exports, surfacing any worker exceptions
    _ = [i.get() for i in results]
    export_pool.close()
    export_pool.join()
if __name__ == "__main__":
    """
    RUNNING THIS SCRIPT MANUALLY IS USUALLY NOT NECESSARY. USE THE run_training.py FILE!

    This script is intended for predicting all the low resolution predictions of 3d_lowres for the next stage of the
    cascade. It needs to run once for each fold so that the segmentation is only generated for the validation set
    and not on the data the network was trained on. Run it with
    python predict_next_stage TRAINERCLASS TASK FOLD"""

    parser = argparse.ArgumentParser()
    parser.add_argument("network_trainer")
    parser.add_argument("task")
    parser.add_argument("fold", type=int)

    args = parser.parse_args()

    trainerclass = args.network_trainer
    task = args.task
    fold = args.fold

    # resolve plans/data locations for the 3d_lowres configuration
    plans_file, folder_with_preprocessed_data, output_folder_name, dataset_directory, batch_dice, stage = \
        get_default_configuration("3d_lowres", task)

    # locate the trainer class by name anywhere under nnunet.training.network_training
    trainer_class = recursive_find_python_class([join(nnunet.__path__[0], "training", "network_training")],
                                                trainerclass,
                                                "nnunet.training.network_training")

    if trainer_class is None:
        raise RuntimeError("Could not find trainer class in nnunet.training.network_training")
    else:
        assert issubclass(trainer_class,
                          nnUNetTrainer), "network_trainer was found but is not derived from nnUNetTrainer"

    trainer = trainer_class(plans_file, fold, folder_with_preprocessed_data, output_folder=output_folder_name,
                            dataset_directory=dataset_directory, batch_dice=batch_dice, stage=stage)

    trainer.initialize(False)
    trainer.load_dataset()
    trainer.do_split()
    # predict with the best checkpoint, not the final one
    trainer.load_best_checkpoint(train=False)

    # "_stage%d" % 1: preprocessed data of the next cascade stage -- confirm stage index
    stage_to_be_predicted_folder = join(dataset_directory, trainer.plans['data_identifier'] + "_stage%d" % 1)
    output_folder = join(pardir(trainer.output_folder), "pred_next_stage")
    maybe_mkdir_p(output_folder)

    predict_next_stage(trainer, stage_to_be_predicted_folder)
| 6,245
| 44.926471
| 119
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/cascade_stuff/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/optimizer/ranger.py
|
############
# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
# This code was taken from the repo above and was not created by me (Fabian)! Full credit goes to the original authors
############
import math
import torch
from torch.optim.optimizer import Optimizer
class Ranger(Optimizer):
    """RAdam (rectified Adam) combined with Lookahead.

    Taken from https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
    (full credit to the original authors). Every ``k`` steps the fast (RAdam)
    weights are interpolated towards a slowly-updated copy with factor
    ``alpha`` ("lookahead").

    NOTE(review): the positional ``(alpha, tensor)`` forms of
    add_/addcmul_/addcdiv_ used below are deprecated in newer torch versions
    -- confirm against the torch version this repo targets.
    """

    def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95, 0.999), eps=1e-5,
                 weight_decay=0):
        """
        :param params: iterable of parameters (or parameter groups) to optimize
        :param lr: learning rate
        :param alpha: lookahead interpolation factor, must be in [0, 1]
        :param k: number of optimizer steps between lookahead updates, >= 1
        :param N_sma_threshhold: SMA threshold below which the un-rectified
            (plain momentum) update is used instead of the RAdam one
        :param betas: Adam beta coefficients
        :param eps: term added to the denominator for numerical stability
        :param weight_decay: L2 penalty factor
        """
        # parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')

        # parameter comments:
        # beta1 (momentum) of .95 seems to work better than .90...
        # N_sma_threshold of 5 seems better in testing than 4.
        # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.

        # prep defaults and init torch.optim base
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold,
                        eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)

        # adjustable threshold
        self.N_sma_threshhold = N_sma_threshhold

        # now we can get to work...
        # removed as we now use step from RAdam...no need for duplicate step counting
        # for group in self.param_groups:
        #    group["step_counter"] = 0
        #    print("group step counter init")

        # look ahead params
        self.alpha = alpha
        self.k = k

        # radam buffer for state: caches (step, N_sma, step_size) keyed by
        # step % 10 so the rectification term is computed once per step, not
        # once per parameter
        self.radam_buffer = [[None, None, None] for ind in range(10)]

        # self.first_run_check=0

        # lookahead weights
        # 9/2/19 - lookahead param tensors have been moved to state storage.
        # This should resolve issues with load/save where weights were left in GPU memory from first load, slowing down future runs.

        # self.slow_weights = [[p.clone().detach() for p in group['params']]
        #                     for group in self.param_groups]

        # don't use grad for lookahead weights
        # for w in it.chain(*self.slow_weights):
        #    w.requires_grad = False

    def __setstate__(self, state):
        print("set state called")
        super(Ranger, self).__setstate__(state)

    def step(self, closure=None):
        """Perform one optimization step (RAdam update + periodic lookahead).

        :param closure: accepted for API compatibility but never called here
            (see comment below); the returned loss is therefore always None.
        """
        loss = None
        # note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.
        # Uncomment if you need to use the actual closure...
        # if closure is not None:
        #    loss = closure()

        # Evaluate averages and grad, update param tensors
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ranger optimizer does not support sparse gradients')

                # all bookkeeping happens in fp32, results are copied back below
                p_data_fp32 = p.data.float()

                state = self.state[p]  # get state dict for this param

                if len(state) == 0:  # if first time to run...init dictionary with our desired entries
                    # if self.first_run_check==0:
                    #    self.first_run_check=1
                    #    print("Initializing slow buffer...should not see this at load from saved model!")
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)

                    # look ahead weight storage now in state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)

                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                # begin computations
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # compute variance mov avg
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                # compute mean moving avg
                exp_avg.mul_(beta1).add_(1 - beta1, grad)

                state['step'] += 1

                # RAdam rectification: (N_sma, step_size) depends only on the
                # step number, so reuse the cached value when available
                buffered = self.radam_buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                    N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        # variance not yet tractable -> plain bias-corrected momentum step
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)

                if N_sma > self.N_sma_threshhold:
                    # rectified Adam step: divide by the adaptive denominator
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
                else:
                    p_data_fp32.add_(-step_size * group['lr'], exp_avg)

                p.data.copy_(p_data_fp32)

                # integrated look ahead...
                # we do it at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    slow_p = state['slow_buffer']  # get access to slow param tensor
                    slow_p.add_(self.alpha, p.data - slow_p)  # (fast weights - slow weights) * alpha
                    p.data.copy_(slow_p)  # copy interpolated weights to RAdam param tensor

        return loss
| 6,465
| 41.261438
| 132
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/loss_functions/dice_loss.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.loss_functions.TopK_loss import TopKLoss
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from torch import nn
import numpy as np
class GDL(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
                 square=False, square_volumes=False):
        """
        Generalized Dice loss: per-class tp/fp/fn are weighted by the inverse
        class volume (1/V), so small structures contribute as much as large ones.

        square_volumes will square the weight term. The paper recommends square_volumes=True; I don't (just an intuition)

        :param apply_nonlin: optional nonlinearity applied to the prediction first
        :param batch_dice: if True, statistics are aggregated over the batch axis too
        :param do_bg: if False, the background class (channel 0) is excluded
        :param smooth: additive smoothing in numerator and denominator
        :param square: square tp/fp/fn before summation (forwarded to get_tp_fp_fn_tn)
        :param square_volumes: square the 1/V weight term
        """
        super(GDL, self).__init__()

        self.square_volumes = square_volumes
        self.square = square
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        shp_x = x.shape
        shp_y = y.shape

        if self.batch_dice:
            axes = [0] + list(range(2, len(shp_x)))
        else:
            axes = list(range(2, len(shp_x)))

        if len(shp_x) != len(shp_y):
            # y is a plain label map (b, x, y(, z)) -> add the channel axis
            y = y.view((shp_y[0], 1, *shp_y[1:]))

        if all([i == j for i, j in zip(x.shape, y.shape)]):
            # if this is the case then gt is probably already a one hot encoding
            y_onehot = y
        else:
            gt = y.long()
            y_onehot = torch.zeros(shp_x)
            if x.device.type == "cuda":
                y_onehot = y_onehot.cuda(x.device.index)
            y_onehot.scatter_(1, gt, 1)

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        if not self.do_bg:
            x = x[:, 1:]
            y_onehot = y_onehot[:, 1:]

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y_onehot, axes, loss_mask, self.square)

        # GDL weight computation, we use 1/V
        volumes = sum_tensor(y_onehot, axes) + 1e-6  # add some eps to prevent div by zero

        if self.square_volumes:
            volumes = volumes ** 2

        # apply weights
        tp = tp / volumes
        fp = fp / volumes
        fn = fn / volumes

        # sum over classes
        if self.batch_dice:
            axis = 0
        else:
            axis = 1

        tp = tp.sum(axis, keepdim=False)
        fp = fp.sum(axis, keepdim=False)
        fn = fn.sum(axis, keepdim=False)

        # compute dice
        dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)

        dc = dc.mean()

        # negate because the optimizer minimizes
        return -dc
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
    """Compute (optionally masked/summed) true/false positive/negative terms.

    net_output must be (b, c, x, y(, z)))
    gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
    if mask is provided it must have shape (b, 1, x, y(, z)))
    :param net_output: network prediction (typically softmax probabilities)
    :param gt: ground truth label map or one-hot encoding
    :param axes: axes to sum over; can be (, ) = no summation
    :param mask: mask must be 1 for valid pixels and 0 for invalid pixels
    :param square: if True then fp, tp and fn will be squared before summation
    :return: tp, fp, fn, tn tensors, summed over `axes` (elementwise if axes is empty)
    """
    if axes is None:
        axes = tuple(range(2, len(net_output.size())))
    axes = tuple(int(a) for a in axes)

    with torch.no_grad():
        if len(net_output.shape) != len(gt.shape):
            # gt is (b, x, y(, z)) -> insert the channel axis for scatter_
            gt = gt.view((gt.shape[0], 1, *gt.shape[1:]))

        if all(i == j for i, j in zip(net_output.shape, gt.shape)):
            # if this is the case then gt is probably already a one hot encoding
            y_onehot = gt
        else:
            # allocate the one-hot tensor directly on the prediction's device;
            # this also works for devices other than cpu/cuda (e.g. mps)
            y_onehot = torch.zeros(net_output.shape, device=net_output.device)
            y_onehot.scatter_(1, gt.long(), 1)

    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    tn = (1 - net_output) * (1 - y_onehot)

    if mask is not None:
        # mask is (b, 1, ...) and broadcasts over the class axis
        tp = tp * mask
        fp = fp * mask
        fn = fn * mask
        tn = tn * mask

    if square:
        tp = tp ** 2
        fp = fp ** 2
        fn = fn ** 2
        tn = tn ** 2

    if len(axes) > 0:
        tp = tp.sum(dim=axes)
        fp = fp.sum(dim=axes)
        fn = fn.sum(dim=axes)
        tn = tn.sum(dim=axes)

    return tp, fp, fn, tn
class SoftDiceLoss(nn.Module):
    """Soft (differentiable) Dice loss; returns the negative mean Dice.

    :param apply_nonlin: optional nonlinearity applied to the prediction first
    :param batch_dice: if True, Dice statistics are aggregated over the batch axis too
    :param do_bg: if False, the background class (channel 0) is excluded
    :param smooth: additive smoothing for numerator and denominator
    """

    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
        super(SoftDiceLoss, self).__init__()
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        spatial_axes = list(range(2, len(x.shape)))
        axes = [0] + spatial_axes if self.batch_dice else spatial_axes

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)

        numerator = 2 * tp + self.smooth
        denominator = 2 * tp + fp + fn + self.smooth
        dc = numerator / (denominator + 1e-8)

        if not self.do_bg:
            # drop the background entry (class axis 0 after batch aggregation)
            dc = dc[1:] if self.batch_dice else dc[:, 1:]

        # negate: optimizers minimize, Dice should be maximized
        return -dc.mean()
class MCCLoss(nn.Module):
    def __init__(self, apply_nonlin=None, batch_mcc=False, do_bg=True, smooth=0.0):
        """
        based on matthews correlation coefficient
        https://en.wikipedia.org/wiki/Matthews_correlation_coefficient

        Does not work. Really unstable. F this.

        :param apply_nonlin: optional nonlinearity applied to the prediction first
        :param batch_mcc: if True, statistics are aggregated over the batch axis too
        :param do_bg: if False, the background class (channel 0) is excluded
        :param smooth: additive smoothing for numerator and denominator
        """
        super(MCCLoss, self).__init__()

        self.smooth = smooth
        self.do_bg = do_bg
        self.batch_mcc = batch_mcc
        self.apply_nonlin = apply_nonlin

    def forward(self, x, y, loss_mask=None):
        shp_x = x.shape
        voxels = np.prod(shp_x[2:])

        if self.batch_mcc:
            axes = [0] + list(range(2, len(shp_x)))
        else:
            axes = list(range(2, len(shp_x)))

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, tn = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)

        # normalize by the voxel count (presumably to keep the products in the
        # denominator numerically manageable -- confirm)
        tp /= voxels
        fp /= voxels
        fn /= voxels
        tn /= voxels

        nominator = tp * tn - fp * fn + self.smooth
        denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5 + self.smooth

        mcc = nominator / denominator

        if not self.do_bg:
            if self.batch_mcc:
                mcc = mcc[1:]
            else:
                mcc = mcc[:, 1:]
        mcc = mcc.mean()

        # negate: optimizers minimize, MCC should be maximized
        return -mcc
class SoftDiceLossSquared(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
        """
        squares the terms in the denominator as proposed by Milletari et al.

        :param apply_nonlin: optional nonlinearity applied to the prediction first
        :param batch_dice: if True, Dice statistics are aggregated over the batch axis too
        :param do_bg: if False, the background class (channel 0) is excluded
        :param smooth: additive smoothing for numerator and denominator
        """
        super(SoftDiceLossSquared, self).__init__()

        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        # NOTE(review): loss_mask is accepted for API parity with SoftDiceLoss
        # but is not used by this implementation
        shp_x = x.shape
        shp_y = y.shape

        if self.batch_dice:
            axes = [0] + list(range(2, len(shp_x)))
        else:
            axes = list(range(2, len(shp_x)))

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        with torch.no_grad():
            if len(shp_x) != len(shp_y):
                # y is a plain label map -> add the channel axis for scatter_
                y = y.view((shp_y[0], 1, *shp_y[1:]))

            if all([i == j for i, j in zip(x.shape, y.shape)]):
                # if this is the case then gt is probably already a one hot encoding
                y_onehot = y
            else:
                y = y.long()
                y_onehot = torch.zeros(shp_x)
                if x.device.type == "cuda":
                    y_onehot = y_onehot.cuda(x.device.index)
                # NOTE(review): the trailing .float() is a no-op (its result is
                # discarded; scatter_ already modified y_onehot in place)
                y_onehot.scatter_(1, y, 1).float()

        intersect = x * y_onehot
        # values in the denominator get smoothed
        denominator = x ** 2 + y_onehot ** 2

        # aggregation was previously done in get_tp_fp_fn, but needs to be done here now (needs to be done after
        # squaring)
        intersect = sum_tensor(intersect, axes, False) + self.smooth
        denominator = sum_tensor(denominator, axes, False) + self.smooth

        dc = 2 * intersect / denominator

        if not self.do_bg:
            if self.batch_dice:
                dc = dc[1:]
            else:
                dc = dc[:, 1:]
        dc = dc.mean()

        return -dc
class DC_and_CE_loss(nn.Module):
    def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1,
                 log_dice=False, ignore_label=None):
        """
        Weighted sum of soft Dice loss and cross entropy.

        CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
        :param soft_dice_kwargs: kwargs for SoftDiceLoss / SoftDiceLossSquared
        :param ce_kwargs: kwargs for RobustCrossEntropyLoss
        :param aggregate: only "sum" is implemented
        :param square_dice: use SoftDiceLossSquared instead of SoftDiceLoss
        :param weight_ce: weight of the CE term (0 skips its computation)
        :param weight_dice: weight of the Dice term (0 skips its computation)
        :param log_dice: apply -log(-dice) to the Dice term
        :param ignore_label: label value excluded from both loss terms
        """
        super(DC_and_CE_loss, self).__init__()
        if ignore_label is not None:
            assert not square_dice, 'not implemented'
            # we need per-voxel CE values to apply the ignore mask ourselves
            ce_kwargs['reduction'] = 'none'
        self.log_dice = log_dice
        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.aggregate = aggregate
        self.ce = RobustCrossEntropyLoss(**ce_kwargs)

        self.ignore_label = ignore_label

        if not square_dice:
            self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
        else:
            self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)

    def forward(self, net_output, target):
        """
        target must be b, c, x, y(, z) with c=1
        :param net_output:
        :param target:
        :return:
        """
        if self.ignore_label is not None:
            assert target.shape[1] == 1, 'not implemented for one hot encoding'
            mask = target != self.ignore_label
            # zero out ignored voxels on a copy so the caller's tensor is not
            # mutated in place (the original version clobbered `target`)
            target = torch.where(mask, target, torch.zeros_like(target))
            mask = mask.float()
        else:
            mask = None

        dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0
        if self.log_dice:
            dc_loss = -torch.log(-dc_loss)

        ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0
        if self.ignore_label is not None:
            # average the per-voxel CE only over the valid voxels
            ce_loss *= mask[:, 0]
            ce_loss = ce_loss.sum() / mask.sum()

        if self.aggregate == "sum":
            result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
        else:
            raise NotImplementedError("nah son")  # reserved for other stuff (later)
        return result
class DC_and_BCE_loss(nn.Module):
    """Sum of sigmoid-based soft Dice and binary cross entropy.

    DO NOT APPLY NONLINEARITY IN YOUR NETWORK!
    THIS LOSS IS INTENDED TO BE USED FOR BRATS REGIONS ONLY
    """

    def __init__(self, bce_kwargs, soft_dice_kwargs, aggregate="sum"):
        """
        :param bce_kwargs: kwargs for nn.BCEWithLogitsLoss
        :param soft_dice_kwargs: kwargs for SoftDiceLoss
        :param aggregate: only "sum" is implemented
        """
        super(DC_and_BCE_loss, self).__init__()
        self.aggregate = aggregate
        # both terms take raw logits and apply the sigmoid internally
        self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)
        self.dc = SoftDiceLoss(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)

    def forward(self, net_output, target):
        bce_term = self.ce(net_output, target)
        dice_term = self.dc(net_output, target)
        if self.aggregate != "sum":
            raise NotImplementedError("nah son")  # reserved for other stuff (later)
        return bce_term + dice_term
class GDL_and_CE_loss(nn.Module):
    """Unweighted sum of Generalized Dice loss and cross entropy."""

    def __init__(self, gdl_dice_kwargs, ce_kwargs, aggregate="sum"):
        super(GDL_and_CE_loss, self).__init__()
        self.aggregate = aggregate
        self.ce = RobustCrossEntropyLoss(**ce_kwargs)
        self.dc = GDL(softmax_helper, **gdl_dice_kwargs)

    def forward(self, net_output, target):
        gdl_term = self.dc(net_output, target)
        ce_term = self.ce(net_output, target)
        if self.aggregate != "sum":
            raise NotImplementedError("nah son")  # reserved for other stuff (later)
        return ce_term + gdl_term
class DC_and_topk_loss(nn.Module):
    """Unweighted sum of soft Dice (optionally squared) and top-k cross entropy."""

    def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False):
        super(DC_and_topk_loss, self).__init__()
        self.aggregate = aggregate
        self.ce = TopKLoss(**ce_kwargs)
        if square_dice:
            self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
        else:
            self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)

    def forward(self, net_output, target):
        dice_term = self.dc(net_output, target)
        topk_term = self.ce(net_output, target)
        if self.aggregate != "sum":
            raise NotImplementedError("nah son")  # reserved for other stuff (later?)
        return topk_term + dice_term
| 14,049
| 31.903981
| 121
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/loss_functions/TopK_loss.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
class TopKLoss(RobustCrossEntropyLoss):
    """
    Top-k cross entropy: only the k% highest per-voxel CE values contribute.

    Network has to have NO LINEARITY!
    """
    def __init__(self, weight=None, ignore_index=-100, k=10):
        """
        :param weight: optional per-class weight, forwarded to CrossEntropyLoss
        :param ignore_index: target value that is ignored by the CE
        :param k: percentage (0-100] of voxels whose (highest) losses are kept
        """
        self.k = k
        # reduction='none' replaces the deprecated size_average=False /
        # reduce=False pair (same legacy semantics): we need the per-voxel
        # losses so we can pick the top k% ourselves in forward()
        super(TopKLoss, self).__init__(weight, ignore_index=ignore_index, reduction='none')

    def forward(self, inp, target):
        # strip the singleton channel axis: (b, 1, ...) -> (b, ...)
        target = target[:, 0].long()
        res = super(TopKLoss, self).forward(inp, target)
        num_voxels = np.prod(res.shape, dtype=np.int64)
        # keep only the k% hardest voxels and average over them
        res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False)
        return res.mean()
| 1,364
| 39.147059
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/loss_functions/crossentropy.py
|
from torch import nn, Tensor
class RobustCrossEntropyLoss(nn.CrossEntropyLoss):
    """
    Compatibility wrapper around nn.CrossEntropyLoss: the target may be a
    float tensor carrying a singleton channel axis (b, 1, x, y(, z)); it is
    squeezed and cast to long before delegating to the standard CE.
    """

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        if target.ndim == input.ndim:
            # target has an extra channel axis -> it must be singleton
            assert target.shape[1] == 1
            target = target[:, 0]
        return super().forward(input, target.long())
| 438
| 35.583333
| 99
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/loss_functions/deep_supervision.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
class MultipleOutputLoss2(nn.Module):
    """
    Applies a wrapped loss to matching lists of predictions and targets
    (deep supervision): loss(x[i], y[i]) is computed per output and combined
    as a weighted sum.
    """

    def __init__(self, loss, weight_factors=None):
        """
        :param loss: the underlying loss (callable taking a prediction and a target)
        :param weight_factors: per-output weights; None means weight 1 everywhere
        """
        super(MultipleOutputLoss2, self).__init__()
        self.weight_factors = weight_factors
        self.loss = loss

    def forward(self, x, y):
        assert isinstance(x, (tuple, list)), "x must be either tuple or list"
        assert isinstance(y, (tuple, list)), "y must be either tuple or list"
        weights = self.weight_factors if self.weight_factors is not None else [1] * len(x)

        # the first output always contributes (even with weight 0, matching
        # the original behavior); zero-weighted later outputs are skipped
        total = weights[0] * self.loss(x[0], y[0])
        for idx in range(1, len(x)):
            if weights[idx] != 0:
                total = total + weights[idx] * self.loss(x[idx], y[idx])
        return total
| 1,679
| 37.181818
| 117
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/loss_functions/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task017_BeyondCranialVaultAbdominalOrganSegmentation.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import SimpleITK as sitk
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Pool
import numpy as np
from nnunet.configuration import default_num_threads
from scipy.ndimage import label
from sklearn.model_selection import KFold
def export_segmentations(indir, outdir):
    """Copy all .nii.gz segmentations from indir to outdir, renaming them to
    the challenge submission format ``test-segmentation-<id>.nii``.

    :param indir: folder containing the predicted .nii.gz files
    :param outdir: destination folder (must exist)
    """
    niftis = subfiles(indir, suffix='nii.gz', join=False)
    for n in niftis:
        # file names end in ..._<id>.nii.gz; [:-7] strips the ".nii.gz"
        identifier = str(n.split("_")[-1][:-7])
        outfname = join(outdir, "test-segmentation-%s.nii" % identifier)
        img = sitk.ReadImage(join(indir, n))
        sitk.WriteImage(img, outfname)
def export_segmentations_postprocess(indir, outdir):
    """Like export_segmentations, but keeps only the largest connected
    component of the foreground (all labels > 0) in each segmentation.

    :param indir: folder containing the predicted .nii.gz files
    :param outdir: destination folder (created if missing)
    """
    maybe_mkdir_p(outdir)
    niftis = subfiles(indir, suffix='nii.gz', join=False)
    for n in niftis:
        print("\n", n)
        # file names end in ..._<id>.nii.gz; [:-7] strips the ".nii.gz"
        identifier = str(n.split("_")[-1][:-7])
        outfname = join(outdir, "test-segmentation-%s.nii" % identifier)
        img = sitk.ReadImage(join(indir, n))
        img_npy = sitk.GetArrayFromImage(img)
        # connected components of the binarized foreground
        lmap, num_objects = label((img_npy > 0).astype(int))
        sizes = []
        for o in range(1, num_objects + 1):
            sizes.append((lmap == o).sum())
        # +1 because component labels start at 1
        mx = np.argmax(sizes) + 1
        print(sizes)
        # zero out everything outside the largest component
        img_npy[lmap != mx] = 0
        img_new = sitk.GetImageFromArray(img_npy)
        # preserve spacing/origin/direction of the input image
        img_new.CopyInformation(img)
        sitk.WriteImage(img_new, outfname)
if __name__ == "__main__":
    # Convert the BCV (Beyond the Cranial Vault) abdominal CT dataset into the
    # nnU-Net raw-data layout (imagesTr/labelsTr/imagesTs + dataset.json) and
    # write a fixed train/val split.
    train_dir = "/media/userdisk1/ytxie/BCV-pro/dataset/BCV/RawData/Training"
    test_dir = "/media/userdisk1/ytxie/BCV-pro/dataset/BCV/RawData/Testing"
    output_folder = "/media/userdisk1/ytxie/nnU-pro/nnU_data/nnUNet_raw/nnUNet_raw_data/Task017_BCV"
    img_dir = join(output_folder, "imagesTr")
    lab_dir = join(output_folder, "labelsTr")
    img_dir_te = join(output_folder, "imagesTs")
    maybe_mkdir_p(img_dir)
    maybe_mkdir_p(lab_dir)
    maybe_mkdir_p(img_dir_te)
    def load_save_train(args):
        # Copy one (image, label) pair into the nnU-Net folders, renaming to
        # "bcv_<id>_0000.nii.gz" / "bcv_<id>.nii.gz".
        data_file, seg_file = args
        pat_id = data_file.split("/")[-1]
        # filename pattern appears to be "<prefix>-imgXXXX.nii.gz": strip "img"
        # prefix (3 chars) and ".nii.gz" suffix (7 chars), then drop leading zeros
        pat_id = "bcv_" + str(int(pat_id.split("-")[-1][3:-7]))
        img_itk = sitk.ReadImage(data_file)
        sitk.WriteImage(img_itk, join(img_dir, pat_id + "_0000.nii.gz"))
        img_itk = sitk.ReadImage(seg_file)
        sitk.WriteImage(img_itk, join(lab_dir, pat_id + ".nii.gz"))
        return pat_id
    def load_save_test(args):
        # Same as load_save_train but for unlabeled test images.
        data_file = args
        pat_id = data_file.split("/")[-1]
        pat_id = "bcv_" + str(int(pat_id.split("-")[-1][3:-7]))
        img_itk = sitk.ReadImage(data_file)
        sitk.WriteImage(img_itk, join(img_dir_te, pat_id + "_0000.nii.gz"))
        return pat_id
    nii_files_tr_data = subfiles(join(train_dir,'img'), True, "img", "nii.gz", True)
    nii_files_tr_seg = subfiles(join(train_dir,'label'), True, "label", "nii.gz", True)
    nii_files_ts = subfiles(join(test_dir, 'img'), True, "img", "nii.gz", True)
    # copy/rename all cases in parallel
    # NOTE(review): mapping locally-defined functions over a Pool only works with the
    # "fork" start method; this would fail to pickle under "spawn" (Windows/macOS) - confirm
    p = Pool(default_num_threads)
    train_ids = p.map(load_save_train, zip(nii_files_tr_data, nii_files_tr_seg))
    test_ids = p.map(load_save_test, nii_files_ts)
    p.close()
    p.join()
    # assemble dataset.json, the task description consumed by nnU-Net's planning
    json_dict = OrderedDict()
    json_dict['name'] = "BCV"
    json_dict['description'] = "BeyondCranialVaultAbodominalOrganSegmentation"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see challenge website"
    json_dict['licence'] = "see challenge website"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "CT"
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "spleen",
        "2": "kidney_right",
        "3": "kidney_left",
        "4": "gallbladder",
        "5": "esophagus",
        "6": "liver",
        "7": "stomach",
        "8": "aorta",
        "9": "ivc",
        "10": "pvsv",
        "11": "pancreas",
        "12": "adrenal_gland_right",
        "13": "adrenal_gland_left"
    }
    json_dict['numTraining'] = len(train_ids)
    json_dict['numTest'] = len(test_ids)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in train_ids]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i for i in test_ids]
    # NOTE(review): `os` and `json` are presumably re-exported by the
    # batchgenerators star import above - confirm
    with open(os.path.join(output_folder, "dataset.json"), 'w') as f:
        json.dump(json_dict, f, indent=4, sort_keys=True)
    # create a dummy split (patients need to be separated)
    splits = []
    patients = np.unique([i[:10] for i in train_ids])  # NOTE(review): unused - confirm safe to remove
    patientids = [i[4:] for i in train_ids]  # NOTE(review): unused - confirm safe to remove
    splits.append(OrderedDict())
    # single split: numeric ids < 32 train, >= 32 validation
    splits[-1]['train'] = [i for i in train_ids if int(i[4:]) <32]
    splits[-1]['val'] = [i for i in train_ids if int(i[4:]) >= 32]
    save_pickle(splits, join(output_folder, "splits_final.pkl"))
| 5,387
| 36.158621
| 123
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task055_SegTHOR.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from nnunet.paths import nnUNet_raw_data
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
import SimpleITK as sitk
def convert_for_submission(source_dir, target_dir):
    """
    Re-save every .nii.gz in source_dir as an uncompressed .nii in target_dir.
    The submission format appears to expect .nii rather than .nii.gz.
    :param source_dir:
    :param target_dir:
    :return:
    """
    maybe_mkdir_p(target_dir)
    for fname in subfiles(source_dir, suffix=".nii.gz", join=False):
        image = sitk.ReadImage(join(source_dir, fname))
        # drop the ".nii.gz" suffix (7 chars) and write as plain ".nii"
        sitk.WriteImage(image, join(target_dir, fname[:-7] + ".nii"))
if __name__ == "__main__":
    # Convert the SegTHOR (thoracic organs at risk, CT) challenge data into the
    # nnU-Net raw-data layout and write the corresponding dataset.json.
    base = "/media/fabian/DeepLearningData/SegTHOR"
    task_id = 55
    task_name = "SegTHOR"
    foldername = "Task%03.0d_%s" % (task_id, task_name)
    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    maybe_mkdir_p(imagestr)
    maybe_mkdir_p(imagests)
    maybe_mkdir_p(labelstr)
    train_patient_names = []
    test_patient_names = []
    # training cases live in one subfolder per patient: <p>/<p>.nii.gz + <p>/GT.nii.gz
    train_patients = subfolders(join(base, "train"), join=False)
    for p in train_patients:
        curr = join(base, "train", p)
        label_file = join(curr, "GT.nii.gz")
        image_file = join(curr, p + ".nii.gz")
        shutil.copy(image_file, join(imagestr, p + "_0000.nii.gz"))
        shutil.copy(label_file, join(labelstr, p + ".nii.gz"))
        train_patient_names.append(p)
    # test cases are flat .nii.gz files without labels
    test_patients = subfiles(join(base, "test"), join=False, suffix=".nii.gz")
    for p in test_patients:
        p = p[:-7]  # strip ".nii.gz"
        curr = join(base, "test")
        image_file = join(curr, p + ".nii.gz")
        shutil.copy(image_file, join(imagests, p + "_0000.nii.gz"))
        test_patient_names.append(p)
    # dataset.json: task description consumed by nnU-Net's experiment planning
    json_dict = OrderedDict()
    json_dict['name'] = "SegTHOR"
    json_dict['description'] = "SegTHOR"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see challenge website"
    json_dict['licence'] = "see challenge website"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "CT",
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "esophagus",
        "2": "heart",
        "3": "trachea",
        "4": "aorta",
    }
    json_dict['numTraining'] = len(train_patient_names)
    json_dict['numTest'] = len(test_patient_names)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in
                         train_patient_names]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]
    save_json(json_dict, os.path.join(out_base, "dataset.json"))
| 3,462
| 33.979798
| 142
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task058_ISBI_EM_SEG.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import SimpleITK as sitk
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import nnUNet_raw_data
from skimage import io
def export_for_submission(predicted_npz, out_file):
    """
    Convert a saved softmax prediction into the 32-bit tif expected by the challenge:
    values between 0 (100% membrane certainty) and 1 (100% non-membrane certainty).
    Channel 0 of the softmax is the non-membrane probability.
    :return:
    """
    probs = np.load(predicted_npz)['softmax']
    # renormalize across channels, just in case
    probs = probs / probs.sum(0)[None]
    non_membrane_prob = probs[0]
    assert out_file.endswith(".tif")
    io.imsave(out_file, non_membrane_prob.astype(np.float32))
if __name__ == "__main__":
    # Convert the ISBI 2012 EM membrane segmentation challenge (multi-page tifs)
    # into the nnU-Net raw-data layout.
    # download from here http://brainiac2.mit.edu/isbi_challenge/downloads
    base = "/media/fabian/My Book/datasets/ISBI_EM_SEG"
    # the orientation of VerSe is all fing over the place. run fslreorient2std to correct that (hopefully!)
    # THIS CAN HAVE CONSEQUENCES FOR THE TEST SET SUBMISSION! CAREFUL!
    train_volume = io.imread(join(base, "train-volume.tif"))
    train_labels = io.imread(join(base, "train-labels.tif"))
    train_labels[train_labels == 255] = 1  # binarize: label 255 -> 1
    test_volume = io.imread(join(base, "test-volume.tif"))
    task_id = 58
    task_name = "ISBI_EM_SEG"
    foldername = "Task%03.0d_%s" % (task_id, task_name)
    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    maybe_mkdir_p(imagestr)
    maybe_mkdir_p(imagests)
    maybe_mkdir_p(labelstr)
    img_tr_itk = sitk.GetImageFromArray(train_volume.astype(np.float32))
    lab_tr_itk = sitk.GetImageFromArray(1 - train_labels) # walls are foreground, cells background
    img_te_itk = sitk.GetImageFromArray(test_volume.astype(np.float32))
    # voxel spacing (4, 4, 50) - presumably nanometers; confirm against dataset docs
    img_tr_itk.SetSpacing((4, 4, 50))
    lab_tr_itk.SetSpacing((4, 4, 50))
    img_te_itk.SetSpacing((4, 4, 50))
    # 5 copies, otherwise we cannot run nnunet (5 fold cv needs that)
    sitk.WriteImage(img_tr_itk, join(imagestr, "training0_0000.nii.gz"))
    sitk.WriteImage(img_tr_itk, join(imagestr, "training1_0000.nii.gz"))
    sitk.WriteImage(img_tr_itk, join(imagestr, "training2_0000.nii.gz"))
    sitk.WriteImage(img_tr_itk, join(imagestr, "training3_0000.nii.gz"))
    sitk.WriteImage(img_tr_itk, join(imagestr, "training4_0000.nii.gz"))
    sitk.WriteImage(lab_tr_itk, join(labelstr, "training0.nii.gz"))
    sitk.WriteImage(lab_tr_itk, join(labelstr, "training1.nii.gz"))
    sitk.WriteImage(lab_tr_itk, join(labelstr, "training2.nii.gz"))
    sitk.WriteImage(lab_tr_itk, join(labelstr, "training3.nii.gz"))
    sitk.WriteImage(lab_tr_itk, join(labelstr, "training4.nii.gz"))
    sitk.WriteImage(img_te_itk, join(imagests, "testing.nii.gz"))
    # dataset.json: task description consumed by nnU-Net's experiment planning
    json_dict = OrderedDict()
    json_dict['name'] = task_name
    json_dict['description'] = task_name
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see challenge website"
    json_dict['licence'] = "see challenge website"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "EM",
    }
    json_dict['labels'] = {i: str(i) for i in range(2)}
    json_dict['numTraining'] = 5
    json_dict['numTest'] = 1
    json_dict['training'] = [{'image': "./imagesTr/training%d.nii.gz" % i, "label": "./labelsTr/training%d.nii.gz" % i} for i in
                             range(5)]
    json_dict['test'] = ["./imagesTs/testing.nii.gz"]
    save_json(json_dict, os.path.join(out_base, "dataset.json"))
| 4,263
| 39.609524
| 128
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task082_BraTS_2020.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from multiprocessing.pool import Pool
import numpy as np
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
from meddec.paper_plot.nature_methods.challenge_visualization_stuff.own_implementation.ranking import \
rank_then_aggregate
import scipy.stats as ss
from nnunet.dataset_conversion.Task032_BraTS_2018 import convert_labels_back_to_BraTS_2018_2019_convention
from nnunet.dataset_conversion.Task043_BraTS_2019 import copy_BraTS_segmentation_and_convert_labels
from nnunet.evaluation.region_based_evaluation import get_brats_regions, evaluate_regions
from nnunet.paths import nnUNet_raw_data
import SimpleITK as sitk
import shutil
from medpy.metric import dc, hd95
from nnunet.postprocessing.consolidate_postprocessing import collect_cv_niftis
from typing import Tuple
def apply_brats_threshold(fname, out_dir, threshold, replace_with):
    """If a case has fewer than `threshold` enhancing-tumor voxels (label 3),
    relabel them as `replace_with` and write the result to out_dir."""
    itk_image = sitk.ReadImage(fname)
    arr = sitk.GetArrayFromImage(itk_image)
    enh_voxels = np.sum(arr == 3)
    if enh_voxels < threshold:
        # tiny enhancing tumor -> treat as false positive and replace the label
        arr[arr == 3] = replace_with
    result = sitk.GetImageFromArray(arr)
    result.CopyInformation(itk_image)
    sitk.WriteImage(result, join(out_dir, fname.split("/")[-1]))
def load_niftis_threshold_compute_dice(gt_file, pred_file, thresholds: Tuple[list, tuple]):
    """For each candidate threshold, report the enhancing-tumor (label 3) Dice that
    results from treating predictions smaller than the threshold as empty
    (BraTS convention: Dice 1 if the reference is empty too, else 0).

    Returns (was_smaller_per_threshold, dice_per_threshold), both dicts keyed by threshold."""
    gt_arr = sitk.GetArrayFromImage(sitk.ReadImage(gt_file))
    pred_arr = sitk.GetArrayFromImage(sitk.ReadImage(pred_file))
    pred_mask = pred_arr == 3
    gt_mask = gt_arr == 3
    pred_count = np.sum(pred_mask)
    gt_count = np.sum(gt_mask)
    # Dice of the untouched prediction; reused for every threshold it survives
    full_dice = dc(pred_mask, gt_mask)
    res_dice = {}
    res_was_smaller = {}
    for t in thresholds:
        below = pred_count < t
        if below:
            # prediction would be removed at this threshold
            res_dice[t] = 1. if gt_count == 0 else 0.
        else:
            res_dice[t] = deepcopy(full_dice)
        res_was_smaller[t] = below
    return res_was_smaller, res_dice
def apply_threshold_to_folder(folder_in, folder_out, threshold, replace_with, processes=24):
    """Run apply_brats_threshold on every .nii.gz in folder_in (in parallel),
    writing thresholded copies to folder_out."""
    maybe_mkdir_p(folder_out)
    files = subfiles(folder_in, suffix='.nii.gz', join=True)
    worker_args = [(f, folder_out, threshold, replace_with) for f in files]
    pool = Pool(processes)
    pool.starmap(apply_brats_threshold, worker_args)
    pool.close()
    pool.join()
def determine_brats_postprocessing(folder_with_preds, folder_with_gt, postprocessed_output_dir, processes=8,
                                  thresholds=(0, 10, 50, 100, 200, 500, 750, 1000, 1500, 2500, 10000), replace_with=2):
    """Sweep enhancing-tumor size thresholds on cross-validation predictions, pick the
    one with the best mean Dice, apply it to all predictions and persist the sweep.

    :param folder_with_preds: folder with predicted niftis
    :param folder_with_gt: folder with reference niftis
    :param postprocessed_output_dir: destination for thresholded predictions + threshold.pkl
    :param processes: worker pool size
    :param thresholds: candidate minimum voxel counts for the enhancing tumor
    :param replace_with: label that replaces enhancing tumor when below the threshold
    """
    # find pairs
    # NOTE(review): pairing relies on both folders sorting to matching filename order - confirm
    nifti_gt = subfiles(folder_with_gt, suffix=".nii.gz", sort=True)
    p = Pool(processes)
    nifti_pred = subfiles(folder_with_preds, suffix='.nii.gz', sort=True)
    # per case: the enhancing-tumor Dice each candidate threshold would produce
    results = p.starmap_async(load_niftis_threshold_compute_dice, zip(nifti_gt, nifti_pred, [thresholds] * len(nifti_pred)))
    results = results.get()
    all_dc_per_threshold = {}
    for t in thresholds:
        all_dc_per_threshold[t] = np.array([i[1][t] for i in results])
        print(t, np.mean(all_dc_per_threshold[t]))
    # pick the threshold with the highest mean Dice across cases
    means = [np.mean(all_dc_per_threshold[t]) for t in thresholds]
    best_threshold = thresholds[np.argmax(means)]
    print('best', best_threshold, means[np.argmax(means)])
    maybe_mkdir_p(postprocessed_output_dir)
    # apply the winning threshold to every prediction
    p.starmap(apply_brats_threshold, zip(nifti_pred, [postprocessed_output_dir]*len(nifti_pred), [best_threshold]*len(nifti_pred), [replace_with] * len(nifti_pred)))
    p.close()
    p.join()
    # persist the whole sweep so the choice can be inspected / reused later
    save_pickle((thresholds, means, best_threshold, all_dc_per_threshold), join(postprocessed_output_dir, "threshold.pkl"))
def collect_and_prepare(base_dir, num_processes = 12, clean=False):
    """
    collect all cv_niftis, compute brats metrics, compute enh tumor thresholds and summarize in csv
    :param base_dir: folder containing one subfolder per experiment (prefix 'nnUNetTrainer')
    :param num_processes: worker pool size passed to the evaluation helpers
    :param clean: if True, recompute results even when summary/threshold files already exist
    :return:
    """
    out = join(base_dir, 'cv_results')
    out_pp = join(base_dir, 'cv_results_pp')
    experiments = subfolders(base_dir, join=False, prefix='nnUNetTrainer')
    regions = get_brats_regions()
    gt_dir = join(base_dir, 'gt_niftis')
    replace_with = 2
    failed = []
    successful = []
    # step 1: per experiment, gather CV predictions, evaluate them, and determine +
    # apply the enhancing-tumor threshold postprocessing
    for e in experiments:
        print(e)
        try:
            o = join(out, e)
            o_p = join(out_pp, e)
            maybe_mkdir_p(o)
            maybe_mkdir_p(o_p)
            collect_cv_niftis(join(base_dir, e), o)
            if clean or not isfile(join(o, 'summary.csv')):
                evaluate_regions(o, gt_dir, regions, num_processes)
            if clean or not isfile(join(o_p, 'threshold.pkl')):
                determine_brats_postprocessing(o, gt_dir, o_p, num_processes, thresholds=list(np.arange(0, 760, 10)), replace_with=replace_with)
            if clean or not isfile(join(o_p, 'summary.csv')):
                evaluate_regions(o_p, gt_dir, regions, num_processes)
            successful.append(e)
        except Exception as ex:
            # keep going if a single experiment is broken; just record it
            print("\nERROR\n", e, ex, "\n")
            failed.append(e)
    # we are interested in the mean (nan is 1) column
    with open(join(base_dir, 'cv_summary.csv'), 'w') as f:
        f.write('name,whole,core,enh,mean\n')
        for e in successful:
            expected_nopp = join(out, e, 'summary.csv')
            # NOTE(review): join(out, out_pp, e, ...) - out_pp is absolute, so os.path.join
            # discards `out`; presumably join(out_pp, e, ...) was meant - confirm
            expected_pp = join(out, out_pp, e, 'summary.csv')
            if isfile(expected_nopp):
                # second-to-last row of the summary holds the aggregate scores
                res = np.loadtxt(expected_nopp, dtype=str, skiprows=0, delimiter=',')[-2]
                as_numeric = [float(i) for i in res[1:]]
                f.write(e + '_noPP,')
                f.write("%0.4f," % as_numeric[0])
                f.write("%0.4f," % as_numeric[1])
                f.write("%0.4f," % as_numeric[2])
                f.write("%0.4f\n" % np.mean(as_numeric))
            if isfile(expected_pp):
                res = np.loadtxt(expected_pp, dtype=str, skiprows=0, delimiter=',')[-2]
                as_numeric = [float(i) for i in res[1:]]
                f.write(e + '_PP,')
                f.write("%0.4f," % as_numeric[0])
                f.write("%0.4f," % as_numeric[1])
                f.write("%0.4f," % as_numeric[2])
                f.write("%0.4f\n" % np.mean(as_numeric))
    # this just crawls the folders and evaluates what it finds
    with open(join(base_dir, 'cv_summary2.csv'), 'w') as f:
        for folder in ['cv_results', 'cv_results_pp']:
            for ex in subdirs(join(base_dir, folder), join=False):
                print(folder, ex)
                expected = join(base_dir, folder, ex, 'summary.csv')
                if clean or not isfile(expected):
                    evaluate_regions(join(base_dir, folder, ex), gt_dir, regions, num_processes)
                if isfile(expected):
                    res = np.loadtxt(expected, dtype=str, skiprows=0, delimiter=',')[-2]
                    as_numeric = [float(i) for i in res[1:]]
                    f.write('%s__%s,' % (folder, ex))
                    f.write("%0.4f," % as_numeric[0])
                    f.write("%0.4f," % as_numeric[1])
                    f.write("%0.4f," % as_numeric[2])
                    f.write("%0.4f\n" % np.mean(as_numeric))
        # NOTE(review): this header is written AFTER the rows above, and the block
        # below duplicates the cv_summary.csv content - looks copy-pasted; confirm intended
        f.write('name,whole,core,enh,mean\n')
        for e in successful:
            expected_nopp = join(out, e, 'summary.csv')
            expected_pp = join(out, out_pp, e, 'summary.csv')
            if isfile(expected_nopp):
                res = np.loadtxt(expected_nopp, dtype=str, skiprows=0, delimiter=',')[-2]
                as_numeric = [float(i) for i in res[1:]]
                f.write(e + '_noPP,')
                f.write("%0.4f," % as_numeric[0])
                f.write("%0.4f," % as_numeric[1])
                f.write("%0.4f," % as_numeric[2])
                f.write("%0.4f\n" % np.mean(as_numeric))
            if isfile(expected_pp):
                res = np.loadtxt(expected_pp, dtype=str, skiprows=0, delimiter=',')[-2]
                as_numeric = [float(i) for i in res[1:]]
                f.write(e + '_PP,')
                f.write("%0.4f," % as_numeric[0])
                f.write("%0.4f," % as_numeric[1])
                f.write("%0.4f," % as_numeric[2])
                f.write("%0.4f\n" % np.mean(as_numeric))
    # apply threshold to val set
    expected_num_cases = 125
    missing_valset = []
    has_val_pred = []
    for e in successful:
        if isdir(join(base_dir, 'predVal', e)):
            currdir = join(base_dir, 'predVal', e)
            files = subfiles(currdir, suffix='.nii.gz', join=False)
            if len(files) != expected_num_cases:
                print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases))
                continue
            output_folder = join(base_dir, 'predVal_PP', e)
            maybe_mkdir_p(output_folder)
            # index 2 of threshold.pkl is best_threshold (see determine_brats_postprocessing)
            threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
            if threshold > 1000: threshold = 750 # don't make it too big!
            apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
            has_val_pred.append(e)
        else:
            print(e, 'has no valset predictions')
            missing_valset.append(e)
    # 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold' needs special treatment
    # (its threshold comes from the corresponding non-15fold experiment)
    e = 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5'
    currdir = join(base_dir, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
    output_folder = join(base_dir, 'predVal_PP', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
    maybe_mkdir_p(output_folder)
    threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
    if threshold > 1000: threshold = 750 # don't make it too big!
    apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
    # 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold' needs special treatment
    e = 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5'
    currdir = join(base_dir, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
    output_folder = join(base_dir, 'predVal_PP', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
    maybe_mkdir_p(output_folder)
    threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
    if threshold > 1000: threshold = 750 # don't make it too big!
    apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
    # convert val set to brats labels for submission
    output_converted = join(base_dir, 'converted_valSet')
    for source in ['predVal', 'predVal_PP']:
        for e in has_val_pred + ['nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold']:
            expected_source_folder = join(base_dir, source, e)
            if not isdir(expected_source_folder):
                print(e, 'has no', source)
                raise RuntimeError()
            files = subfiles(expected_source_folder, suffix='.nii.gz', join=False)
            if len(files) != expected_num_cases:
                print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases))
                continue
            target_folder = join(output_converted, source, e)
            maybe_mkdir_p(target_folder)
            convert_labels_back_to_BraTS_2018_2019_convention(expected_source_folder, target_folder)
    summarize_validation_set_predictions(output_converted)
def summarize_validation_set_predictions(base):
    """Aggregate the BraTS online-evaluation csvs found under base into one summary.csv:
    per experiment, a Dice triple + mean followed by a HD95 triple + mean."""
    with open(join(base, 'summary.csv'), 'w') as f:
        f.write('name,whole,core,enh,mean,whole,core,enh,mean\n')
        for subf in subfolders(base, join=False):
            for e in subfolders(join(base, subf), join=False):
                expected = join(base, subf, e, 'Stats_Validation_final.csv')
                if not isfile(expected):
                    print(subf, e, 'has missing csv')
                    continue
                table = np.loadtxt(expected, delimiter=',', dtype=str)
                assert table.shape[0] == 131, 'did not evaluate all 125 cases!'
                # 5th row from the bottom holds the aggregate statistics
                stats_row = table[-5]
                dice_vals = [float(i) for i in stats_row[1:4]]
                hd_vals = [float(i) for i in stats_row[-3:]]
                cells = [e + "_" + subf]
                # reorder each triple via indices [1], [2], [0] so the row matches
                # the whole,core,enh header, then append the triple's mean
                for triple in (dice_vals, hd_vals):
                    cells += ["%0.4f" % triple[1], "%0.4f" % triple[2], "%0.4f" % triple[0],
                              "%0.4f" % np.mean(triple)]
                f.write(",".join(cells) + "\n")
def compute_BraTS_dice(ref, pred):
    """
    Dice between two binary integer numpy arrays, using the BraTS convention for
    empty references: 1 if the prediction is empty as well, 0 otherwise.
    :param ref: binary reference mask
    :param pred: binary predicted mask
    :return:
    """
    if np.sum(ref) == 0:
        # empty reference: perfect score only for an empty prediction
        return 1 if np.sum(pred) == 0 else 0
    return dc(pred, ref)
def convert_all_to_BraTS(input_folder, output_folder, expected_num_cases=125):
    """Convert every complete prediction subfolder of input_folder back to the BraTS
    label convention; subfolders with the wrong file count are only reported."""
    for experiment in subdirs(input_folder, join=False):
        predictions = subfiles(join(input_folder, experiment), suffix='.nii.gz', join=False)
        if len(predictions) != expected_num_cases:
            # incomplete experiment: report and skip
            print(experiment)
            continue
        convert_labels_back_to_BraTS_2018_2019_convention(join(input_folder, experiment),
                                                          join(output_folder, experiment), num_processes=6)
def compute_BraTS_HD95(ref, pred):
    """
    95th percentile Hausdorff distance between two binary integer numpy arrays,
    spacing assumed to be (1, 1, 1). BraTS convention for empty masks: 0 when both
    are empty, otherwise the penalty value 373.12866.
    :param ref: binary reference mask
    :param pred: binary predicted mask
    :return:
    """
    ref_empty = np.sum(ref) == 0
    pred_empty = np.sum(pred) == 0
    if ref_empty and pred_empty:
        return 0
    if ref_empty or pred_empty:
        # one mask empty, the other not -> worst-case penalty
        return 373.12866
    return hd95(pred, ref, (1, 1, 1))
def evaluate_BraTS_case(arr: np.ndarray, arr_gt: np.ndarray):
    """
    Reimplementation of the BraTS evaluation scheme (assumes edema=1, non_enh=2, enh=3):
    Dice and HD95 for the whole tumor (any label), tumor core (labels > 1) and
    enhancing tumor (label 3).
    :param arr: predicted segmentation
    :param arr_gt: reference segmentation
    :return: (dc_whole, dc_core, dc_enh, hd95_whole, hd95_core, hd95_enh)
    """
    dices = []
    hd95s = []
    # region definitions, in output order: whole tumor, tumor core, enhancing tumor
    for region_of in (lambda a: a != 0, lambda a: a > 1, lambda a: a == 3):
        mask_gt = region_of(arr_gt).astype(int)
        mask_pred = region_of(arr).astype(int)
        dices.append(compute_BraTS_dice(mask_gt, mask_pred))
        hd95s.append(compute_BraTS_HD95(mask_gt, mask_pred))
    return dices[0], dices[1], dices[2], hd95s[0], hd95s[1], hd95s[2]
def load_evaluate(filename_gt: str, filename_pred: str):
    """Read a reference/prediction nifti pair and run evaluate_BraTS_case on them."""
    pred = sitk.GetArrayFromImage(sitk.ReadImage(filename_pred))
    gt = sitk.GetArrayFromImage(sitk.ReadImage(filename_gt))
    return evaluate_BraTS_case(pred, gt)
def evaluate_BraTS_folder(folder_pred, folder_gt, num_processes: int = 24, strict=False):
    """
    Evaluate every predicted nifti in folder_pred against the reference of the same
    filename in folder_gt and write per-case Dice/HD95 metrics to
    folder_pred/results.csv.
    :param folder_pred: folder with predicted segmentations (*.nii.gz)
    :param folder_gt: folder with reference segmentations, matching filenames
    :param num_processes: worker pool size
    :param strict: if True, additionally require a prediction for every reference
    """
    nii_pred = subfiles(folder_pred, suffix='.nii.gz', join=False)
    if len(nii_pred) == 0:
        return
    nii_gt = subfiles(folder_gt, suffix='.nii.gz', join=False)
    assert all([i in nii_gt for i in nii_pred]), 'not all predicted niftis have a reference file!'
    if strict:
        assert all([i in nii_pred for i in nii_gt]), 'not all gt niftis have a predicted file!'
    nii_pred_fullpath = [join(folder_pred, i) for i in nii_pred]
    nii_gt_fullpath = [join(folder_gt, i) for i in nii_pred]
    # bug fix: the pool was never closed/joined, leaking worker processes on every call
    with Pool(num_processes) as p:
        results = p.starmap(load_evaluate, zip(nii_gt_fullpath, nii_pred_fullpath))
    # now write to output file
    with open(join(folder_pred, 'results.csv'), 'w') as f:
        f.write("name,dc_whole,dc_core,dc_enh,hd95_whole,hd95_core,hd95_enh\n")
        for fname, r in zip(nii_pred, results):
            f.write(fname)
            f.write(",%0.4f,%0.4f,%0.4f,%3.3f,%3.3f,%3.3f\n" % r)
def load_csv_for_ranking(csv_file: str):
    """
    Read a results.csv (header row + one row per case) and return a (cases x 6)
    score matrix where higher is always better: the three Dice columns unchanged,
    the three HD95 columns mirrored as 373.129 - hd95.
    """
    raw = np.loadtxt(csv_file, dtype='str', delimiter=',')
    # skip the header row; pick Dice columns 1-3 and the last three (HD95) columns
    scores = raw[1:, [1, 2, 3, -3, -2, -1]].astype(float)
    # flip HD95 into a "higher is better" score by mirroring around the worst case
    scores[:, -3:] = 373.129 - scores[:, -3:]
    assert np.all(scores <= 373.129)
    assert np.all(scores >= 0)
    return scores
def rank_algorithms(data:np.ndarray):
    """
    BraTS-style ranking. data is (metrics x experiments x cases): experiments are
    ranked per metric and case (higher value = better, i.e. lower rank), those ranks
    are averaged per metric across cases, then averaged across metrics, and the
    averages are ranked once more.
    :param data:
    :return: (final_ranks, average_rank, per_metric_ranks)
    """
    num_metrics, num_experiments, num_cases = data.shape
    per_metric_ranks = np.zeros((num_metrics, num_experiments))
    # NOTE: the loop is hardcoded to 6 metrics (3x Dice + 3x HD95-derived scores)
    for metric in range(6):
        # negate so that larger metric values receive smaller (better) ranks
        case_ranks = np.apply_along_axis(ss.rankdata, 0, -data[metric], 'min')
        per_metric_ranks[metric] = case_ranks.mean(1)
    average_rank = np.mean(per_metric_ranks, 0)
    final_ranks = ss.rankdata(average_rank, 'min')
    return final_ranks, average_rank, per_metric_ranks
def score_and_postprocess_model_based_on_rank_then_aggregate():
    """
    Similarly to BraTS 2017 - BraTS 2019, each participant will be ranked for each of the X test cases. Each case
    includes 3 regions of evaluation, and the metrics used to produce the rankings will be the Dice Similarity
    Coefficient and the 95% Hausdorff distance. Thus, for X number of cases included in the BraTS 2020, each
    participant ends up having X*3*2 rankings. The final ranking score is the average of all these rankings normalized
    by the number of teams.
    https://zenodo.org/record/3718904

    -> let's optimize for this.

    Important: the outcome very much depends on the competing models. We need some references. We only got our own,
    so let's hope this still works

    :return:
    """
    # hard-coded experiment layout on the author's machine — adapt before reuse
    base = "/media/fabian/Results/nnUNet/3d_fullres/Task082_BraTS2020"
    replace_with = 2
    num_processes = 24
    expected_num_cases_val = 125

    # use a separate output folder from the previous experiments to ensure we are not messing things up
    output_base_here = join(base, 'use_brats_ranking')
    maybe_mkdir_p(output_base_here)

    # collect cv niftis and compute metrics with evaluate_BraTS_folder to ensure we work with the same metrics as brats
    out = join(output_base_here, 'cv_results')
    experiments = subfolders(base, join=False, prefix='nnUNetTrainer')
    gt_dir = join(base, 'gt_niftis')

    experiments_with_full_cv = []
    for e in experiments:
        print(e)
        o = join(out, e)
        maybe_mkdir_p(o)
        try:
            collect_cv_niftis(join(base, e), o)
            if not isfile(join(o, 'results.csv')):
                evaluate_BraTS_folder(o, gt_dir, num_processes, strict=True)
            experiments_with_full_cv.append(e)
        except Exception as ex:
            # incomplete cross-validation: drop the experiment and remove any
            # partial results file so a rerun starts clean
            print("\nERROR\n", e, ex, "\n")
            if isfile(join(o, 'results.csv')):
                os.remove(join(o, 'results.csv'))

    # rank the non-postprocessed models
    tmp = np.loadtxt(join(out, experiments_with_full_cv[0], 'results.csv'), dtype='str', delimiter=',')
    num_cases = len(tmp) - 1  # minus the csv header row

    data_for_ranking = np.zeros((6, len(experiments_with_full_cv), num_cases))
    for i, e in enumerate(experiments_with_full_cv):
        scores = load_csv_for_ranking(join(out, e, 'results.csv'))
        for metric in range(6):
            data_for_ranking[metric, i] = scores[:, metric]

    final_ranks, average_rank, ranks = rank_algorithms(data_for_ranking)

    for t in np.argsort(final_ranks):
        print(final_ranks[t], average_rank[t], experiments_with_full_cv[t])

    # for each model, create output directories with different thresholds. evaluate ALL OF THEM (might take a while lol)
    thresholds = np.arange(25, 751, 25)
    output_pp_tmp = join(output_base_here, 'cv_determine_pp_thresholds')
    for e in experiments_with_full_cv:
        input_folder = join(out, e)
        for t in thresholds:
            output_directory = join(output_pp_tmp, e, str(t))
            maybe_mkdir_p(output_directory)
            if not isfile(join(output_directory, 'results.csv')):
                apply_threshold_to_folder(input_folder, output_directory, t, replace_with, processes=16)
                evaluate_BraTS_folder(output_directory, gt_dir, num_processes)

    # load ALL the results!
    results = []
    experiment_names = []
    for e in experiments_with_full_cv:
        for t in thresholds:
            output_directory = join(output_pp_tmp, e, str(t))
            expected_file = join(output_directory, 'results.csv')
            if not isfile(expected_file):
                print(e, 'does not have a results file for threshold', t)
                continue
            results.append(load_csv_for_ranking(expected_file))
            # postprocessed variants are named "<experiment>___<threshold>"
            experiment_names.append("%s___%d" % (e, t))
    all_results = np.concatenate([i[None] for i in results], 0).transpose((2, 0, 1))

    # concatenate with non postprocessed models
    all_results = np.concatenate((data_for_ranking, all_results), 1)
    experiment_names += experiments_with_full_cv

    final_ranks, average_rank, ranks = rank_algorithms(all_results)

    for t in np.argsort(final_ranks):
        print(final_ranks[t], average_rank[t], experiment_names[t])

    # for each model, print the non postprocessed model as well as the best postprocessed model. If there are
    # validation set predictions, apply the best threshold to the validation set
    pred_val_base = join(base, 'predVal_PP_rank')
    has_val_pred = []
    for e in experiments_with_full_cv:
        rank_nonpp = final_ranks[experiment_names.index(e)]
        avg_rank_nonpp = average_rank[experiment_names.index(e)]
        print(e, avg_rank_nonpp, rank_nonpp)
        predicted_val = join(base, 'predVal', e)

        # all "<e>___<threshold>" entries belonging to this experiment
        pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
        if len(pp_models) > 0:
            ranks = [final_ranks[i] for i in pp_models]
            best_idx = np.argmin(ranks)
            best = experiment_names[pp_models[best_idx]]
            best_avg_rank = average_rank[pp_models[best_idx]]
            print(best, best_avg_rank, min(ranks))
            print('')

            # apply threshold to validation set
            best_threshold = int(best.split('___')[-1])
            if not isdir(predicted_val):
                print(e, 'has not valset predictions')
            else:
                files = subfiles(predicted_val, suffix='.nii.gz')
                if len(files) != expected_num_cases_val:
                    print(e, 'has missing val cases. found: %d expected: %d' % (len(files), expected_num_cases_val))
                else:
                    apply_threshold_to_folder(predicted_val, join(pred_val_base, e), best_threshold, replace_with, num_processes)
                    has_val_pred.append(e)
        else:
            print(e, 'not found in ranking')

    # apply nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5 to nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold
    # (the 15fold variants have no own cv results, so reuse the 5fold threshold)
    e = 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5'
    pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
    ranks = [final_ranks[i] for i in pp_models]
    best_idx = np.argmin(ranks)
    best = experiment_names[pp_models[best_idx]]
    best_avg_rank = average_rank[pp_models[best_idx]]
    best_threshold = int(best.split('___')[-1])
    predicted_val = join(base, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
    apply_threshold_to_folder(predicted_val, join(pred_val_base, 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold'), best_threshold, replace_with, num_processes)
    has_val_pred.append('nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')

    # apply nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5 to nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold
    e = 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5'
    pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
    ranks = [final_ranks[i] for i in pp_models]
    best_idx = np.argmin(ranks)
    best = experiment_names[pp_models[best_idx]]
    best_avg_rank = average_rank[pp_models[best_idx]]
    best_threshold = int(best.split('___')[-1])
    predicted_val = join(base, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
    apply_threshold_to_folder(predicted_val, join(pred_val_base, 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold'), best_threshold, replace_with, num_processes)
    has_val_pred.append('nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')

    # convert valsets back to the original BraTS label convention for upload
    output_converted = join(base, 'converted_valSet')

    for e in has_val_pred:
        expected_source_folder = join(base, 'predVal_PP_rank', e)
        if not isdir(expected_source_folder):
            print(e, 'has no predVal_PP_rank')
            raise RuntimeError()
        files = subfiles(expected_source_folder, suffix='.nii.gz', join=False)
        if len(files) != expected_num_cases_val:
            print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases_val))
            continue
        target_folder = join(output_converted, 'predVal_PP_rank', e)
        maybe_mkdir_p(target_folder)
        convert_labels_back_to_BraTS_2018_2019_convention(expected_source_folder, target_folder)

    # now load all the csvs for the validation set (obtained from evaluation platform) and rank our models on the
    # validation set
    flds = subdirs(output_converted, join=False)
    results_valset = []
    names_valset = []
    for f in flds:
        curr = join(output_converted, f)
        experiments = subdirs(curr, join=False)
        for e in experiments:
            currr = join(curr, e)
            expected_file = join(currr, 'Stats_Validation_final.csv')
            if not isfile(expected_file):
                print(f, e, "has not been evaluated yet!")
            else:
                # [:-5] drops the summary rows the platform appends at the end
                # — presumably mean/std etc.; verify against a downloaded csv
                res = load_csv_for_ranking(expected_file)[:-5]
                assert res.shape[0] == expected_num_cases_val
                results_valset.append(res[None])
                names_valset.append("%s___%s" % (f, e))
    results_valset = np.concatenate(results_valset, 0)  # experiments x cases x metrics
    # convert to metrics x experiments x cases
    results_valset = results_valset.transpose((2, 0, 1))
    final_ranks, average_rank, ranks = rank_algorithms(results_valset)
    for t in np.argsort(final_ranks):
        print(final_ranks[t], average_rank[t], names_valset[t])
if __name__ == "__main__":
    """
    THIS CODE IS A MESS. IT IS PROVIDED AS IS WITH NO GUARANTEES. YOU HAVE TO DIG THROUGH IT YOURSELF. GOOD LUCK ;-)

    REMEMBER TO CONVERT LABELS BACK TO BRATS CONVENTION AFTER PREDICTION!
    """
    task_name = "Task082_BraTS2020"
    # local download locations — adapt before reuse
    downloaded_data_dir = "/home/fabian/Downloads/MICCAI_BraTS2020_TrainingData"
    downloaded_data_dir_val = "/home/fabian/Downloads/MICCAI_BraTS2020_ValidationData"

    target_base = join(nnUNet_raw_data, task_name)
    target_imagesTr = join(target_base, "imagesTr")
    target_imagesVal = join(target_base, "imagesVal")
    target_imagesTs = join(target_base, "imagesTs")
    target_labelsTr = join(target_base, "labelsTr")

    maybe_mkdir_p(target_imagesTr)
    maybe_mkdir_p(target_imagesVal)
    maybe_mkdir_p(target_imagesTs)
    maybe_mkdir_p(target_labelsTr)

    patient_names = []
    cur = join(downloaded_data_dir)
    for p in subdirs(cur, join=False):
        patdir = join(cur, p)
        patient_name = p
        patient_names.append(patient_name)
        t1 = join(patdir, p + "_t1.nii.gz")
        t1c = join(patdir, p + "_t1ce.nii.gz")
        t2 = join(patdir, p + "_t2.nii.gz")
        flair = join(patdir, p + "_flair.nii.gz")
        seg = join(patdir, p + "_seg.nii.gz")

        assert all([
            isfile(t1),
            isfile(t1c),
            isfile(t2),
            isfile(flair),
            isfile(seg)
        ]), "%s" % patient_name

        # copy the four modalities using nnU-Net's _000X modality suffixes
        shutil.copy(t1, join(target_imagesTr, patient_name + "_0000.nii.gz"))
        shutil.copy(t1c, join(target_imagesTr, patient_name + "_0001.nii.gz"))
        shutil.copy(t2, join(target_imagesTr, patient_name + "_0002.nii.gz"))
        shutil.copy(flair, join(target_imagesTr, patient_name + "_0003.nii.gz"))

        # converts BraTS labels to the consecutive nnU-Net convention on the fly
        copy_BraTS_segmentation_and_convert_labels(seg, join(target_labelsTr, patient_name + ".nii.gz"))

    json_dict = OrderedDict()
    json_dict['name'] = "BraTS2020"
    json_dict['description'] = "nothing"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see BraTS2020"
    json_dict['licence'] = "see BraTS2020 license"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "T1",
        "1": "T1ce",
        "2": "T2",
        "3": "FLAIR"
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "edema",
        "2": "non-enhancing",
        "3": "enhancing",
    }
    json_dict['numTraining'] = len(patient_names)
    json_dict['numTest'] = 0
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
                             patient_names]
    json_dict['test'] = []

    save_json(json_dict, join(target_base, "dataset.json"))

    # validation set: images only, no segmentations available
    if downloaded_data_dir_val is not None:
        for p in subdirs(downloaded_data_dir_val, join=False):
            patdir = join(downloaded_data_dir_val, p)
            patient_name = p
            t1 = join(patdir, p + "_t1.nii.gz")
            t1c = join(patdir, p + "_t1ce.nii.gz")
            t2 = join(patdir, p + "_t2.nii.gz")
            flair = join(patdir, p + "_flair.nii.gz")

            assert all([
                isfile(t1),
                isfile(t1c),
                isfile(t2),
                isfile(flair),
            ]), "%s" % patient_name

            shutil.copy(t1, join(target_imagesVal, patient_name + "_0000.nii.gz"))
            shutil.copy(t1c, join(target_imagesVal, patient_name + "_0001.nii.gz"))
            shutil.copy(t2, join(target_imagesVal, patient_name + "_0002.nii.gz"))
            shutil.copy(flair, join(target_imagesVal, patient_name + "_0003.nii.gz"))

    # test set: images only, processed only if it was downloaded
    downloaded_data_dir_test = "/home/fabian/Downloads/MICCAI_BraTS2020_TestingData"

    if isdir(downloaded_data_dir_test):
        for p in subdirs(downloaded_data_dir_test, join=False):
            patdir = join(downloaded_data_dir_test, p)
            patient_name = p
            t1 = join(patdir, p + "_t1.nii.gz")
            t1c = join(patdir, p + "_t1ce.nii.gz")
            t2 = join(patdir, p + "_t2.nii.gz")
            flair = join(patdir, p + "_flair.nii.gz")

            assert all([
                isfile(t1),
                isfile(t1c),
                isfile(t2),
                isfile(flair),
            ]), "%s" % patient_name

            shutil.copy(t1, join(target_imagesTs, patient_name + "_0000.nii.gz"))
            shutil.copy(t1c, join(target_imagesTs, patient_name + "_0001.nii.gz"))
            shutil.copy(t2, join(target_imagesTs, patient_name + "_0002.nii.gz"))
            shutil.copy(flair, join(target_imagesTs, patient_name + "_0003.nii.gz"))

    # test set
    # nnUNet_ensemble -f nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold -o ensembled_nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold
    # apply_threshold_to_folder('ensembled_nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold/', 'ensemble_PP200/', 200, 2)
    # convert_labels_back_to_BraTS_2018_2019_convention('ensemble_PP200/', 'ensemble_PP200_converted')
| 32,886
| 42.790945
| 426
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task032_BraTS_2018.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing.pool import Pool
import numpy as np
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.dataset_conversion.Task043_BraTS_2019 import copy_BraTS_segmentation_and_convert_labels
from nnunet.paths import nnUNet_raw_data
import SimpleITK as sitk
import shutil
def convert_labels_back_to_BraTS(seg: np.ndarray):
    """Map nnU-Net labels back to the original BraTS label convention.

    nnU-Net uses consecutive labels (1=edema, 2=non-enhancing, 3=enhancing);
    BraTS expects 2=edema, 1=non-enhancing, 4=enhancing. Background stays 0.
    """
    label_map = {1: 2, 2: 1, 3: 4}
    converted = np.zeros_like(seg)
    for nnunet_label, brats_label in label_map.items():
        converted[seg == nnunet_label] = brats_label
    return converted
def load_convert_save(filename, input_folder, output_folder):
    """Read one nifti, convert its labels back to the BraTS convention and
    write the result (with identical geometry) into output_folder."""
    source_itk = sitk.ReadImage(join(input_folder, filename))
    converted = convert_labels_back_to_BraTS(sitk.GetArrayFromImage(source_itk))
    result_itk = sitk.GetImageFromArray(converted)
    result_itk.CopyInformation(source_itk)  # preserve spacing/origin/direction
    sitk.WriteImage(result_itk, join(output_folder, filename))
def convert_labels_back_to_BraTS_2018_2019_convention(input_folder: str, output_folder: str, num_processes: int = 12):
    """
    reads all prediction files (nifti) in the input folder, converts the labels back to BraTS convention and saves the
    result in output_folder

    :param input_folder:
    :param output_folder:
    :param num_processes: number of worker processes used for the conversion
    :return:
    """
    maybe_mkdir_p(output_folder)
    niftis = subfiles(input_folder, suffix='.nii.gz', join=False)
    pool = Pool(num_processes)
    pool.starmap(load_convert_save, [(f, input_folder, output_folder) for f in niftis])
    pool.close()
    pool.join()
if __name__ == "__main__":
    """
    REMEMBER TO CONVERT LABELS BACK TO BRATS CONVENTION AFTER PREDICTION!
    """
    task_name = "Task032_BraTS2018"
    # local download location — adapt before reuse
    downloaded_data_dir = "/home/fabian/Downloads/BraTS2018_train_val_test_data/MICCAI_BraTS_2018_Data_Training"

    target_base = join(nnUNet_raw_data, task_name)
    target_imagesTr = join(target_base, "imagesTr")
    target_imagesVal = join(target_base, "imagesVal")
    target_imagesTs = join(target_base, "imagesTs")
    target_labelsTr = join(target_base, "labelsTr")

    maybe_mkdir_p(target_imagesTr)
    maybe_mkdir_p(target_imagesVal)
    maybe_mkdir_p(target_imagesTs)
    maybe_mkdir_p(target_labelsTr)

    patient_names = []
    # training data is split into HGG/LGG subfolders; the grade is prepended to
    # the case name to keep names unique
    for tpe in ["HGG", "LGG"]:
        cur = join(downloaded_data_dir, tpe)
        for p in subdirs(cur, join=False):
            patdir = join(cur, p)
            patient_name = tpe + "__" + p
            patient_names.append(patient_name)
            t1 = join(patdir, p + "_t1.nii.gz")
            t1c = join(patdir, p + "_t1ce.nii.gz")
            t2 = join(patdir, p + "_t2.nii.gz")
            flair = join(patdir, p + "_flair.nii.gz")
            seg = join(patdir, p + "_seg.nii.gz")

            assert all([
                isfile(t1),
                isfile(t1c),
                isfile(t2),
                isfile(flair),
                isfile(seg)
            ]), "%s" % patient_name

            # copy the four modalities using nnU-Net's _000X modality suffixes
            shutil.copy(t1, join(target_imagesTr, patient_name + "_0000.nii.gz"))
            shutil.copy(t1c, join(target_imagesTr, patient_name + "_0001.nii.gz"))
            shutil.copy(t2, join(target_imagesTr, patient_name + "_0002.nii.gz"))
            shutil.copy(flair, join(target_imagesTr, patient_name + "_0003.nii.gz"))

            copy_BraTS_segmentation_and_convert_labels(seg, join(target_labelsTr, patient_name + ".nii.gz"))

    json_dict = OrderedDict()
    json_dict['name'] = "BraTS2018"
    json_dict['description'] = "nothing"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see BraTS2018"
    json_dict['licence'] = "see BraTS2019 license"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "T1",
        "1": "T1ce",
        "2": "T2",
        "3": "FLAIR"
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "edema",
        "2": "non-enhancing",
        "3": "enhancing",
    }
    json_dict['numTraining'] = len(patient_names)
    json_dict['numTest'] = 0
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
                             patient_names]
    json_dict['test'] = []

    save_json(json_dict, join(target_base, "dataset.json"))

    del tpe, cur

    # validation set: images only, no segmentations available
    downloaded_data_dir = "/home/fabian/Downloads/BraTS2018_train_val_test_data/MICCAI_BraTS_2018_Data_Validation"

    for p in subdirs(downloaded_data_dir, join=False):
        patdir = join(downloaded_data_dir, p)
        patient_name = p
        t1 = join(patdir, p + "_t1.nii.gz")
        t1c = join(patdir, p + "_t1ce.nii.gz")
        t2 = join(patdir, p + "_t2.nii.gz")
        flair = join(patdir, p + "_flair.nii.gz")

        assert all([
            isfile(t1),
            isfile(t1c),
            isfile(t2),
            isfile(flair),
        ]), "%s" % patient_name

        shutil.copy(t1, join(target_imagesVal, patient_name + "_0000.nii.gz"))
        shutil.copy(t1c, join(target_imagesVal, patient_name + "_0001.nii.gz"))
        shutil.copy(t2, join(target_imagesVal, patient_name + "_0002.nii.gz"))
        shutil.copy(flair, join(target_imagesVal, patient_name + "_0003.nii.gz"))

    # test set: images only
    downloaded_data_dir = "/home/fabian/Downloads/BraTS2018_train_val_test_data/MICCAI_BraTS_2018_Data_Testing_FIsensee"

    for p in subdirs(downloaded_data_dir, join=False):
        patdir = join(downloaded_data_dir, p)
        patient_name = p
        t1 = join(patdir, p + "_t1.nii.gz")
        t1c = join(patdir, p + "_t1ce.nii.gz")
        t2 = join(patdir, p + "_t2.nii.gz")
        flair = join(patdir, p + "_flair.nii.gz")

        assert all([
            isfile(t1),
            isfile(t1c),
            isfile(t2),
            isfile(flair),
        ]), "%s" % patient_name

        shutil.copy(t1, join(target_imagesTs, patient_name + "_0000.nii.gz"))
        shutil.copy(t1c, join(target_imagesTs, patient_name + "_0001.nii.gz"))
        shutil.copy(t2, join(target_imagesTs, patient_name + "_0002.nii.gz"))
        shutil.copy(flair, join(target_imagesTs, patient_name + "_0003.nii.gz"))
| 6,634
| 36.485876
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Pool
from multiprocessing.dummy import Pool
import SimpleITK as sitk
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from skimage.io import imread
from skimage.io import imsave
from skimage.morphology import ball
from skimage.morphology import erosion
from skimage.transform import resize
from nnunet.paths import nnUNet_raw_data
from nnunet.paths import preprocessing_output_dir
def load_bmp_convert_to_nifti_borders(img_file, lab_file, img_out_base, anno_out, spacing, border_thickness=0.7):
    """Convert one raw tif image (and optionally its instance annotation) to nifti.

    The annotation is reduced to three classes: 0 background, 1 cell interior,
    2 cell border (border computed with the given physical thickness).
    """
    image = imread(img_file)
    image_itk = sitk.GetImageFromArray(image.astype(np.float32))
    image_itk.SetSpacing(np.array(spacing)[::-1])
    sitk.WriteImage(image_itk, join(img_out_base + "_0000.nii.gz"))

    if lab_file is None:
        return  # test cases have no annotation

    labels = imread(lab_file)
    border_mask = generate_border_as_suggested_by_twollmann(labels, spacing, border_thickness)
    labels[labels > 0] = 1
    labels[border_mask == 1] = 2
    label_itk = sitk.GetImageFromArray(labels.astype(np.uint8))
    label_itk.SetSpacing(np.array(spacing)[::-1])
    sitk.WriteImage(label_itk, anno_out)
def generate_ball(spacing, radius, dtype=int):
    """Return a binary ball structuring element covering the given physical
    radius, resampled so that anisotropic voxel spacing is respected."""
    voxels_per_axis = np.round(radius / np.array(spacing)).astype(int)
    target_shape = 2 * voxels_per_axis + 1
    isotropic_ball = ball(max(target_shape) * 2, dtype=np.float64)
    resampled = resize(isotropic_ball, target_shape, 1, 'constant', 0, clip=True, anti_aliasing=False,
                       preserve_range=True)
    # binarize the interpolated result
    return (resampled > 0.5).astype(dtype)
def generate_border_as_suggested_by_twollmann(label_img: np.ndarray, spacing,
                                              border_thickness: float = 2) -> np.ndarray:
    """Return a binary mask marking, for every labeled instance, the voxels
    removed by an erosion with a border_thickness-sized (anisotropic) ball."""
    selem = generate_ball(spacing, border_thickness)
    border = np.zeros_like(label_img)
    for instance_id in np.unique(label_img):
        if instance_id == 0:
            continue  # background carries no border
        instance_mask = (label_img == instance_id).astype(int)
        shrunk = erosion(instance_mask, selem)
        border[(shrunk == 0) & (instance_mask != 0)] = 1
    return border
def find_differences(labelstr1, labelstr2):
    """Debug helper: for each nifti present in labelstr1, print how many
    voxels differ from the same-named file in labelstr2."""
    for case_file in subfiles(labelstr1, suffix='.nii.gz', join=False):
        seg_a = sitk.GetArrayFromImage(sitk.ReadImage(join(labelstr1, case_file)))
        seg_b = sitk.GetArrayFromImage(sitk.ReadImage(join(labelstr2, case_file)))
        print(case_file, np.sum(seg_a != seg_b))
def prepare_task(base, task_id, task_name, spacing, border_thickness: float = 15, processes: int = 16):
    # Converts a Cell Tracking Challenge style dataset (<base>_train/<base>_test
    # containing tif sequences with *_GT/SEG annotations) into nnU-Net's raw
    # data layout and writes the dataset.json. Conversion runs asynchronously
    # on a worker pool.
    p = Pool(processes)

    foldername = "Task%03.0d_%s" % (task_id, task_name)

    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    maybe_mkdir_p(imagestr)
    maybe_mkdir_p(imagests)
    maybe_mkdir_p(labelstr)

    train_patient_names = []
    test_patient_names = []
    res = []

    for train_sequence in [i for i in subfolders(base + "_train", join=False) if not i.endswith("_GT")]:
        train_cases = subfiles(join(base + '_train', train_sequence), suffix=".tif", join=False)
        for t in train_cases:
            casename = train_sequence + "_" + t[:-4]
            img_file = join(base + '_train', train_sequence, t)
            # annotation files are named man_seg<frame>.tif in the _GT/SEG folder
            lab_file = join(base + '_train', train_sequence + "_GT", "SEG", "man_seg" + t[1:])
            if not isfile(lab_file):
                # not every frame is annotated; skip frames without a reference
                continue
            img_out_base = join(imagestr, casename)
            anno_out = join(labelstr, casename + ".nii.gz")
            res.append(
                p.starmap_async(load_bmp_convert_to_nifti_borders, ((img_file, lab_file, img_out_base, anno_out, spacing, border_thickness),)))
            train_patient_names.append(casename)

    for test_sequence in [i for i in subfolders(base + "_test", join=False) if not i.endswith("_GT")]:
        test_cases = subfiles(join(base + '_test', test_sequence), suffix=".tif", join=False)
        for t in test_cases:
            casename = test_sequence + "_" + t[:-4]
            img_file = join(base + '_test', test_sequence, t)
            lab_file = None
            img_out_base = join(imagests, casename)
            anno_out = None
            res.append(
                p.starmap_async(load_bmp_convert_to_nifti_borders, ((img_file, lab_file, img_out_base, anno_out, spacing, border_thickness),)))
            test_patient_names.append(casename)

    _ = [i.get() for i in res]  # block until all async conversion jobs finished

    json_dict = {}
    json_dict['name'] = task_name
    json_dict['description'] = ""
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = ""
    json_dict['licence'] = ""
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "BF",
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "cell",
        "2": "border",
    }

    json_dict['numTraining'] = len(train_patient_names)
    json_dict['numTest'] = len(test_patient_names)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
                             train_patient_names]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i for i in test_patient_names]

    save_json(json_dict, os.path.join(out_base, "dataset.json"))
    p.close()
    p.join()
def plot_images(folder, output_folder):
    """Save a png of the central z-slice of every nifti found in folder."""
    maybe_mkdir_p(output_folder)
    import matplotlib.pyplot as plt
    for nifti_name in subfiles(folder, suffix='.nii.gz', join=False):
        volume = sitk.GetArrayFromImage(sitk.ReadImage(join(folder, nifti_name)))
        middle = volume.shape[0] // 2
        plt.imsave(join(output_folder, nifti_name[:-7] + '.png'), volume[middle])
def convert_to_tiff(nifti_image: str, output_name: str):
    """Write a nifti volume as a compressed uint16 tiff stack."""
    volume = sitk.GetArrayFromImage(sitk.ReadImage(nifti_image))
    imsave(output_name, volume.astype(np.uint16), compress=6)
def convert_to_instance_seg(arr: np.ndarray, spacing: tuple = (0.2, 0.125, 0.125)):
    """Turn a semantic core/border segmentation (1=core, 2=border) into an
    instance segmentation: label the connected cores, then grow each core into
    the border voxels by repeated anisotropy-aware dilation until no border
    voxel is left unassigned. Returns a uint32 instance map."""
    from skimage.morphology import label, dilation

    # 1 is core, 2 is border
    objects = label((arr == 1).astype(int))
    final = np.copy(objects)
    remaining_border = arr == 2
    current = np.copy(objects)
    dilated_mm = np.array((0, 0, 0))  # how far (in mm) each axis has grown so far
    spacing = np.array(spacing)
    while np.sum(remaining_border) > 0:
        # build an anisotropic structuring element: only grow along axes that
        # have not yet been dilated as far (in mm) as the furthest axis
        strel_size = [0, 0, 0]
        maximum_dilation = max(dilated_mm)
        for i in range(3):
            if spacing[i] == min(spacing):
                strel_size[i] = 1
                continue
            if dilated_mm[i] + spacing[i] / 2 < maximum_dilation:
                strel_size[i] = 1
        ball_here = ball(1)

        # collapse the 3x3x3 ball along axes that must not grow this iteration
        if strel_size[0] == 0: ball_here = ball_here[1:2]
        if strel_size[1] == 0: ball_here = ball_here[:, 1:2]
        if strel_size[2] == 0: ball_here = ball_here[:, :, 1:2]

        #print(1)
        dilated = dilation(current, ball_here)
        # only claim voxels that were unassigned and belong to the border class
        diff = (current == 0) & (dilated != current)
        final[diff & remaining_border] = dilated[diff & remaining_border]
        remaining_border[diff] = 0
        current = dilated
        dilated_mm = [dilated_mm[i] + spacing[i] if strel_size[i] == 1 else dilated_mm[i] for i in range(3)]
    return final.astype(np.uint32)
def convert_to_instance_seg2(arr: np.ndarray, spacing: tuple = (0.2, 0.125, 0.125), small_center_threshold=30,
                             isolated_border_as_separate_instance_threshold: int = 15):
    """Improved variant of convert_to_instance_seg: additionally reclassifies
    tiny cores (likely false positives) as border before growing, and gives
    sufficiently large border-only objects their own instance id instead of
    fusing them with the nearest neighbor. Note: modifies `arr` in place."""
    from skimage.morphology import label, dilation

    # we first identify centers that are too small and set them to be border. This should remove false positive instances
    objects = label((arr == 1).astype(int))
    for o in np.unique(objects):
        if o > 0 and np.sum(objects == o) <= small_center_threshold:
            arr[objects == o] = 2

    # 1 is core, 2 is border
    objects = label((arr == 1).astype(int))
    final = np.copy(objects)
    remaining_border = arr == 2
    current = np.copy(objects)
    dilated_mm = np.array((0, 0, 0))  # how far (in mm) each axis has grown so far
    spacing = np.array(spacing)
    while np.sum(remaining_border) > 0:
        # anisotropy-aware growth: only dilate axes lagging behind in mm
        strel_size = [0, 0, 0]
        maximum_dilation = max(dilated_mm)
        for i in range(3):
            if spacing[i] == min(spacing):
                strel_size[i] = 1
                continue
            if dilated_mm[i] + spacing[i] / 2 < maximum_dilation:
                strel_size[i] = 1
        ball_here = ball(1)

        if strel_size[0] == 0: ball_here = ball_here[1:2]
        if strel_size[1] == 0: ball_here = ball_here[:, 1:2]
        if strel_size[2] == 0: ball_here = ball_here[:, :, 1:2]

        #print(1)
        dilated = dilation(current, ball_here)
        diff = (current == 0) & (dilated != current)
        final[diff & remaining_border] = dilated[diff & remaining_border]
        remaining_border[diff] = 0
        current = dilated
        dilated_mm = [dilated_mm[i] + spacing[i] if strel_size[i] == 1 else dilated_mm[i] for i in range(3)]

    # what can happen is that a cell is so small that the network only predicted border and no core. This cell will be
    # fused with the nearest other instance, which we don't want. Therefore we identify isolated border predictions and
    # give them a separate instance id
    # we identify isolated border predictions by checking each foreground object in arr and see whether this object
    # also contains label 1
    max_label = np.max(final)

    foreground_objects = label((arr != 0).astype(int))
    for i in np.unique(foreground_objects):
        if i > 0 and (1 not in np.unique(arr[foreground_objects==i])):
            size_of_object = np.sum(foreground_objects==i)
            if size_of_object >= isolated_border_as_separate_instance_threshold:
                final[foreground_objects == i] = max_label + 1
                max_label += 1
                #print('yeah boi')

    return final.astype(np.uint32)
def load_instanceseg_save(in_file: str, out_file: str, better: bool):
    """Convert one core/border semantic segmentation nifti into an instance
    segmentation and write it with unchanged geometry.

    :param better: if True use convert_to_instance_seg2 (handles small cores
        and isolated borders), otherwise convert_to_instance_seg.
    """
    source_itk = sitk.ReadImage(in_file)
    converter = convert_to_instance_seg2 if better else convert_to_instance_seg
    instances = converter(sitk.GetArrayFromImage(source_itk))
    result_itk = sitk.GetImageFromArray(instances)
    result_itk.CopyInformation(source_itk)
    sitk.WriteImage(result_itk, out_file)
def convert_all_to_instance(input_folder: str, output_folder: str, processes: int = 24, better: bool = False):
    """Run load_instanceseg_save over every nifti in input_folder in parallel,
    writing same-named instance segmentations into output_folder."""
    maybe_mkdir_p(output_folder)
    filenames = subfiles(input_folder, suffix='.nii.gz', join=False)
    job_args = [(join(input_folder, f), join(output_folder, f), better) for f in filenames]
    pool = Pool(processes)
    pool.starmap_async(load_instanceseg_save, job_args).get()
    pool.close()
    pool.join()
if __name__ == "__main__":
    # local dataset location — adapt before reuse
    base = "/home/fabian/data/Fluo-N3DH-SIM"
    task_id = 76
    task_name = 'Fluo_N3DH_SIM'
    spacing = (0.2, 0.125, 0.125)
    border_thickness = 0.5

    prepare_task(base, task_id, task_name, spacing, border_thickness, 12)

    # we need custom splits: train on one sequence, validate on the other
    task_name = "Task076_Fluo_N3DH_SIM"
    labelsTr = join(nnUNet_raw_data, task_name, "labelsTr")
    cases = subfiles(labelsTr, suffix='.nii.gz', join=False)
    splits = []
    splits.append(
        {'train': [i[:-7] for i in cases if i.startswith('01_')],
         'val': [i[:-7] for i in cases if i.startswith('02_')]}
    )
    splits.append(
        {'train': [i[:-7] for i in cases if i.startswith('02_')],
         'val': [i[:-7] for i in cases if i.startswith('01_')]}
    )

    maybe_mkdir_p(join(preprocessing_output_dir, task_name))

    save_pickle(splits, join(preprocessing_output_dir, task_name, "splits_final.pkl"))

    # test set was converted to instance seg with convert_all_to_instance with better=True
    # convert to tiff with convert_to_tiff
| 12,344
| 38.440895
| 143
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from multiprocessing import Pool
import SimpleITK as sitk
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from skimage.io import imread
from skimage.io import imsave
from skimage.morphology import disk
from skimage.morphology import erosion
from skimage.transform import resize
from nnunet.paths import nnUNet_raw_data
def load_bmp_convert_to_nifti_borders_2d(img_file, lab_file, img_out_base, anno_out, spacing, border_thickness=0.7):
    """2d variant: convert one tif image (and optionally its annotation) to a
    pseudo-3d nifti (single slice with a dummy z-spacing of 999).

    The annotation is reduced to 0 background, 1 cell, 2 cell border.
    """
    image = imread(img_file)
    image_itk = sitk.GetImageFromArray(image.astype(np.float32)[None])
    image_itk.SetSpacing(list(spacing)[::-1] + [999])
    sitk.WriteImage(image_itk, join(img_out_base + "_0000.nii.gz"))

    if lab_file is None:
        return  # test cases have no annotation

    labels = imread(lab_file)
    border_mask = generate_border_as_suggested_by_twollmann_2d(labels, spacing, border_thickness)
    labels[labels > 0] = 1
    labels[border_mask == 1] = 2
    label_itk = sitk.GetImageFromArray(labels.astype(np.uint8)[None])
    label_itk.SetSpacing(list(spacing)[::-1] + [999])
    sitk.WriteImage(label_itk, anno_out)
def generate_disk(spacing, radius, dtype=int):
    """Return a binary disk structuring element covering the given physical
    radius, resampled so that anisotropic pixel spacing is respected."""
    pixels_per_axis = np.round(radius / np.array(spacing)).astype(int)
    target_shape = 2 * pixels_per_axis + 1
    isotropic_disk = disk(max(target_shape) * 2, dtype=np.float64)
    resampled = resize(isotropic_disk, target_shape, 1, 'constant', 0, clip=True, anti_aliasing=False,
                       preserve_range=True)
    # binarize the interpolated result
    return (resampled > 0.5).astype(dtype)
def generate_border_as_suggested_by_twollmann_2d(label_img: np.ndarray, spacing,
                                                 border_thickness: float = 2) -> np.ndarray:
    """Return a binary mask marking, for every labeled instance, the pixels
    removed by an erosion with a border_thickness-sized (anisotropic) disk."""
    selem = generate_disk(spacing, border_thickness)
    border = np.zeros_like(label_img)
    for instance_id in np.unique(label_img):
        if instance_id == 0:
            continue  # background carries no border
        instance_mask = (label_img == instance_id).astype(int)
        shrunk = erosion(instance_mask, selem)
        border[(shrunk == 0) & (instance_mask != 0)] = 1
    return border
def prepare_task(base, task_id, task_name, spacing, border_thickness: float = 15):
    """Convert a CellTrackingChallenge-style download into the nnU-Net raw-data
    layout (TaskXXX_name with imagesTr/imagesTs/labelsTr and dataset.json).

    ``base`` is the path prefix of the download: ``base + "_train"`` and
    ``base + "_test"`` must exist, each containing per-sequence subfolders
    (ground truth lives in ``<sequence>_GT/SEG``). Image conversion runs
    asynchronously in a pool of 16 workers via
    load_bmp_convert_to_nifti_borders_2d, which also paints a border class of
    ``border_thickness`` mm around each cell.
    """
    p = Pool(16)

    foldername = "Task%03.0d_%s" % (task_id, task_name)
    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    maybe_mkdir_p(imagestr)
    maybe_mkdir_p(imagests)
    maybe_mkdir_p(labelstr)

    train_patient_names = []
    test_patient_names = []
    res = []

    # training sequences: every subfolder of base_train that is not a *_GT folder
    for train_sequence in [i for i in subfolders(base + "_train", join=False) if not i.endswith("_GT")]:
        train_cases = subfiles(join(base + '_train', train_sequence), suffix=".tif", join=False)
        for t in train_cases:
            casename = train_sequence + "_" + t[:-4]
            img_file = join(base + '_train', train_sequence, t)
            # matching ground truth: "man_seg" + the numeric part of the image name
            lab_file = join(base + '_train', train_sequence + "_GT", "SEG", "man_seg" + t[1:])
            if not isfile(lab_file):
                # frames without a manual segmentation are skipped entirely
                continue
            img_out_base = join(imagestr, casename)
            anno_out = join(labelstr, casename + ".nii.gz")
            res.append(
                p.starmap_async(load_bmp_convert_to_nifti_borders_2d,
                                ((img_file, lab_file, img_out_base, anno_out, spacing, border_thickness),)))
            train_patient_names.append(casename)

    # test sequences: images only, no labels available
    for test_sequence in [i for i in subfolders(base + "_test", join=False) if not i.endswith("_GT")]:
        test_cases = subfiles(join(base + '_test', test_sequence), suffix=".tif", join=False)
        for t in test_cases:
            casename = test_sequence + "_" + t[:-4]
            img_file = join(base + '_test', test_sequence, t)
            lab_file = None
            img_out_base = join(imagests, casename)
            anno_out = None
            res.append(
                p.starmap_async(load_bmp_convert_to_nifti_borders_2d,
                                ((img_file, lab_file, img_out_base, anno_out, spacing, border_thickness),)))
            test_patient_names.append(casename)

    # block until all conversions finished (also surfaces worker exceptions)
    _ = [i.get() for i in res]

    json_dict = {}
    json_dict['name'] = task_name
    json_dict['description'] = ""
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = ""
    json_dict['licence'] = ""
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "BF",
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "cell",
        "2": "border",
    }

    json_dict['numTraining'] = len(train_patient_names)
    json_dict['numTest'] = len(test_patient_names)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
                             train_patient_names]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i for i in test_patient_names]

    save_json(json_dict, os.path.join(out_base, "dataset.json"))
    p.close()
    p.join()
def convert_to_instance_seg(arr: np.ndarray, spacing: tuple = (0.125, 0.125), small_center_threshold: int = 30,
                            isolated_border_as_separate_instance_threshold=15):
    """Turn a semantic core/border segmentation (1 = cell core, 2 = border)
    into an instance segmentation by growing each core outward into the
    border region, respecting anisotropic ``spacing``.

    Returns a uint32 array where each instance has its own id.
    """
    from skimage.morphology import label, dilation

    # we first identify centers that are too small and set them to be border. This should remove false positive instances
    objects = label((arr == 1).astype(int))
    for o in np.unique(objects):
        if o > 0 and np.sum(objects == o) <= small_center_threshold:
            arr[objects == o] = 2

    # 1 is core, 2 is border
    objects = label((arr == 1).astype(int))
    final = np.copy(objects)
    remaining_border = arr == 2
    current = np.copy(objects)
    dilated_mm = np.array((0, 0))
    spacing = np.array(spacing)

    # repeatedly dilate the cores; in each round only the axes that lag behind
    # in physical (mm) dilation get a structuring-element extent, so growth is
    # approximately isotropic in mm despite anisotropic voxels
    while np.sum(remaining_border) > 0:
        strel_size = [0, 0]
        maximum_dilation = max(dilated_mm)
        for i in range(2):
            if spacing[i] == min(spacing):
                strel_size[i] = 1
                continue
            if dilated_mm[i] + spacing[i] / 2 < maximum_dilation:
                strel_size[i] = 1
        ball_here = disk(1)

        # shrink the 3x3 disk to a 1-voxel extent along axes that must not grow this round
        if strel_size[0] == 0: ball_here = ball_here[1:2]
        if strel_size[1] == 0: ball_here = ball_here[:, 1:2]

        #print(1)
        dilated = dilation(current, ball_here)
        # newly reached voxels: were background in `current`, changed by dilation
        diff = (current == 0) & (dilated != current)
        # assign only border voxels to the instance that reached them first
        final[diff & remaining_border] = dilated[diff & remaining_border]
        remaining_border[diff] = 0
        current = dilated
        dilated_mm = [dilated_mm[i] + spacing[i] if strel_size[i] == 1 else dilated_mm[i] for i in range(2)]

    # what can happen is that a cell is so small that the network only predicted border and no core. This cell will be
    # fused with the nearest other instance, which we don't want. Therefore we identify isolated border predictions and
    # give them a separate instance id
    # we identify isolated border predictions by checking each foreground object in arr and see whether this object
    # also contains label 1
    max_label = np.max(final)

    foreground_objects = label((arr != 0).astype(int))
    for i in np.unique(foreground_objects):
        if i > 0 and (1 not in np.unique(arr[foreground_objects==i])):
            size_of_object = np.sum(foreground_objects==i)
            if size_of_object >= isolated_border_as_separate_instance_threshold:
                # large enough border-only object -> promote to its own instance
                final[foreground_objects == i] = max_label + 1
                max_label += 1
                #print('yeah boi')

    return final.astype(np.uint32)
def load_convert_to_instance_save(file_in: str, file_out: str, spacing):
    """Read a semantic core/border nifti, convert it to an instance
    segmentation and write the result with the input's geometry."""
    itk_in = sitk.ReadImage(file_in)
    semantic = sitk.GetArrayFromImage(itk_in)
    # single-slice pseudo-3D volume: convert slice 0, re-add the z axis
    instances = convert_to_instance_seg(semantic[0], spacing)[None]
    itk_out = sitk.GetImageFromArray(instances.astype(np.int16))
    itk_out.CopyInformation(itk_in)
    sitk.WriteImage(itk_out, file_out)
def convert_folder_to_instanceseg(folder_in: str, folder_out: str, spacing, processes: int = 12):
    """Convert every .nii.gz semantic segmentation in ``folder_in`` into an
    instance segmentation written under the same name in ``folder_out``,
    fanning the work out over ``processes`` workers."""
    names = subfiles(folder_in, suffix=".nii.gz", join=False)
    maybe_mkdir_p(folder_out)
    pool = Pool(processes)
    pending = [
        pool.starmap_async(
            load_convert_to_instance_save,
            ((join(folder_in, name), join(folder_out, name), spacing),)
        )
        for name in names
    ]
    # wait for completion; .get() also re-raises worker exceptions
    _ = [task.get() for task in pending]
    pool.close()
    pool.join()
def convert_to_tiff(nifti_image: str, output_name: str):
    """Export the first slice of a (pseudo-3D) nifti as a 16-bit tiff."""
    volume = sitk.GetArrayFromImage(sitk.ReadImage(nifti_image))
    # NOTE(review): `compress` was renamed to `compression` in newer
    # tifffile/scikit-image releases — confirm the pinned version supports it.
    imsave(output_name, volume[0].astype(np.uint16), compress=6)
if __name__ == "__main__":
    # Step 1: convert the raw download into a temporary Task999 with a thin
    # (0.7 mm) border class.
    base = "/home/fabian/Downloads/Fluo-N2DH-SIM+"
    task_name = 'Fluo-N2DH-SIM'
    spacing = (0.125, 0.125)
    task_id = 999
    border_thickness = 0.7
    # NOTE(review): prepare_task creates "Task999_Fluo-N2DH-SIM", but the
    # copytree/rmtree calls below reference "Task999_Fluo-N2DH-SIM_thickborder"
    # — confirm which folder name is intended; as written these do not match.
    prepare_task(base, task_id, task_name, spacing, border_thickness)

    # Step 2: build Task089 from Task999, adding 4 previous time frames as
    # extra input channels (channels 0..3 = t-4..t-1, channel 4 = current frame).
    task_id = 89
    additional_time_steps = 4
    task_name = 'Fluo-N2DH-SIM_thickborder_time'
    full_taskname = 'Task%03.0d_' % task_id + task_name
    output_raw = join(nnUNet_raw_data, full_taskname)
    # NOTE(review): rmtree raises FileNotFoundError if output_raw does not
    # exist yet (i.e. on a first run) — verify this script is only re-run.
    shutil.rmtree(output_raw)
    shutil.copytree(join(nnUNet_raw_data, 'Task999_Fluo-N2DH-SIM_thickborder'), output_raw)
    shutil.rmtree(join(nnUNet_raw_data, 'Task999_Fluo-N2DH-SIM_thickborder'))

    # now add additional time information
    for fld in ['imagesTr', 'imagesTs']:
        curr = join(output_raw, fld)
        for seq in ['01', '02']:
            images = subfiles(curr, prefix=seq, join=False)
            for i in images:
                # file names look like <seq>_tXXX_0000.nii.gz; XXX is the time step
                current_timestep = int(i.split('_')[1][1:])
                # current frame becomes the last channel (_0004)
                renamed = join(curr, i.replace("_0000", "_%04.0d" % additional_time_steps))
                shutil.move(join(curr, i), renamed)
                for previous_timestep in range(-additional_time_steps, 0):
                    # previous time steps will already have been processed and renamed!
                    expected_filename = join(curr, seq + "_t%03.0d" % (
                            current_timestep + previous_timestep) + "_%04.0d" % additional_time_steps + ".nii.gz")
                    if not isfile(expected_filename):
                        # create empty image (frame before the start of the sequence)
                        img = sitk.ReadImage(renamed)
                        empty = sitk.GetImageFromArray(np.zeros_like(sitk.GetArrayFromImage(img)))
                        empty.CopyInformation(img)
                        sitk.WriteImage(empty, join(curr, i.replace("_0000", "_%04.0d" % (
                                additional_time_steps + previous_timestep))))
                    else:
                        shutil.copy(expected_filename, join(curr, i.replace("_0000", "_%04.0d" % (
                                additional_time_steps + previous_timestep))))

    # update dataset.json to describe the five input channels
    dataset = load_json(join(output_raw, 'dataset.json'))
    dataset['modality'] = {
        '0': 't_minus 4',
        '1': 't_minus 3',
        '2': 't_minus 2',
        '3': 't_minus 1',
        '4': 'frame of interest',
    }
    save_json(dataset, join(output_raw, 'dataset.json'))

    # we do not need custom splits since we train on all training cases

    # test set predictions are converted to instance seg with convert_folder_to_instanceseg
    # test set predictions are converted to tiff with convert_to_tiff
| 11,972
| 40.286207
| 121
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task064_KiTS_labelsFixed.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import nnUNet_raw_data
if __name__ == "__main__":
    """
    This is the KiTS dataset after Nick fixed all the labels that had errors. Downloaded on Jan 6th 2020
    """
    base = "/media/userdisk1/Datasets/KITS2019/data"

    task_id = 64
    task_name = "KiTS_labelsFixed"
    foldername = "Task%03.0d_%s" % (task_id, task_name)

    # nnU-Net raw-data folder layout
    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    for d in (imagestr, imagests, labelstr):
        maybe_mkdir_p(d)

    all_cases = subfolders(base, join=False)
    # first 210 cases come with segmentations (training), the rest are the test set
    train_patients = all_cases[:210]
    test_patients = all_cases[210:]

    train_patient_names = []
    for case in train_patients:
        case_dir = join(base, case)
        shutil.copy(join(case_dir, "imaging.nii.gz"), join(imagestr, case + "_0000.nii.gz"))
        shutil.copy(join(case_dir, "segmentation.nii.gz"), join(labelstr, case + ".nii.gz"))
        train_patient_names.append(case)

    test_patient_names = []
    for case in test_patients:
        case_dir = join(base, case)
        shutil.copy(join(case_dir, "imaging.nii.gz"), join(imagests, case + "_0000.nii.gz"))
        test_patient_names.append(case)

    json_dict = {
        'name': "KiTS",
        'description': "kidney and kidney tumor segmentation",
        'tensorImageSize': "4D",
        'reference': "KiTS data for nnunet",
        'licence': "",
        'release': "0.0",
        'modality': {
            "0": "CT",
        },
        'labels': {
            "0": "background",
            "1": "Kidney",
            "2": "Tumor"
        },
    }
    json_dict['numTraining'] = len(train_patient_names)
    json_dict['numTest'] = len(test_patient_names)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1],
                              "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]}
                             for i in train_patient_names]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]

    save_json(json_dict, os.path.join(out_base, "dataset.json"))
| 3,012
| 34.447059
| 142
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task035_ISBI_MSLesionSegmentationChallenge.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from collections import OrderedDict
import numpy as np
import SimpleITK as sitk
import multiprocessing
from batchgenerators.utilities.file_and_folder_operations import *
def convert_to_nii_gz(filename):
    """Re-save ``filename`` as compressed nifti (.nii.gz), then delete the
    original file."""
    compressed_name = os.path.splitext(filename)[0] + ".nii.gz"
    sitk.WriteImage(sitk.ReadImage(filename), compressed_name)
    os.remove(filename)
def convert_for_submission(source_dir, target_dir):
    """Rename nnU-Net predictions ("case__<id>__<t>.nii.gz") into the ISBI
    challenge submission scheme ("test<id>_<t>_nnUNet.nii")."""
    maybe_mkdir_p(target_dir)
    for fname in subfiles(source_dir, suffix=".nii.gz", join=False):
        parts = fname.split("__")
        case_id = int(parts[1])
        timestep = int(parts[2][:-7])  # strip ".nii.gz"
        target = join(target_dir, "test%02d_%02d_nnUNet.nii" % (case_id, timestep))
        sitk.WriteImage(sitk.ReadImage(join(source_dir, fname)), target)
if __name__ == "__main__":
    # convert to nifti.gz
    dirs = ['/media/fabian/My Book/MedicalDecathlon/Task035_ISBILesionSegmentation/imagesTr',
            '/media/fabian/My Book/MedicalDecathlon/Task035_ISBILesionSegmentation/imagesTs',
            '/media/fabian/My Book/MedicalDecathlon/Task035_ISBILesionSegmentation/labelsTr']

    p = multiprocessing.Pool(3)

    for d in dirs:
        nii_files = subfiles(d, suffix='.nii')
        p.map(convert_to_nii_gz, nii_files)

    p.close()
    p.join()

    def rename_files(folder):
        """Rename the four per-modality files of every (patient, timestep)
        pair to nnU-Net's case__<pp>__<tt>_<channel>.nii.gz convention."""
        all_files = subfiles(folder, join=False)
        # there are max 14 patients per folder, starting with 1
        for patientid in range(1, 15):
            # there are certainly no more than 10 time steps per patient, starting with 1
            for t in range(1, 10):
                patient_files = [i for i in all_files if i.find("%02.0d_%02.0d_" % (patientid, t)) != -1]
                if not len(patient_files) == 4:
                    # incomplete (patient, timestep) pairs are left untouched
                    continue

                flair_file = [i for i in patient_files if i.endswith("_flair_pp.nii.gz")][0]
                mprage_file = [i for i in patient_files if i.endswith("_mprage_pp.nii.gz")][0]
                pd_file = [i for i in patient_files if i.endswith("_pd_pp.nii.gz")][0]
                t2_file = [i for i in patient_files if i.endswith("_t2_pp.nii.gz")][0]

                os.rename(join(folder, flair_file), join(folder, "case__%02.0d__%02.0d_0000.nii.gz" % (patientid, t)))
                os.rename(join(folder, mprage_file), join(folder, "case__%02.0d__%02.0d_0001.nii.gz" % (patientid, t)))
                os.rename(join(folder, pd_file), join(folder, "case__%02.0d__%02.0d_0002.nii.gz" % (patientid, t)))
                os.rename(join(folder, t2_file), join(folder, "case__%02.0d__%02.0d_0003.nii.gz" % (patientid, t)))

    # only images are renamed here; labels are handled separately below
    for d in dirs[:-1]:
        rename_files(d)

    # now we have to deal with the training masks, we do it the quick and dirty way here by just creating copies of the
    # training data
    train_folder = '/media/fabian/My Book/MedicalDecathlon/Task035_ISBILesionSegmentation/imagesTr'
    for patientid in range(1, 6):
        for t in range(1, 6):
            fnames_original = subfiles(train_folder, prefix="case__%02.0d__%02.0d" % (patientid, t), suffix=".nii.gz", sort=True)
            for f in fnames_original:
                # each case is duplicated once per rater mask (two raters)
                for mask in [1, 2]:
                    fname_target = f[:-12] + "__mask%d" % mask + f[-12:]
                    shutil.copy(f, fname_target)
                os.remove(f)

    # rename the rater masks to match the duplicated image names
    labels_folder = '/media/fabian/My Book/MedicalDecathlon/Task035_ISBILesionSegmentation/labelsTr'
    for patientid in range(1, 6):
        for t in range(1, 6):
            for mask in [1, 2]:
                f = join(labels_folder, "training%02d_%02d_mask%d.nii.gz" % (patientid, t, mask))
                if isfile(f):
                    os.rename(f, join(labels_folder, "case__%02.0d__%02.0d__mask%d.nii.gz" % (patientid, t, mask)))

    # collect the final training case names (only those that actually exist)
    tr_files = []
    for patientid in range(1, 6):
        for t in range(1, 6):
            for mask in [1, 2]:
                if isfile(join(labels_folder, "case__%02.0d__%02.0d__mask%d.nii.gz" % (patientid, t, mask))):
                    tr_files.append("case__%02.0d__%02.0d__mask%d.nii.gz" % (patientid, t, mask))

    # collect test case names by probing for channel-0 files
    ts_files = []
    for patientid in range(1, 20):
        for t in range(1, 20):
            if isfile(join("/media/fabian/My Book/MedicalDecathlon/Task035_ISBILesionSegmentation/imagesTs",
                           "case__%02.0d__%02.0d_0000.nii.gz" % (patientid, t))):
                ts_files.append("case__%02.0d__%02.0d.nii.gz" % (patientid, t))

    out_base = '/media/fabian/My Book/MedicalDecathlon/Task035_ISBILesionSegmentation/'

    json_dict = OrderedDict()
    json_dict['name'] = "ISBI_Lesion_Segmentation_Challenge_2015"
    json_dict['description'] = "nothing"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see challenge website"
    json_dict['licence'] = "see challenge website"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "flair",
        "1": "mprage",
        "2": "pd",
        "3": "t2"
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "lesion"
    }

    json_dict['numTraining'] = len(subfiles(labels_folder))
    json_dict['numTest'] = len(subfiles('/media/fabian/My Book/MedicalDecathlon/Task035_ISBILesionSegmentation/imagesTs')) // 4
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i[:-7], "label": "./labelsTr/%s.nii.gz" % i[:-7]} for i in
                             tr_files]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i[:-7] for i in ts_files]

    save_json(json_dict, join(out_base, "dataset.json"))

    # leave-one-patient-out style split: fold f validates on patient f+1
    case_identifiers = np.unique([i[:-12] for i in subfiles("/media/fabian/My Book/MedicalDecathlon/MedicalDecathlon_raw_splitted/Task035_ISBILesionSegmentation/imagesTr", suffix='.nii.gz', join=False)])

    splits = []
    for f in range(5):
        # NOTE(review): `cases` is computed but never used — presumably dead code
        cases = [i for i in range(1, 6) if i != f+1]
        splits.append(OrderedDict())
        splits[-1]['val'] = np.array([i for i in case_identifiers if i.startswith("case__%02d__" % (f + 1))])
        remaining = [i for i in case_identifiers if i not in splits[-1]['val']]
        splits[-1]['train'] = np.array(remaining)

    maybe_mkdir_p("/media/fabian/nnunet/Task035_ISBILesionSegmentation")
    save_pickle(splits, join("/media/fabian/nnunet/Task035_ISBILesionSegmentation", "splits_final.pkl"))
| 6,987
| 41.871166
| 203
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task027_AutomaticCardiacDetectionChallenge.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
import numpy as np
from sklearn.model_selection import KFold
def convert_to_submission(source_dir, target_dir):
    """Copy per-patient predictions into ``target_dir`` using the ACDC
    submission naming (<patient>_ED.nii.gz / <patient>_ES.nii.gz)."""
    predictions = subfiles(source_dir, join=False, suffix=".nii.gz")
    # the first 10 characters of a file name identify the patient
    patient_ids = np.unique([fname[:10] for fname in predictions])
    maybe_mkdir_p(target_dir)
    for pid in patient_ids:
        patient_files = subfiles(source_dir, prefix=pid, suffix=".nii.gz", join=False)
        # NOTE(review): presumably exactly two files (ED + ES) are expected — confirm
        assert len(patient_files)
        patient_files.sort()
        # first is ED, second is ES
        shutil.copy(join(source_dir, patient_files[0]), join(target_dir, pid + "_ED.nii.gz"))
        shutil.copy(join(source_dir, patient_files[1]), join(target_dir, pid + "_ES.nii.gz"))
if __name__ == "__main__":
    # Converts the ACDC challenge download into the nnU-Net raw-data layout
    # (Task027_ACDC) and creates a 5-fold split in which both time points
    # (ED/ES) of a patient always land in the same fold.
    folder = "/media/fabian/My Book/datasets/ACDC/training"
    folder_test = "/media/fabian/My Book/datasets/ACDC/testing/testing"
    out_folder = "/media/fabian/My Book/MedicalDecathlon/MedicalDecathlon_raw_splitted/Task027_ACDC"

    maybe_mkdir_p(join(out_folder, "imagesTr"))
    maybe_mkdir_p(join(out_folder, "imagesTs"))
    maybe_mkdir_p(join(out_folder, "labelsTr"))

    # train: every nifti that is neither a ground truth ("_gt") nor a 4D stack ("_4d"),
    # paired with its "_gt" segmentation
    all_train_files = []
    patient_dirs_train = subfolders(folder, prefix="patient")
    for p in patient_dirs_train:
        current_dir = p
        data_files_train = [i for i in subfiles(current_dir, suffix=".nii.gz") if i.find("_gt") == -1 and i.find("_4d") == -1]
        corresponding_seg_files = [i[:-7] + "_gt.nii.gz" for i in data_files_train]
        for d, s in zip(data_files_train, corresponding_seg_files):
            patient_identifier = d.split("/")[-1][:-7]
            all_train_files.append(patient_identifier + "_0000.nii.gz")
            shutil.copy(d, join(out_folder, "imagesTr", patient_identifier + "_0000.nii.gz"))
            shutil.copy(s, join(out_folder, "labelsTr", patient_identifier + ".nii.gz"))

    # test: images only, no segmentations available
    all_test_files = []
    patient_dirs_test = subfolders(folder_test, prefix="patient")
    for p in patient_dirs_test:
        current_dir = p
        data_files_test = [i for i in subfiles(current_dir, suffix=".nii.gz") if i.find("_gt") == -1 and i.find("_4d") == -1]
        for d in data_files_test:
            patient_identifier = d.split("/")[-1][:-7]
            all_test_files.append(patient_identifier + "_0000.nii.gz")
            shutil.copy(d, join(out_folder, "imagesTs", patient_identifier + "_0000.nii.gz"))

    json_dict = OrderedDict()
    json_dict['name'] = "ACDC"
    json_dict['description'] = "cardias cine MRI segmentation"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see ACDC challenge"
    json_dict['licence'] = "see ACDC challenge"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "MRI",
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "RV",
        "2": "MLV",
        "3": "LVC"
    }
    json_dict['numTraining'] = len(all_train_files)
    json_dict['numTest'] = len(all_test_files)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1][:-12], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1][:-12]} for i in
                             all_train_files]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1][:-12] for i in all_test_files]

    save_json(json_dict, os.path.join(out_folder, "dataset.json"))

    # create a dummy split (patients need to be separated)
    splits = []
    patients = np.unique([i[:10] for i in all_train_files])
    # BUG FIX: KFold(5, True, 12345) passed shuffle/random_state positionally,
    # which raises a TypeError on scikit-learn >= 0.24 (keyword-only there).
    # Keyword arguments behave identically on all sklearn versions.
    kf = KFold(n_splits=5, shuffle=True, random_state=12345)
    for tr, val in kf.split(patients):
        splits.append(OrderedDict())
        tr_patients = patients[tr]
        splits[-1]['train'] = [i[:-12] for i in all_train_files if i[:10] in tr_patients]
        val_patients = patients[val]
        splits[-1]['val'] = [i[:-12] for i in all_train_files if i[:10] in val_patients]

    save_pickle(splits, "/media/fabian/nnunet/Task027_ACDC/splits_final.pkl")
| 4,754
| 43.858491
| 154
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task029_LiverTumorSegmentationChallenge.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import SimpleITK as sitk
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Pool
import numpy as np
from nnunet.configuration import default_num_threads
from scipy.ndimage import label
def export_segmentations(indir, outdir):
    """Re-save predictions in ``indir`` under the LiTS submission naming
    ("test-segmentation-<id>.nii") in ``outdir``."""
    for fname in subfiles(indir, suffix='nii.gz', join=False):
        # the numeric id is the last underscore-separated token, minus ".nii.gz"
        identifier = str(fname.split("_")[-1][:-7])
        outfname = join(outdir, "test-segmentation-%s.nii" % identifier)
        sitk.WriteImage(sitk.ReadImage(join(indir, fname)), outfname)
def export_segmentations_postprocess(indir, outdir):
    """Export predictions under the LiTS submission naming, keeping only the
    largest connected foreground component of each segmentation."""
    maybe_mkdir_p(outdir)
    for fname in subfiles(indir, suffix='nii.gz', join=False):
        print("\n", fname)
        identifier = str(fname.split("_")[-1][:-7])
        outfname = join(outdir, "test-segmentation-%s.nii" % identifier)

        img = sitk.ReadImage(join(indir, fname))
        arr = sitk.GetArrayFromImage(img)
        lmap, num_objects = label((arr > 0).astype(int))
        component_sizes = [(lmap == o).sum() for o in range(1, num_objects + 1)]
        # component ids are 1-based, hence the +1
        largest = np.argmax(component_sizes) + 1
        print(component_sizes)
        arr[lmap != largest] = 0

        out_img = sitk.GetImageFromArray(arr)
        out_img.CopyInformation(img)
        sitk.WriteImage(out_img, outfname)
if __name__ == "__main__":
    train_dir = "/media/fabian/DeepLearningData/tmp/LITS-Challenge-Train-Data"
    test_dir = "/media/fabian/My Book/datasets/LiTS/test_data"

    output_folder = "/media/fabian/My Book/MedicalDecathlon/MedicalDecathlon_raw_splitted/Task029_LITS"
    img_dir = join(output_folder, "imagesTr")
    lab_dir = join(output_folder, "labelsTr")
    img_dir_te = join(output_folder, "imagesTs")
    maybe_mkdir_p(img_dir)
    maybe_mkdir_p(lab_dir)
    maybe_mkdir_p(img_dir_te)

    # NOTE(review): these workers are defined inside the __main__ guard and
    # read img_dir/lab_dir from the enclosing scope — this works with the
    # fork start method (Linux) but presumably breaks under spawn
    # (Windows/macOS); confirm if portability is needed.
    def load_save_train(args):
        # converts "volume-XX.nii" + "segmentation-XX.nii" to
        # train_XX_0000.nii.gz / train_XX.nii.gz
        data_file, seg_file = args
        pat_id = data_file.split("/")[-1]
        pat_id = "train_" + pat_id.split("-")[-1][:-4]

        img_itk = sitk.ReadImage(data_file)
        sitk.WriteImage(img_itk, join(img_dir, pat_id + "_0000.nii.gz"))

        img_itk = sitk.ReadImage(seg_file)
        sitk.WriteImage(img_itk, join(lab_dir, pat_id + ".nii.gz"))
        return pat_id

    def load_save_test(args):
        # converts "test-volume-XX.nii" to test_XX_0000.nii.gz
        data_file = args
        pat_id = data_file.split("/")[-1]
        pat_id = "test_" + pat_id.split("-")[-1][:-4]

        img_itk = sitk.ReadImage(data_file)
        sitk.WriteImage(img_itk, join(img_dir_te, pat_id + "_0000.nii.gz"))
        return pat_id

    nii_files_tr_data = subfiles(train_dir, True, "volume", "nii", True)
    nii_files_tr_seg = subfiles(train_dir, True, "segmen", "nii", True)

    nii_files_ts = subfiles(test_dir, True, "test-volume", "nii", True)

    # convert train and test cases in parallel; map() returns the case ids
    p = Pool(default_num_threads)
    train_ids = p.map(load_save_train, zip(nii_files_tr_data, nii_files_tr_seg))
    test_ids = p.map(load_save_test, nii_files_ts)
    p.close()
    p.join()

    json_dict = OrderedDict()
    json_dict['name'] = "LITS"
    json_dict['description'] = "LITS"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see challenge website"
    json_dict['licence'] = "see challenge website"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "CT"
    }

    json_dict['labels'] = {
        "0": "background",
        "1": "liver",
        "2": "tumor"
    }

    json_dict['numTraining'] = len(train_ids)
    json_dict['numTest'] = len(test_ids)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in train_ids]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i for i in test_ids]

    with open(os.path.join(output_folder, "dataset.json"), 'w') as f:
        json.dump(json_dict, f, indent=4, sort_keys=True)
| 4,535
| 35.878049
| 123
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task069_CovidSeg.py
|
import shutil
from batchgenerators.utilities.file_and_folder_operations import *
import SimpleITK as sitk
from nnunet.paths import nnUNet_raw_data
if __name__ == '__main__':
    # data is available at http://medicalsegmentation.com/covid19/
    download_dir = '/home/fabian/Downloads'

    task_id = 69
    task_name = "CovidSeg"
    foldername = "Task%03.0d_%s" % (task_id, task_name)

    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    for d in (imagestr, imagests, labelstr):
        maybe_mkdir_p(d)

    # the niftis are 3d, but they are just stacks of 2d slices from different patients. So no 3d U-Net, please
    # the training stack has 100 slices, so we split it into 5 equally sized parts (20 slices each) for cross-validation
    training_data = sitk.GetArrayFromImage(sitk.ReadImage(join(download_dir, 'tr_im.nii.gz')))
    training_labels = sitk.GetArrayFromImage(sitk.ReadImage(join(download_dir, 'tr_mask.nii.gz')))

    train_patient_names = []
    for fold in range(5):
        part_name = 'part_%d' % fold
        # every 5th slice, offset by the fold index -> 5 disjoint 20-slice stacks
        sitk.WriteImage(sitk.GetImageFromArray(training_data[fold::5]),
                        join(imagestr, part_name + '_0000.nii.gz'))
        sitk.WriteImage(sitk.GetImageFromArray(training_labels[fold::5]),
                        join(labelstr, part_name + '.nii.gz'))
        train_patient_names.append(part_name)

    shutil.copy(join(download_dir, 'val_im.nii.gz'), join(imagests, 'val_im.nii.gz'))
    test_patient_names = ['val_im']

    json_dict = {
        'name': task_name,
        'description': "",
        'tensorImageSize': "4D",
        'reference': "",
        'licence': "",
        'release': "0.0",
        'modality': {
            "0": "nonct",
        },
        'labels': {
            "0": "background",
            "1": "stuff1",
            "2": "stuff2",
            "3": "stuff3",
        },
    }
    json_dict['numTraining'] = len(train_patient_names)
    json_dict['numTest'] = len(test_patient_names)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1],
                              "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]}
                             for i in train_patient_names]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]

    save_json(json_dict, os.path.join(out_base, "dataset.json"))
| 2,507
| 35.347826
| 142
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task075_Fluo_C3DH_A549_ManAndSim.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Pool
import SimpleITK as sitk
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import nnUNet_raw_data
from nnunet.paths import preprocessing_output_dir
from skimage.io import imread
def load_tiff_convert_to_nifti(img_file, lab_file, img_out_base, anno_out, spacing):
    """Convert one 3D tiff image (and optionally its annotation) to nifti
    with the given spacing (spacing is given z-first, sitk wants x-first)."""
    image = imread(img_file)
    itk_image = sitk.GetImageFromArray(image.astype(np.float32))
    itk_image.SetSpacing(np.array(spacing)[::-1])
    sitk.WriteImage(itk_image, join(img_out_base + "_0000.nii.gz"))

    if lab_file is None:
        return

    seg = imread(lab_file)
    seg[seg > 0] = 1  # collapse all instance ids into one foreground class
    itk_seg = sitk.GetImageFromArray(seg.astype(np.uint8))
    itk_seg.SetSpacing(np.array(spacing)[::-1])
    sitk.WriteImage(itk_seg, anno_out)
def prepare_task(base, task_id, task_name, spacing):
    """Convert a CellTrackingChallenge dataset (3D tif stacks) into the
    nnU-Net raw-data layout (TaskXXX_name with imagesTr/imagesTs/labelsTr and
    dataset.json).

    ``base`` is the path prefix of the download: ``base + "_train"`` and
    ``base + "_test"`` must exist, each containing per-sequence subfolders
    (ground truth lives in ``<sequence>_GT/SEG``). Conversion runs
    asynchronously in a pool of 16 workers via load_tiff_convert_to_nifti.
    """
    p = Pool(16)

    foldername = "Task%03.0d_%s" % (task_id, task_name)
    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    maybe_mkdir_p(imagestr)
    maybe_mkdir_p(imagests)
    maybe_mkdir_p(labelstr)

    train_patient_names = []
    test_patient_names = []
    res = []

    # training sequences: every subfolder of base_train that is not a *_GT folder
    for train_sequence in [i for i in subfolders(base + "_train", join=False) if not i.endswith("_GT")]:
        train_cases = subfiles(join(base + '_train', train_sequence), suffix=".tif", join=False)
        for t in train_cases:
            casename = train_sequence + "_" + t[:-4]
            img_file = join(base + '_train', train_sequence, t)
            # matching ground truth: "man_seg" + the numeric part of the image name
            lab_file = join(base + '_train', train_sequence + "_GT", "SEG", "man_seg" + t[1:])
            if not isfile(lab_file):
                # frames without a manual segmentation are skipped entirely
                continue
            img_out_base = join(imagestr, casename)
            anno_out = join(labelstr, casename + ".nii.gz")
            res.append(
                p.starmap_async(load_tiff_convert_to_nifti, ((img_file, lab_file, img_out_base, anno_out, spacing),)))
            train_patient_names.append(casename)

    # test sequences: images only, no labels available
    for test_sequence in [i for i in subfolders(base + "_test", join=False) if not i.endswith("_GT")]:
        test_cases = subfiles(join(base + '_test', test_sequence), suffix=".tif", join=False)
        for t in test_cases:
            casename = test_sequence + "_" + t[:-4]
            img_file = join(base + '_test', test_sequence, t)
            lab_file = None
            img_out_base = join(imagests, casename)
            anno_out = None
            res.append(
                p.starmap_async(load_tiff_convert_to_nifti, ((img_file, lab_file, img_out_base, anno_out, spacing),)))
            test_patient_names.append(casename)

    # block until all conversions finished (also surfaces worker exceptions)
    _ = [i.get() for i in res]

    json_dict = {}
    json_dict['name'] = task_name
    json_dict['description'] = ""
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = ""
    json_dict['licence'] = ""
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "BF",
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "cell",
    }

    json_dict['numTraining'] = len(train_patient_names)
    json_dict['numTest'] = len(test_patient_names)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
                             train_patient_names]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i for i in test_patient_names]

    save_json(json_dict, os.path.join(out_base, "dataset.json"))
    p.close()
    p.join()
if __name__ == "__main__":
    base = "/media/fabian/My Book/datasets/CellTrackingChallenge/Fluo-C3DH-A549_ManAndSim"
    task_id = 75
    task_name = 'Fluo_C3DH_A549_ManAndSim'
    spacing = (1, 0.126, 0.126)
    prepare_task(base, task_id, task_name, spacing)

    # Custom 4-fold cross-validation over the sequences. Case names start with
    # the sequence id ('01_', '02_', '01_SIM...', '02_SIM...'); note that the
    # prefix '01_' also matches '01_SIM' cases.
    # Python precedence: "a or b and not c" parses as "a or (b and not c)",
    # which is what the folds below rely on.
    task_name = "Task075_Fluo_C3DH_A549_ManAndSim"
    labelsTr = join(nnUNet_raw_data, task_name, "labelsTr")
    cases = subfiles(labelsTr, suffix='.nii.gz', join=False)
    splits = []
    # fold 0: validate on the real (non-SIM) 02 sequence
    splits.append(
        {'train': [i[:-7] for i in cases if i.startswith('01_') or i.startswith('02_SIM')],
         'val': [i[:-7] for i in cases if i.startswith('02_') and not i.startswith('02_SIM')]}
    )
    # fold 1: validate on the real (non-SIM) 01 sequence
    splits.append(
        {'train': [i[:-7] for i in cases if i.startswith('02_') or i.startswith('01_SIM')],
         'val': [i[:-7] for i in cases if i.startswith('01_') and not i.startswith('01_SIM')]}
    )
    # fold 2: validate on the simulated 02 sequence
    splits.append(
        {'train': [i[:-7] for i in cases if i.startswith('01_') or i.startswith('02_') and not i.startswith('02_SIM')],
         'val': [i[:-7] for i in cases if i.startswith('02_SIM')]}
    )
    # fold 3: validate on the simulated 01 sequence
    splits.append(
        {'train': [i[:-7] for i in cases if i.startswith('02_') or i.startswith('01_') and not i.startswith('01_SIM')],
         'val': [i[:-7] for i in cases if i.startswith('01_SIM')]}
    )
    save_pickle(splits, join(preprocessing_output_dir, task_name, "splits_final.pkl"))
| 5,610
| 39.65942
| 119
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task065_KiTS_NicksLabels.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import nnUNet_raw_data
if __name__ == "__main__":
    # Nick asked me to rerun the training with other labels (the Kidney region is
    # defined differently). These labels operate in interpolated spacing.
    base = "/media/fabian/My Book/datasets/KiTS_NicksLabels/kits19/data"
    labelsdir = "/media/fabian/My Book/datasets/KiTS_NicksLabels/filled_labels"

    task_id = 65
    task_name = "KiTS_NicksLabels"
    foldername = "Task%03.0d_%s" % (task_id, task_name)
    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    for d in (imagestr, imagests, labelstr):
        maybe_mkdir_p(d)

    # first 210 cases are training (labels available), the remainder is the test set
    all_cases = subfolders(base, join=False)
    train_patients, test_patients = all_cases[:210], all_cases[210:]

    train_patient_names = []
    for p in train_patients:
        shutil.copy(join(base, p, "imaging.nii.gz"), join(imagestr, p + "_0000.nii.gz"))
        shutil.copy(join(labelsdir, p + ".nii.gz"), join(labelstr, p + ".nii.gz"))
        train_patient_names.append(p)

    test_patient_names = []
    for p in test_patients:
        shutil.copy(join(base, p, "imaging.nii.gz"), join(imagests, p + "_0000.nii.gz"))
        test_patient_names.append(p)

    # dataset.json describing the task for nnU-Net
    json_dict = {
        'name': "KiTS",
        'description': "kidney and kidney tumor segmentation",
        'tensorImageSize': "4D",
        'reference': "KiTS data for nnunet",
        'licence': "",
        'release': "0.0",
        'modality': {"0": "CT"},
        'labels': {"0": "background", "1": "Kidney", "2": "Tumor"},
        'numTraining': len(train_patient_names),
        'numTest': len(test_patient_names),
        'training': [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1],
                      "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]}
                     for i in train_patient_names],
        'test': ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names],
    }
    save_json(json_dict, os.path.join(out_base, "dataset.json"))
| 3,196
| 35.329545
| 142
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task024_Promise2012.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import SimpleITK as sitk
from batchgenerators.utilities.file_and_folder_operations import *
def export_for_submission(source_dir, target_dir):
    """
    promise wants mhd :-/
    Convert every .nii.gz prediction in source_dir to a .mhd file in target_dir.
    :param source_dir: folder containing .nii.gz files
    :param target_dir: output folder (created if missing)
    :return: None
    """
    maybe_mkdir_p(target_dir)
    for fname in subfiles(source_dir, suffix=".nii.gz", join=False):
        target = join(target_dir, fname[:-7] + ".mhd")
        sitk.WriteImage(sitk.ReadImage(join(source_dir, fname)), target)
if __name__ == "__main__":
    folder = "/media/fabian/My Book/datasets/promise2012"
    out_folder = "/media/fabian/My Book/MedicalDecathlon/MedicalDecathlon_raw_splitted/Task024_Promise"

    for sub in ("imagesTr", "imagesTs", "labelsTr"):
        maybe_mkdir_p(join(out_folder, sub))

    # train: convert mhd -> nii.gz, separating raw images from segmentations
    current_dir = join(folder, "train")
    segmentations = subfiles(current_dir, suffix="segmentation.mhd")
    raw_data = [f for f in subfiles(current_dir, suffix="mhd") if not f.endswith("segmentation.mhd")]
    for f in raw_data:
        out_fname = join(out_folder, "imagesTr", f.split("/")[-1][:-4] + "_0000.nii.gz")
        sitk.WriteImage(sitk.ReadImage(f), out_fname)
    for f in segmentations:
        out_fname = join(out_folder, "labelsTr", f.split("/")[-1][:-17] + ".nii.gz")
        sitk.WriteImage(sitk.ReadImage(f), out_fname)

    # test set: images only, no labels available
    current_dir = join(folder, "test")
    test_data = subfiles(current_dir, suffix="mhd")
    for f in test_data:
        out_fname = join(out_folder, "imagesTs", f.split("/")[-1][:-4] + "_0000.nii.gz")
        sitk.WriteImage(sitk.ReadImage(f), out_fname)

    # dataset.json for nnU-Net (key order preserved via OrderedDict)
    json_dict = OrderedDict([
        ('name', "PROMISE12"),
        ('description', "prostate"),
        ('tensorImageSize', "4D"),
        ('reference', "see challenge website"),
        ('licence', "see challenge website"),
        ('release', "0.0"),
        ('modality', {"0": "MRI"}),
        ('labels', {"0": "background", "1": "prostate"}),
        ('numTraining', len(raw_data)),
        ('numTest', len(test_data)),
        ('training', [{'image': "./imagesTr/%s.nii.gz" % f.split("/")[-1][:-4],
                       "label": "./labelsTr/%s.nii.gz" % f.split("/")[-1][:-4]} for f in raw_data]),
        ('test', ["./imagesTs/%s.nii.gz" % f.split("/")[-1][:-4] for f in test_data]),
    ])
    save_json(json_dict, os.path.join(out_folder, "dataset.json"))
| 3,287
| 39.097561
| 152
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task040_KiTS.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
import SimpleITK as sitk
from multiprocessing import Pool
from medpy.metric import dc
import numpy as np
from nnunet.paths import network_training_output_dir
from scipy.ndimage import label
def compute_dice_scores(ref: str, pred: str):
    """
    Compute kidney (label > 0) and tumor (label == 2) Dice between two nifti files.

    :param ref: path to the reference segmentation
    :param pred: path to the predicted segmentation
    :return: (kidney_dice, tumor_dice, mean_of_both); a Dice is NaN when the
        structure is absent from both reference and prediction.
    """
    ref_arr = sitk.GetArrayFromImage(sitk.ReadImage(ref))
    pred_arr = sitk.GetArrayFromImage(sitk.ReadImage(pred))

    def _dice(mask_ref, mask_pred):
        # Dice is symmetric, so argument order does not matter for dc()
        if mask_ref.sum() == 0 and mask_pred.sum() == 0:
            return np.nan
        return dc(mask_pred, mask_ref)

    kidney_dice = _dice(ref_arr > 0, pred_arr > 0)
    tumor_dice = _dice(ref_arr == 2, pred_arr == 2)
    # NOTE(review): despite the original name "geometric_mean", this is the
    # arithmetic mean of the two scores (NaN if either score is NaN).
    geometric_mean = np.mean((kidney_dice, tumor_dice))
    return kidney_dice, tumor_dice, geometric_mean
def evaluate_folder(folder_gt: str, folder_pred: str):
    """
    Compute per-case kidney/tumor/mean Dice for all niftis present in folder_gt
    (8 worker processes) and write them to results.csv inside folder_pred.
    """
    niftis = subfiles(folder_gt, suffix=".nii.gz", join=False)
    gt_paths = [join(folder_gt, n) for n in niftis]
    pred_paths = [join(folder_pred, n) for n in niftis]

    workers = Pool(8)
    results = workers.starmap(compute_dice_scores, zip(gt_paths, pred_paths))
    workers.close()
    workers.join()

    with open(join(folder_pred, "results.csv"), 'w') as f:
        for name, scores in zip(niftis, results):
            f.write("%s,%0.4f,%0.4f,%0.4f\n" % (name, *scores))
def remove_all_but_the_two_largest_conn_comp(img_itk_file: str, file_out: str):
    """
    This was not used. I was just curious because others used this. Turns out this is not necessary for my networks

    Keep only the two largest foreground connected components; with two or fewer
    components the file is copied through unchanged.
    """
    img_itk = sitk.ReadImage(img_itk_file)
    img_npy = sitk.GetArrayFromImage(img_itk)
    labelmap, num_labels = label((img_npy > 0).astype(int))

    if num_labels <= 2:
        # nothing to remove, pass the file through unchanged
        shutil.copy(img_itk_file, file_out)
        return

    component_sizes = [np.sum(labelmap == c) for c in range(1, num_labels + 1)]
    order = np.argsort(component_sizes)[::-1]  # descending; keep the two biggest
    keep_mask = (labelmap == order[0] + 1) | (labelmap == order[1] + 1)
    img_npy[~keep_mask] = 0

    cleaned = sitk.GetImageFromArray(img_npy)
    cleaned.CopyInformation(img_itk)  # preserve spacing/origin/direction
    sitk.WriteImage(cleaned, file_out)
    print(os.path.basename(img_itk_file), num_labels, component_sizes)
def manual_postprocess(folder_in,
                       folder_out):
    """
    This was not used. I was just curious because others used this. Turns out this is not necessary for my networks

    Apply remove_all_but_the_two_largest_conn_comp to every nifti in folder_in,
    writing results to folder_out (8 worker processes).
    """
    maybe_mkdir_p(folder_out)
    names = subfiles(folder_in, suffix=".nii.gz", join=False)
    out_paths = [join(folder_out, n) for n in names]
    in_paths = [join(folder_in, n) for n in names]

    workers = Pool(8)
    pending = workers.starmap_async(remove_all_but_the_two_largest_conn_comp, zip(in_paths, out_paths))
    pending.get()  # block until all files are processed (and re-raise worker errors)
    workers.close()
    workers.join()
def copy_npz_fom_valsets():
    '''
    this is preparation for ensembling
    Collect the validation softmax (.npz) and property (.pkl) files of all five
    folds of each trainer into one crossval_npz folder per trainer.
    :return:
    '''
    base = join(network_training_output_dir, "3d_lowres/Task048_KiTS_clean")
    trainers = ['nnUNetTrainerNewCandidate23_FabiansPreActResNet__nnUNetPlans',
                'nnUNetTrainerNewCandidate23_FabiansResNet__nnUNetPlans',
                'nnUNetTrainerNewCandidate23__nnUNetPlans']
    for trainer in trainers:
        dest = join(base, trainer, 'crossval_npz')
        maybe_mkdir_p(dest)
        shutil.copy(join(base, trainer, 'plans.pkl'), dest)
        for fold in range(5):
            src = join(base, trainer, 'fold_%d' % fold, 'validation_raw')
            npz_files = subfiles(src, suffix='.npz', join=False)
            pkl_files = [n[:-3] + 'pkl' for n in npz_files]
            # every npz must come with its pkl before anything is copied
            assert all([isfile(join(src, p)) for p in pkl_files])
            for npz, pkl in zip(npz_files, pkl_files):
                shutil.copy(join(src, npz), dest)
                shutil.copy(join(src, pkl), dest)
def ensemble(experiments=('nnUNetTrainerNewCandidate23_FabiansPreActResNet__nnUNetPlans',
                          'nnUNetTrainerNewCandidate23_FabiansResNet__nnUNetPlans'), out_dir="/media/fabian/Results/nnUNet/3d_lowres/Task048_KiTS_clean/ensemble_preactres_and_res"):
    """Merge the crossval_npz softmax predictions of the given experiments into out_dir."""
    from nnunet.inference.ensemble_predictions import merge
    base = join(network_training_output_dir, "3d_lowres/Task048_KiTS_clean")
    merge([join(base, exp, 'crossval_npz') for exp in experiments], out_dir, 8)
def prepare_submission(fld= "/home/fabian/drives/datasets/results/nnUNet/test_sets/Task048_KiTS_clean/predicted_ens_3d_fullres_3d_cascade_fullres_postprocessed",
                       out='/home/fabian/drives/datasets/results/nnUNet/test_sets/Task048_KiTS_clean/submission'):
    """Copy predicted niftis into the submission folder, renaming case_* to prediction_*."""
    maybe_mkdir_p(out)
    for fname in subfiles(fld, join=False, suffix='.nii.gz'):
        shutil.copy(join(fld, fname), join(out, fname.replace('case', 'prediction')))
def pretent_to_be_nnUNetTrainer(base, folds=(0, 1, 2, 3, 4)):
    """
    changes best checkpoint pickle nnunettrainer class name to nnUNetTrainer
    (the original name is preserved under 'name_old' so it can be restored;
    note the "pretent" typo is part of the public API and therefore kept)
    :param base: experiment output folder containing fold_0 ... fold_4
    :param folds: fold indices to patch
    :return: None
    """
    for fold in folds:
        pkl_file = join(base, "fold_%d" % fold, 'model_best.model.pkl')
        meta = load_pickle(pkl_file)
        meta['name_old'] = deepcopy(meta['name'])
        meta['name'] = 'nnUNetTrainer'
        save_pickle(meta, pkl_file)
def reset_trainerName(base, folds=(0, 1, 2, 3, 4)):
    """Undo pretent_to_be_nnUNetTrainer: restore the trainer name saved under 'name_old'."""
    for fold in folds:
        pkl_file = join(base, "fold_%d" % fold, 'model_best.model.pkl')
        meta = load_pickle(pkl_file)
        meta['name'] = meta['name_old']
        del meta['name_old']
        save_pickle(meta, pkl_file)
def nnUNetTrainer_these(experiments=('nnUNetTrainerNewCandidate23_FabiansPreActResNet__nnUNetPlans',
                                     'nnUNetTrainerNewCandidate23_FabiansResNet__nnUNetPlans',
                                     'nnUNetTrainerNewCandidate23__nnUNetPlans')):
    """
    changes best checkpoint pickle nnunettrainer class name to nnUNetTrainer
    for every listed Task048 experiment.
    :param experiments: experiment folder names under 3d_lowres/Task048_KiTS_clean
    :return: None
    """
    base = join(network_training_output_dir, "3d_lowres/Task048_KiTS_clean")
    for exp in experiments:
        pretent_to_be_nnUNetTrainer(join(base, exp))
def reset_trainerName_these(experiments=('nnUNetTrainerNewCandidate23_FabiansPreActResNet__nnUNetPlans',
                                         'nnUNetTrainerNewCandidate23_FabiansResNet__nnUNetPlans',
                                         'nnUNetTrainerNewCandidate23__nnUNetPlans')):
    """
    Restore the original trainer class name (undo nnUNetTrainer_these) for every
    listed Task048 experiment.
    :param experiments: experiment folder names under 3d_lowres/Task048_KiTS_clean
    :return: None
    """
    base = join(network_training_output_dir, "3d_lowres/Task048_KiTS_clean")
    for exp in experiments:
        reset_trainerName(join(base, exp))
if __name__ == "__main__":
    base = "/media/fabian/My Book/datasets/KiTS2019_Challenge/kits19/data"
    out = "/media/fabian/My Book/MedicalDecathlon/nnUNet_raw_splitted/Task040_KiTS"
    cases = subdirs(base, join=False)

    maybe_mkdir_p(out)
    for sub in ("imagesTr", "imagesTs", "labelsTr"):
        maybe_mkdir_p(join(out, sub))

    # cases 0-209 have ground truth (training); 210 and up are the official test set
    for c in cases:
        case_id = int(c.split("_")[-1])
        if case_id < 210:
            shutil.copy(join(base, c, "imaging.nii.gz"), join(out, "imagesTr", c + "_0000.nii.gz"))
            shutil.copy(join(base, c, "segmentation.nii.gz"), join(out, "labelsTr", c + ".nii.gz"))
        else:
            shutil.copy(join(base, c, "imaging.nii.gz"), join(out, "imagesTs", c + "_0000.nii.gz"))

    # dataset.json for nnU-Net
    json_dict = {
        'name': "KiTS",
        'description': "kidney and kidney tumor segmentation",
        'tensorImageSize': "4D",
        'reference': "KiTS data for nnunet",
        'licence': "",
        'release': "0.0",
        'modality': {"0": "CT"},
        'labels': {"0": "background", "1": "Kidney", "2": "Tumor"},
        'numTraining': len(cases),
        'numTest': 0,
        'training': [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in cases],
        'test': [],
    }
    save_json(json_dict, os.path.join(out, "dataset.json"))
| 9,159
| 37.008299
| 252
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task114_heart_MNMs.py
|
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
import numpy as np
from numpy.random.mtrand import RandomState
import subprocess
from multiprocessing import pool
import pandas as pd
def get_mnms_data(data_root):
    """
    Recursively collect all nifti paths below data_root and split them into raw
    images and ground-truth segmentations (filenames containing '_gt').

    :param data_root: root folder to walk
    :return: (files_raw, files_gt) lists of absolute paths
    """
    files_raw, files_gt = [], []
    for root, _, filenames in os.walk(data_root):
        for fname in filenames:
            if not fname.endswith('nii.gz'):
                continue
            full_path = os.path.join(root, fname)
            (files_gt if '_gt' in fname else files_raw).append(full_path)
    return files_raw, files_gt
def generate_filename_for_nnunet(pat_id, ts, pat_folder=None, add_zeros=False, vendor=None, centre=None, mode='mnms',
                                 data_format='nii.gz'):
    """
    Build an nnU-Net style filename for one time step of a patient.

    :param pat_id: patient identifier
    :param ts: time step, zero-padded to 4 digits
    :param pat_folder: if given, the filename is joined onto this folder
    :param add_zeros: append the "_0000" modality suffix nnU-Net expects for images
    :param vendor: scanner vendor tag; only used when both vendor and centre are truthy
    :param centre: centre tag; only used when both vendor and centre are truthy
    :param mode: 'mnms' puts patient first (pat_ts_vendor_centre), anything else
        puts vendor first (vendor_centre_pat_ts)
    :param data_format: file extension without the leading dot
    :return: the assembled filename (possibly with pat_folder prepended)
    """
    timestamp = str(ts).zfill(4)
    suffix = ("_0000." if add_zeros else ".") + data_format
    if not vendor or not centre:
        parts = [pat_id, timestamp]
    elif mode == 'mnms':
        parts = [pat_id, timestamp, vendor, centre]
    else:
        parts = [vendor, centre, pat_id, timestamp]
    filename = "_".join(map(str, parts)) + suffix
    if pat_folder:
        filename = os.path.join(pat_folder, filename)
    return filename
def select_annotated_frames_mms(data_folder, out_folder, add_zeros=False, mode='mnms', df_path="/media/full/tera2/data/challenges/mms/Training-corrected_original/M&Ms Dataset Information.xlsx"):
    """
    Copy only the annotated ED and ES frames of each patient from data_folder to
    out_folder, renaming them to include vendor and centre tags. Vendor "C"
    patients are skipped (no annotations in the M&Ms training set).

    :param data_folder: folder containing per-timestep files named pat_ts.nii.gz
    :param out_folder: destination folder
    :param add_zeros: forwarded to generate_filename_for_nnunet for the targets
    :param mode: filename layout, see generate_filename_for_nnunet
    :param df_path: path to the M&Ms metadata spreadsheet
    """
    table = pd.read_excel(df_path, index_col='External code')
    for idx in table.index:
        vendor = table.loc[idx, 'Vendor']
        if vendor == "C":
            continue
        centre = table.loc[idx, 'Centre']
        # copy the ED frame first, then the ES frame
        for ts in (table.loc[idx, 'ED'], table.loc[idx, 'ES']):
            # source name carries no vendor/centre tags
            src = generate_filename_for_nnunet(pat_id=idx, ts=ts, pat_folder=data_folder,
                                               vendor=None, centre=None, add_zeros=False)
            # target name carries vendor and centre tags
            dst = generate_filename_for_nnunet(pat_id=idx, ts=ts, pat_folder=out_folder,
                                               vendor=vendor, centre=centre, add_zeros=add_zeros, mode=mode)
            shutil.copy(src, dst)
def create_custom_splits_for_experiments(task_path):
    """
    Append three custom cross-validation folds to an existing splits_final.pkl:
    fold 5 trains on vendor A only, fold 6 on vendor B only, fold 7 on both;
    all three validate on the union of the held-out A and B patients.
    The per-vendor 80/20 patient split is deterministic (fixed seed 1234).

    :param task_path: preprocessed task folder containing splits_final.pkl and
        nnUNetData_plans_v2.1_2D_stage0
    """
    # case identifiers: preprocessed npz filenames without the ".npz" extension
    data_keys = [i[:-4] for i in
                 subfiles(os.path.join(task_path, "nnUNetData_plans_v2.1_2D_stage0"),
                          join=False, suffix='npz')]
    existing_splits = os.path.join(task_path, "splits_final.pkl")

    splits = load_pickle(existing_splits)
    splits = splits[:5]  # discard old changes

    # patient ids per vendor, inferred from "_A_" / "_B_" tags in the filenames
    unique_a_only = np.unique([i.split('_')[0] for i in data_keys if i.find('_A_') != -1])
    unique_b_only = np.unique([i.split('_')[0] for i in data_keys if i.find('_B_') != -1])

    # 80% of each vendor's patients go to training; RNG call order matters for
    # reproducibility, do not reorder the two choice() calls
    num_train_a = int(np.round(0.8 * len(unique_a_only)))
    num_train_b = int(np.round(0.8 * len(unique_b_only)))

    p = RandomState(1234)
    idx_a_train = p.choice(len(unique_a_only), num_train_a, replace=False)
    idx_b_train = p.choice(len(unique_b_only), num_train_b, replace=False)

    identifiers_a_train = [unique_a_only[i] for i in idx_a_train]
    identifiers_b_train = [unique_b_only[i] for i in idx_b_train]

    identifiers_a_val = [i for i in unique_a_only if i not in identifiers_a_train]
    identifiers_b_val = [i for i in unique_b_only if i not in identifiers_b_train]

    # fold 5 will be train on a and eval on val sets of a and b
    splits.append({'train': [i for i in data_keys if i.split("_")[0] in identifiers_a_train],
                   'val': [i for i in data_keys if i.split("_")[0] in identifiers_a_val] + [i for i in data_keys if
                                                                                           i.split("_")[
                                                                                               0] in identifiers_b_val]})
    # fold 6 will be train on b and eval on val sets of a and b
    splits.append({'train': [i for i in data_keys if i.split("_")[0] in identifiers_b_train],
                   'val': [i for i in data_keys if i.split("_")[0] in identifiers_a_val] + [i for i in data_keys if
                                                                                           i.split("_")[
                                                                                               0] in identifiers_b_val]})
    # fold 7 train on both, eval on both
    splits.append({'train': [i for i in data_keys if i.split("_")[0] in identifiers_b_train] + [i for i in data_keys if i.split("_")[0] in identifiers_a_train],
                   'val': [i for i in data_keys if i.split("_")[0] in identifiers_a_val] + [i for i in data_keys if
                                                                                           i.split("_")[
                                                                                               0] in identifiers_b_val]})
    save_pickle(splits, existing_splits)
def split_4d_nii(nii_path, split_folder, pat_name=None, add_zeros=False):
    """
    Split a 3d+t nifti into one 3d nifti per time step using FSL's `fslsplit`.

    :param nii_path: path to the 4d nifti file
    :param split_folder: output folder for the per-timestep 3d files (created if missing)
    :param pat_name: patient identifier used in the output filenames; defaults to
        the name of the folder containing nii_path
    :param add_zeros: if True, append the "_0000" modality suffix nnU-Net expects
    :return: None

    NOTE(review): changes the process working directory (os.chdir) as a side
    effect and requires `fslsplit` to be on the PATH.
    """
    # create temporary folder in which the 3d+t file will be split into many 3d files
    temp_base = os.path.dirname(nii_path)
    temp_location = os.path.join(temp_base, 'tmp')
    if not os.path.isdir(temp_location):
        os.mkdir(temp_location)
    os.chdir(temp_location)
    if not os.path.isdir(split_folder):
        os.mkdir(split_folder)
    _ = subprocess.call(['fslsplit', nii_path])

    # rename files so that the patient's ID is in the filename
    # (os.path.isfile is relative to temp_location because of the chdir above)
    file_list = sorted(f for f in os.listdir(temp_location) if os.path.isfile(f))
    if not pat_name:
        pat_name = os.path.basename(os.path.dirname(nii_path))

    for ts, temp_file in enumerate(file_list):
        # fslsplit names outputs volXXXX.nii.gz; extract the XXXX part
        time_step = temp_file.split('.')[0][3:]
        # make sure the time step is a number. Otherwise trust in pythons sort algorithm
        try:
            int(time_step)
        except ValueError:  # was a bare except; int() on a str can only raise ValueError
            time_step = ts
        # change filename AND location -> move files
        if add_zeros:
            new_file_name = '{}_{}_0000.nii.gz'.format(pat_name, time_step)
        else:
            new_file_name = '{}_{}.nii.gz'.format(pat_name, time_step)
        os.rename(os.path.join(temp_location, temp_file),
                  os.path.join(split_folder, new_file_name))
    os.rmdir(temp_location)
def split_4d_parallel(args):
    """Unpack one pooled argument tuple and delegate to split_4d_nii."""
    path, folder, name = args
    split_4d_nii(path, folder, name)
def split_4d_for_all_pat(files_paths, split_folder):
    """Split every 4d nifti in files_paths into split_folder using 8 worker processes."""
    workers = pool.Pool(8)
    arg_tuples = [(fp, split_folder, None) for fp in files_paths]
    workers.map(split_4d_parallel, arg_tuples)
if __name__ == "__main__":
    # End-to-end preparation of the M&Ms cardiac dataset for nnU-Net:
    # split 4d niftis into 3d frames, keep only annotated ED/ES frames,
    # write dataset.json, and finally create the custom vendor splits.
    task_name = "Task114_heart_MNMs"
    train_dir = "/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/nnUnet_raw/nnUNet_raw_data/{}/imagesTr".format(task_name)
    test_dir = "/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/nnUnet_raw/nnUNet_raw_data/{}/imagesTs".format(task_name)
    #out_dir='/media/full/tera2/output_nnUNet/preprocessed_data/Task114_heart_mnms'
    out_dir='/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/tmp'
    # train
    all_train_files = [os.path.join(train_dir, x) for x in os.listdir(train_dir)]  # NOTE(review): unused below
    # test
    all_test_files = [os.path.join(test_dir, x) for x in os.listdir(test_dir)]  # NOTE(review): unused below

    # collect raw images and ground-truth segmentations from the labeled data
    data_root = '/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/data/challenges/mms/Training-corrected_original/Labeled'
    files_raw, files_gt = get_mnms_data(data_root=data_root)

    # split each 3d+t nifti into one file per time step
    split_path_raw ='/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/data/challenges/mms/temp_split_raw'
    split_path_gt ='/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/data/challenges/mms/temp_split_gt'
    maybe_mkdir_p(split_path_raw)
    maybe_mkdir_p(split_path_gt)
    split_4d_for_all_pat(files_raw, split_path_raw)
    split_4d_for_all_pat(files_gt, split_path_gt)

    # out_dir is rebound here; the tmp assignment above is effectively dead
    out_dir = '/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/nnUnet_raw/nnUNet_raw_data/{}/'.format(task_name)
    maybe_mkdir_p(join(out_dir, "imagesTr"))
    maybe_mkdir_p(join(out_dir, "imagesTs"))
    maybe_mkdir_p(join(out_dir, "labelsTr"))

    # copy only the annotated ED/ES frames; images get the _0000 suffix, labels do not
    imagesTr_path = os.path.join(out_dir, "imagesTr")
    labelsTr_path = os.path.join(out_dir, "labelsTr")
    select_annotated_frames_mms(split_path_raw, imagesTr_path, add_zeros=True)
    select_annotated_frames_mms(split_path_gt, labelsTr_path, add_zeros=False)

    labelsTr = subfiles(labelsTr_path)

    # dataset.json for nnU-Net
    json_dict = OrderedDict()
    json_dict['name'] = "M&Ms"
    json_dict['description'] = "short axis cardiac cine MRI segmentation"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "Campello, Víctor M. et al.: Multi-Centre, Multi-Vendor & Multi-Disease Cardiac Image Segmentation. In preparation."
    json_dict['licence'] = "see M&Ms challenge"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "MRI",
    }
    # labels differ for ACDC challenge
    json_dict['labels'] = {
        "0": "background",
        "1": "LVBP",
        "2": "LVM",
        "3": "RV"
    }
    json_dict['numTraining'] = len(labelsTr)
    json_dict['numTest'] = 0
    json_dict['training'] = [{'image': "./imagesTr/%s" % i.split("/")[-1], "label": "./labelsTr/%s" % i.split("/")[-1]} for i in
                             labelsTr]
    json_dict['test'] = []
    save_json(json_dict, os.path.join(out_dir, "dataset.json"))

    # then preprocess data and plan training.
    # run in terminal
    # > nnUNet_plan_and_preprocess -t <TaskID> --verify_dataset_integrity
    # start training and stop it immediately to get a split.pkl file
    # > nnUNet_train 2d nnUNetTrainerV2_MMS <TaskID> 0
    #
    # then create custom splits as used for the final M&Ms submission
    #
    split_file_path = '/media/full/97d8d6e1-1aa1-4761-9dd1-fc6a62cf6264/output_nnUNet/preprocessed_data/{}/'.format(task_name)
    create_custom_splits_for_experiments(split_file_path)
| 11,023
| 43.813008
| 194
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task062_NIHPancreas.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from nnunet.paths import nnUNet_raw_data
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
from multiprocessing import Pool
import nibabel
def reorient(filename):
    """Reorient the nifti at *filename* to the closest canonical orientation, overwriting it in place."""
    canonical = nibabel.as_closest_canonical(nibabel.load(filename))
    nibabel.save(canonical, filename)
if __name__ == "__main__":
    base = "/media/fabian/DeepLearningData/Pancreas-CT"

    # reorient all images and labels to canonical orientation, in parallel
    p = Pool(8)
    for subfolder in ("data", "TCIA_pancreas_labels-02-05-2017"):
        p.map(reorient, subfiles(join(base, subfolder), suffix=".nii.gz"))

    task_id = 62
    task_name = "NIHPancreas"
    foldername = "Task%03.0d_%s" % (task_id, task_name)
    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    for d in (imagestr, imagests, labelstr):
        maybe_mkdir_p(d)

    # all 82 cases go to training; there is no separate test set
    folder_data = join(base, "data")
    folder_labels = join(base, "TCIA_pancreas_labels-02-05-2017")
    train_patient_names = []
    test_patient_names = []
    for c in range(1, 83):
        casename = "pancreas_%04.0d" % c
        shutil.copy(join(folder_data, "PANCREAS_%04.0d.nii.gz" % c), join(imagestr, casename + "_0000.nii.gz"))
        shutil.copy(join(folder_labels, "label%04.0d.nii.gz" % c), join(labelstr, casename + ".nii.gz"))
        train_patient_names.append(casename)

    # dataset.json for nnU-Net
    json_dict = OrderedDict([
        ('name', task_name),
        ('description', task_name),
        ('tensorImageSize', "4D"),
        ('reference', "see website"),
        ('licence', "see website"),
        ('release', "0.0"),
        ('modality', {"0": "CT"}),
        ('labels', {"0": "background", "1": "Pancreas"}),
        ('numTraining', len(train_patient_names)),
        ('numTest', len(test_patient_names)),
        ('training', [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1],
                       "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in train_patient_names]),
        ('test', ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]),
    ])
    save_json(json_dict, os.path.join(out_base, "dataset.json"))
| 3,238
| 34.988889
| 142
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task037_038_Chaos_Challenge.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import shutil
from collections import OrderedDict
import dicom2nifti
import numpy as np
from batchgenerators.utilities.data_splitting import get_split_deterministic
from batchgenerators.utilities.file_and_folder_operations import *
from PIL import Image
import SimpleITK as sitk
from nnunet.paths import preprocessing_output_dir, nnUNet_raw_data
from nnunet.utilities.sitk_stuff import copy_geometry
from nnunet.inference.ensemble_predictions import merge
def load_png_stack(folder):
    """Load all pngs in *folder* (sorted by name) into a single array, z-axis reversed."""
    slices = [np.array(Image.open(p)) for p in sorted(subfiles(folder, suffix="png"))]
    return np.stack(slices, 0)[::-1]
def convert_CT_seg(loaded_png):
    """Return a uint16 copy of the loaded CT label stack (values are kept as-is)."""
    return np.array(loaded_png, dtype=np.uint16)
def convert_MR_seg(loaded_png):
    """
    Map CHAOS MR png intensity ranges to organ labels:
    (55, 70] -> 1 liver, (110, 135] -> 2 right kidney,
    (175, 200] -> 3 left kidney, (240, 255] -> 4 spleen.
    Everything else stays 0 (background). Returns a float array.
    """
    result = np.zeros(loaded_png.shape)
    intensity_ranges = ((55, 70, 1), (110, 135, 2), (175, 200, 3), (240, 255, 4))
    for low, high, organ in intensity_ranges:
        result[(loaded_png > low) & (loaded_png <= high)] = organ
    return result
def convert_seg_to_intensity_task5(seg):
    """Map labels 1..4 to the CHAOS Task 5 png intensities 63/126/189/252 (uint8)."""
    label_to_intensity = {1: 63, 2: 126, 3: 189, 4: 252}
    out = np.zeros(seg.shape, dtype=np.uint8)
    for organ, intensity in label_to_intensity.items():
        out[seg == organ] = intensity
    return out
def convert_seg_to_intensity_task3(seg):
    """Map label 1 (liver) to png intensity 63 for CHAOS Task 3; all other labels become 0 (uint8)."""
    return np.where(seg == 1, 63, 0).astype(np.uint8)
def write_pngs_from_nifti(nifti, output_folder, converter=convert_seg_to_intensity_task3):
    """
    Convert a nifti segmentation to one png per axial slice (img000.png, img001.png, ...)
    after mapping labels to intensities via *converter*.
    """
    volume = sitk.GetArrayFromImage(sitk.ReadImage(nifti))
    converted = converter(volume)
    for z in range(volume.shape[0]):
        Image.fromarray(converted[z]).save(join(output_folder, "img%03.0d.png" % z))
def convert_variant2_predicted_test_to_submission_format(folder_with_predictions,
                                                         output_folder="/home/fabian/drives/datasets/results/nnUNet/test_sets/Task038_CHAOS_Task_3_5_Variant2/ready_to_submit",
                                                         postprocessing_file="/home/fabian/drives/datasets/results/nnUNet/ensembles/Task038_CHAOS_Task_3_5_Variant2/ensemble_2d__nnUNetTrainerV2__nnUNetPlansv2.1--3d_fullres__nnUNetTrainerV2__nnUNetPlansv2.1/postprocessing.json"):
    """
    output_folder is where the extracted template is
    :param folder_with_predictions:
    :param output_folder:
    :return:

    Pipeline: ensemble the T1 in/out phase predictions, apply postprocessing,
    then export everything as per-slice pngs in the CHAOS submission layout
    (Task3 and Task5).

    NOTE(review): the postprocessing_file parameter is immediately overwritten by
    the hard-coded path below, so the argument (and its default) is dead code.
    """
    postprocessing_file = "/media/fabian/Results/nnUNet/3d_fullres/Task039_CHAOS_Task_3_5_Variant2_highres/" \
                          "nnUNetTrainerV2__nnUNetPlansfixed/postprocessing.json"

    # variant 2 treats in and out phase as two training examples, so we need to ensemble these two again
    final_predictions_folder = join(output_folder, "final")
    maybe_mkdir_p(final_predictions_folder)
    t1_patient_names = [i.split("_")[-1][:-7] for i in subfiles(folder_with_predictions, prefix="T1", suffix=".nii.gz", join=False)]
    folder_for_ensembing0 = join(output_folder, "ens0")
    folder_for_ensembing1 = join(output_folder, "ens1")
    maybe_mkdir_p(folder_for_ensembing0)
    maybe_mkdir_p(folder_for_ensembing1)
    # now copy all t1 out phases in ens0 and all in phases in ens1. Name them the same.
    for t1 in t1_patient_names:
        shutil.copy(join(folder_with_predictions, "T1_in_%s.npz" % t1), join(folder_for_ensembing1, "T1_%s.npz" % t1))
        shutil.copy(join(folder_with_predictions, "T1_in_%s.pkl" % t1), join(folder_for_ensembing1, "T1_%s.pkl" % t1))
        shutil.copy(join(folder_with_predictions, "T1_out_%s.npz" % t1), join(folder_for_ensembing0, "T1_%s.npz" % t1))
        shutil.copy(join(folder_with_predictions, "T1_out_%s.pkl" % t1), join(folder_for_ensembing0, "T1_%s.pkl" % t1))
    shutil.copy(join(folder_with_predictions, "plans.pkl"), join(folder_for_ensembing0, "plans.pkl"))
    shutil.copy(join(folder_with_predictions, "plans.pkl"), join(folder_for_ensembing1, "plans.pkl"))

    # there is a problem with T1_35 that I need to correct manually (different crop size, will not negatively impact results)
    #ens0_softmax = np.load(join(folder_for_ensembing0, "T1_35.npz"))['softmax']
    ens1_softmax = np.load(join(folder_for_ensembing1, "T1_35.npz"))['softmax']
    #ens0_props = load_pickle(join(folder_for_ensembing0, "T1_35.pkl"))
    #ens1_props = load_pickle(join(folder_for_ensembing1, "T1_35.pkl"))
    ens1_softmax = ens1_softmax[:, :, :-1, :]
    np.savez_compressed(join(folder_for_ensembing1, "T1_35.npz"), softmax=ens1_softmax)
    shutil.copy(join(folder_for_ensembing0, "T1_35.pkl"), join(folder_for_ensembing1, "T1_35.pkl"))

    # now call my ensemble function
    merge((folder_for_ensembing0, folder_for_ensembing1), final_predictions_folder, 8, True,
          postprocessing_file=postprocessing_file)
    # copy t2 files to final_predictions_folder as well
    t2_files = subfiles(folder_with_predictions, prefix="T2", suffix=".nii.gz", join=False)
    for t2 in t2_files:
        shutil.copy(join(folder_with_predictions, t2), join(final_predictions_folder, t2))

    # apply postprocessing
    from nnunet.postprocessing.connected_components import apply_postprocessing_to_folder, load_postprocessing
    postprocessed_folder = join(output_folder, "final_postprocessed")
    for_which_classes, min_valid_obj_size = load_postprocessing(postprocessing_file)
    apply_postprocessing_to_folder(final_predictions_folder, postprocessed_folder,
                                   for_which_classes, min_valid_obj_size, 8)

    # now export the niftis in the weird png format
    # task 3 (liver only)
    output_dir = join(output_folder, "CHAOS_submission_template_new", "Task3", "MR")
    for t1 in t1_patient_names:
        output_folder_here = join(output_dir, t1, "T1DUAL", "Results")
        nifti_file = join(postprocessed_folder, "T1_%s.nii.gz" % t1)
        write_pngs_from_nifti(nifti_file, output_folder_here, converter=convert_seg_to_intensity_task3)
    for t2 in t2_files:
        patname = t2.split("_")[-1][:-7]
        output_folder_here = join(output_dir, patname, "T2SPIR", "Results")
        nifti_file = join(postprocessed_folder, "T2_%s.nii.gz" % patname)
        write_pngs_from_nifti(nifti_file, output_folder_here, converter=convert_seg_to_intensity_task3)

    # task 5 (all four organs)
    output_dir = join(output_folder, "CHAOS_submission_template_new", "Task5", "MR")
    for t1 in t1_patient_names:
        output_folder_here = join(output_dir, t1, "T1DUAL", "Results")
        nifti_file = join(postprocessed_folder, "T1_%s.nii.gz" % t1)
        write_pngs_from_nifti(nifti_file, output_folder_here, converter=convert_seg_to_intensity_task5)
    for t2 in t2_files:
        patname = t2.split("_")[-1][:-7]
        output_folder_here = join(output_dir, patname, "T2SPIR", "Results")
        nifti_file = join(postprocessed_folder, "T2_%s.nii.gz" % patname)
        write_pngs_from_nifti(nifti_file, output_folder_here, converter=convert_seg_to_intensity_task5)
if __name__ == "__main__":
    """
    This script only prepares data to participate in Task 3 and Task 5. I don't like the CT task because
    1) there are
    no abdominal organs in the ground truth. In the case of CT we are supposed to train only liver while on MRI we are
    supposed to train all organs. This would require manual modification of nnU-net to deal with this dataset. This is
    not what nnU-net is about.
    2) CT Liver or multiorgan segmentation is too easy to get external data for. Therefore the challenge comes down
    to who gets the best external data, not who has the best algorithm. Not super interesting.
    Task 3 is a subtask of Task 5 so we need to prepare the data only once.
    Difficulty: We need to process both T1 and T2, but T1 has 2 'modalities' (phases). nnU-Net cannot handle varying
    number of input channels. We need to be creative.
    We deal with this by preparing 2 Variants:
    1) pretend we have 2 modalities for T2 as well by simply stacking a copy of the data
    2) treat all MRI sequences independently, so we now have 3*20 training data instead of 2*20. In inference we then
    ensemble the results for the two t1 modalities.
    Careful: We need to split manually here to ensure we stratify by patient
    """
    root = "/media/fabian/My Book/datasets/CHAOS_challenge/Train_Sets"
    root_test = "/media/fabian/My Book/datasets/CHAOS_challenge/Test_Sets"
    out_base = nnUNet_raw_data
    # CT
    # we ignore CT because

    ##############################################################
    # Variant 1
    ##############################################################
    patient_ids = []
    patient_ids_test = []

    output_folder = join(out_base, "Task037_CHAOS_Task_3_5_Variant1")
    output_images = join(output_folder, "imagesTr")
    output_labels = join(output_folder, "labelsTr")
    output_imagesTs = join(output_folder, "imagesTs")
    maybe_mkdir_p(output_images)
    maybe_mkdir_p(output_labels)
    maybe_mkdir_p(output_imagesTs)

    # Process T1 train: in and out phase become the two input channels of one case
    d = join(root, "MR")
    patients = subdirs(d, join=False)
    for p in patients:
        patient_name = "T1_" + p
        gt_dir = join(d, p, "T1DUAL", "Ground")
        # png stack is reversed ([::-1]) to match the dicom slice order
        seg = convert_MR_seg(load_png_stack(gt_dir)[::-1])

        img_dir = join(d, p, "T1DUAL", "DICOM_anon", "InPhase")
        img_outfile = join(output_images, patient_name + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_dir = join(d, p, "T1DUAL", "DICOM_anon", "OutPhase")
        img_outfile = join(output_images, patient_name + "_0001.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_sitk = sitk.ReadImage(img_outfile)
        img_sitk_npy = sitk.GetArrayFromImage(img_sitk)
        # give the segmentation the same geometry as the image it belongs to
        seg_itk = sitk.GetImageFromArray(seg.astype(np.uint8))
        seg_itk = copy_geometry(seg_itk, img_sitk)
        sitk.WriteImage(seg_itk, join(output_labels, patient_name + ".nii.gz"))
        patient_ids.append(patient_name)

    # Process T1 test (no ground truth available for the test set)
    d = join(root_test, "MR")
    patients = subdirs(d, join=False)
    for p in patients:
        patient_name = "T1_" + p

        img_dir = join(d, p, "T1DUAL", "DICOM_anon", "InPhase")
        img_outfile = join(output_imagesTs, patient_name + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_dir = join(d, p, "T1DUAL", "DICOM_anon", "OutPhase")
        img_outfile = join(output_imagesTs, patient_name + "_0001.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_sitk = sitk.ReadImage(img_outfile)
        img_sitk_npy = sitk.GetArrayFromImage(img_sitk)
        patient_ids_test.append(patient_name)

    # Process T2 train: duplicate the single T2 channel so every case has 2 channels
    d = join(root, "MR")
    patients = subdirs(d, join=False)
    for p in patients:
        patient_name = "T2_" + p

        gt_dir = join(d, p, "T2SPIR", "Ground")
        seg = convert_MR_seg(load_png_stack(gt_dir)[::-1])

        img_dir = join(d, p, "T2SPIR", "DICOM_anon")
        img_outfile = join(output_images, patient_name + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)
        # fake second modality: channel 0001 is just a copy of channel 0000
        shutil.copy(join(output_images, patient_name + "_0000.nii.gz"), join(output_images, patient_name + "_0001.nii.gz"))

        img_sitk = sitk.ReadImage(img_outfile)
        img_sitk_npy = sitk.GetArrayFromImage(img_sitk)
        seg_itk = sitk.GetImageFromArray(seg.astype(np.uint8))
        seg_itk = copy_geometry(seg_itk, img_sitk)
        sitk.WriteImage(seg_itk, join(output_labels, patient_name + ".nii.gz"))
        patient_ids.append(patient_name)

    # Process T2 test
    d = join(root_test, "MR")
    patients = subdirs(d, join=False)
    for p in patients:
        patient_name = "T2_" + p

        gt_dir = join(d, p, "T2SPIR", "Ground")
        img_dir = join(d, p, "T2SPIR", "DICOM_anon")
        img_outfile = join(output_imagesTs, patient_name + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)
        shutil.copy(join(output_imagesTs, patient_name + "_0000.nii.gz"), join(output_imagesTs, patient_name + "_0001.nii.gz"))

        img_sitk = sitk.ReadImage(img_outfile)
        img_sitk_npy = sitk.GetArrayFromImage(img_sitk)
        patient_ids_test.append(patient_name)

    # dataset.json for variant 1 (two MRI input channels)
    json_dict = OrderedDict()
    json_dict['name'] = "Chaos Challenge Task3/5 Variant 1"
    json_dict['description'] = "nothing"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "https://chaos.grand-challenge.org/Data/"
    json_dict['licence'] = "see https://chaos.grand-challenge.org/Data/"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "MRI",
        "1": "MRI",
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "liver",
        "2": "right kidney",
        "3": "left kidney",
        "4": "spleen",
    }
    json_dict['numTraining'] = len(patient_ids)
    json_dict['numTest'] = 0
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
                             patient_ids]
    json_dict['test'] = []
    save_json(json_dict, join(output_folder, "dataset.json"))
    ##############################################################
    # Variant 2
    ##############################################################
    patient_ids = []
    patient_ids_test = []

    output_folder = join(out_base, "Task038_CHAOS_Task_3_5_Variant2")
    output_images = join(output_folder, "imagesTr")
    output_imagesTs = join(output_folder, "imagesTs")
    output_labels = join(output_folder, "labelsTr")
    maybe_mkdir_p(output_images)
    maybe_mkdir_p(output_imagesTs)
    maybe_mkdir_p(output_labels)

    # Process T1 train: in and out phase become two independent single-channel
    # cases that share the same ground truth segmentation
    d = join(root, "MR")
    patients = subdirs(d, join=False)
    for p in patients:
        patient_name_in = "T1_in_" + p
        patient_name_out = "T1_out_" + p
        gt_dir = join(d, p, "T1DUAL", "Ground")
        seg = convert_MR_seg(load_png_stack(gt_dir)[::-1])

        img_dir = join(d, p, "T1DUAL", "DICOM_anon", "InPhase")
        img_outfile = join(output_images, patient_name_in + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_dir = join(d, p, "T1DUAL", "DICOM_anon", "OutPhase")
        img_outfile = join(output_images, patient_name_out + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_sitk = sitk.ReadImage(img_outfile)
        img_sitk_npy = sitk.GetArrayFromImage(img_sitk)
        seg_itk = sitk.GetImageFromArray(seg.astype(np.uint8))
        seg_itk = copy_geometry(seg_itk, img_sitk)
        # the same segmentation is valid for both phases
        sitk.WriteImage(seg_itk, join(output_labels, patient_name_in + ".nii.gz"))
        sitk.WriteImage(seg_itk, join(output_labels, patient_name_out + ".nii.gz"))
        patient_ids.append(patient_name_out)
        patient_ids.append(patient_name_in)

    # Process T1 test
    d = join(root_test, "MR")
    patients = subdirs(d, join=False)
    for p in patients:
        patient_name_in = "T1_in_" + p
        patient_name_out = "T1_out_" + p
        gt_dir = join(d, p, "T1DUAL", "Ground")

        img_dir = join(d, p, "T1DUAL", "DICOM_anon", "InPhase")
        img_outfile = join(output_imagesTs, patient_name_in + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_dir = join(d, p, "T1DUAL", "DICOM_anon", "OutPhase")
        img_outfile = join(output_imagesTs, patient_name_out + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_sitk = sitk.ReadImage(img_outfile)
        img_sitk_npy = sitk.GetArrayFromImage(img_sitk)
        patient_ids_test.append(patient_name_out)
        patient_ids_test.append(patient_name_in)

    # Process T2 train (single channel, no duplication needed in this variant)
    d = join(root, "MR")
    patients = subdirs(d, join=False)
    for p in patients:
        patient_name = "T2_" + p

        gt_dir = join(d, p, "T2SPIR", "Ground")
        seg = convert_MR_seg(load_png_stack(gt_dir)[::-1])

        img_dir = join(d, p, "T2SPIR", "DICOM_anon")
        img_outfile = join(output_images, patient_name + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_sitk = sitk.ReadImage(img_outfile)
        img_sitk_npy = sitk.GetArrayFromImage(img_sitk)
        seg_itk = sitk.GetImageFromArray(seg.astype(np.uint8))
        seg_itk = copy_geometry(seg_itk, img_sitk)
        sitk.WriteImage(seg_itk, join(output_labels, patient_name + ".nii.gz"))
        patient_ids.append(patient_name)

    # Process T2 test
    d = join(root_test, "MR")
    patients = subdirs(d, join=False)
    for p in patients:
        patient_name = "T2_" + p

        gt_dir = join(d, p, "T2SPIR", "Ground")
        img_dir = join(d, p, "T2SPIR", "DICOM_anon")
        img_outfile = join(output_imagesTs, patient_name + "_0000.nii.gz")
        _ = dicom2nifti.convert_dicom.dicom_series_to_nifti(img_dir, img_outfile, reorient_nifti=False)

        img_sitk = sitk.ReadImage(img_outfile)
        img_sitk_npy = sitk.GetArrayFromImage(img_sitk)
        patient_ids_test.append(patient_name)

    # dataset.json for variant 2 (only one MRI input channel here)
    json_dict = OrderedDict()
    json_dict['name'] = "Chaos Challenge Task3/5 Variant 2"
    json_dict['description'] = "nothing"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "https://chaos.grand-challenge.org/Data/"
    json_dict['licence'] = "see https://chaos.grand-challenge.org/Data/"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "MRI",
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "liver",
        "2": "right kidney",
        "3": "left kidney",
        "4": "spleen",
    }
    json_dict['numTraining'] = len(patient_ids)
    json_dict['numTest'] = 0
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
                             patient_ids]
    json_dict['test'] = []
    save_json(json_dict, join(output_folder, "dataset.json"))
    #################################################
    # custom split
    #################################################
    # split on patient level so that all sequences of one patient end up in the
    # same fold (otherwise information would leak between train and validation)
    patients = subdirs(join(root, "MR"), join=False)
    task_name_variant1 = "Task037_CHAOS_Task_3_5_Variant1"
    task_name_variant2 = "Task038_CHAOS_Task_3_5_Variant2"

    output_preprocessed_v1 = join(preprocessing_output_dir, task_name_variant1)
    maybe_mkdir_p(output_preprocessed_v1)

    output_preprocessed_v2 = join(preprocessing_output_dir, task_name_variant2)
    maybe_mkdir_p(output_preprocessed_v2)

    # variant 1: one T1 and one T2 case per patient
    splits = []
    for fold in range(5):
        tr, val = get_split_deterministic(patients, fold, 5, 12345)
        train = ["T2_" + i for i in tr] + ["T1_" + i for i in tr]
        validation = ["T2_" + i for i in val] + ["T1_" + i for i in val]
        splits.append({
            'train': train,
            'val': validation
        })
    save_pickle(splits, join(output_preprocessed_v1, "splits_final.pkl"))

    # variant 2: one T2 case plus both T1 phases per patient
    splits = []
    for fold in range(5):
        tr, val = get_split_deterministic(patients, fold, 5, 12345)
        train = ["T2_" + i for i in tr] + ["T1_in_" + i for i in tr] + ["T1_out_" + i for i in tr]
        validation = ["T2_" + i for i in val] + ["T1_in_" + i for i in val] + ["T1_out_" + i for i in val]
        splits.append({
            'train': train,
            'val': validation
        })
    save_pickle(splits, join(output_preprocessed_v2, "splits_final.pkl"))
| 20,639
| 43.772234
| 278
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task056_VerSe2019.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from collections import OrderedDict
import SimpleITK as sitk
from multiprocessing.pool import Pool
from nnunet.configuration import default_num_threads
from nnunet.dataset_conversion.Task056_Verse_normalize_orientation import normalize_slice_orientation, read_image, \
save_image, restore_original_slice_orientation
from nnunet.paths import nnUNet_raw_data
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
from medpy import metric
import numpy as np
def load_corr_save(in_folder: str, out_folder: str, filename: str):
    """Normalize the slice orientation of one nifti and store the result.

    The corrected image is written to ``out_folder`` under the same filename.
    The original header is pickled alongside it (same stem, ``.pkl``) so the
    original orientation can be restored later.
    """
    assert filename.endswith(".nii.gz")
    maybe_mkdir_p(out_folder)

    source_file = join(in_folder, filename)
    image, header = read_image(source_file)
    image_norm, header_norm = normalize_slice_orientation(image, header)

    # deliberately saved in the normalized orientation; the pickled header
    # carries everything needed to undo the transformation later
    save_image(image_norm, header_norm, join(out_folder, filename))
    save_pickle(header, join(out_folder, filename[:-7] + ".pkl"))
def evaluate_verse_case(sitk_file_ref: str, sitk_file_test: str):
    """Compute per-vertebra Dice scores between reference and prediction.

    Only vertebra that are present in the reference will be evaluated; labels
    absent from the reference get np.nan so nanmean can ignore them later.

    :param sitk_file_ref: path to the reference segmentation
    :param sitk_file_test: path to the predicted segmentation
    :return: list of 25 Dice scores (labels 1..25), np.nan for absent labels
    """
    reference = sitk.GetArrayFromImage(sitk.ReadImage(sitk_file_ref))
    prediction = sitk.GetArrayFromImage(sitk.ReadImage(sitk_file_test))

    scores = []
    for vertebra in range(1, 26):
        gt_mask = reference == vertebra
        if not np.any(gt_mask):
            scores.append(np.nan)
        else:
            scores.append(metric.dc(prediction == vertebra, gt_mask))
    return scores
def evaluate_verse_folder(folder_pred, folder_gt, out_json="/home/fabian/verse.json"):
    """Evaluate all predicted VerSe segmentations against their references.

    Each case in folder_gt must have a prediction of the same filename in
    folder_pred. Per-case Dice lists (see evaluate_verse_case), the per-label
    mean and the overall mean are written to out_json.

    :param folder_pred: folder containing the predicted segmentations
    :param folder_gt: folder containing the reference segmentations
    :param out_json: output file for the summary
    """
    files_gt_bare = subfiles(folder_gt, join=False)
    assert all([isfile(join(folder_pred, i)) for i in files_gt_bare]), "some files are missing in the predicted folder"
    files_pred = [join(folder_pred, i) for i in files_gt_bare]
    files_gt = [join(folder_gt, i) for i in files_gt_bare]

    # use the pool as a context manager so the worker processes are cleaned up
    # even if a worker raises (previously the pool was never closed/joined)
    with Pool(default_num_threads) as p:
        results = p.starmap_async(evaluate_verse_case, zip(files_gt, files_pred))
        results = results.get()

    dct = {i: j for i, j in zip(files_gt_bare, results)}

    results_stacked = np.vstack(results)
    # nanmean ignores vertebrae that were absent from the reference
    results_mean = np.nanmean(results_stacked, 0)
    overall_mean = np.nanmean(results_mean)

    save_json((dct, list(results_mean), overall_mean), out_json)
if __name__ == "__main__":
    base = "/media/fabian/DeepLearningData/VerSe2019"
    base_corrOrient = "/media/fabian/DeepLearningData/VerSe2019_corrOrient"

    # correct orientation: bring all images and segmentations into a canonical
    # slice orientation (see Task056_Verse_normalize_orientation)
    train_files_base = subfiles(join(base, "train"), join=False, suffix="_seg.nii.gz")
    train_segs = [i[:-len("_seg.nii.gz")] + "_seg.nii.gz" for i in train_files_base]
    train_data = [i[:-len("_seg.nii.gz")] + ".nii.gz" for i in train_files_base]
    test_files_base = [i[:-len(".nii.gz")] for i in subfiles(join(base, "test"), join=False, suffix=".nii.gz")]
    test_data = [i + ".nii.gz" for i in test_files_base]

    for i in train_segs + train_data:
        load_corr_save(join(base, "train"), join(base_corrOrient, "train"), i)
    for i in test_data:
        load_corr_save(join(base, "test"), join(base_corrOrient, "test"), i)

    # re-collect the file lists from the orientation-corrected folder
    train_files_base = subfiles(join(base_corrOrient, "train"), join=True, suffix="_seg.nii.gz")
    train_segs = [i[:-len("_seg.nii.gz")] + "_seg.nii.gz" for i in train_files_base]
    train_data = [i[:-len("_seg.nii.gz")] + ".nii.gz" for i in train_files_base]
    test_files_base = [i[:-len(".nii.gz")] for i in subfiles(join(base_corrOrient, "test"), join=True, suffix=".nii.gz")]
    test_data = [i + ".nii.gz" for i in test_files_base]

    task_id = 56
    task_name = "VerSe"

    foldername = "Task%03.0d_%s" % (task_id, task_name)

    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    maybe_mkdir_p(imagestr)
    maybe_mkdir_p(imagests)
    maybe_mkdir_p(labelstr)

    # copy the corrected training images/labels into the nnU-Net raw data layout
    train_patient_names = [i[:-len("_seg.nii.gz")] for i in subfiles(join(base_corrOrient, "train"), join=False, suffix="_seg.nii.gz")]
    for p in train_patient_names:
        curr = join(base_corrOrient, "train")
        label_file = join(curr, p + "_seg.nii.gz")
        image_file = join(curr, p + ".nii.gz")
        shutil.copy(image_file, join(imagestr, p + "_0000.nii.gz"))
        shutil.copy(label_file, join(labelstr, p + ".nii.gz"))

    test_patient_names = [i[:-7] for i in subfiles(join(base_corrOrient, "test"), join=False, suffix=".nii.gz")]
    for p in test_patient_names:
        curr = join(base_corrOrient, "test")
        image_file = join(curr, p + ".nii.gz")
        shutil.copy(image_file, join(imagests, p + "_0000.nii.gz"))

    json_dict = OrderedDict()
    json_dict['name'] = "VerSe2019"
    json_dict['description'] = "VerSe2019"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see challenge website"
    json_dict['licence'] = "see challenge website"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "CT",
    }
    # label 0 is background, labels 1-25 are the individual vertebrae
    json_dict['labels'] = {i: str(i) for i in range(26)}
    json_dict['numTraining'] = len(train_patient_names)
    json_dict['numTest'] = len(test_patient_names)
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in
                             train_patient_names]
    json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]
    save_json(json_dict, os.path.join(out_base, "dataset.json"))

    # run this part of the code once training is done
    folder_gt = "/media/fabian/My Book/MedicalDecathlon/nnUNet_raw_splitted/Task056_VerSe/labelsTr"

    folder_pred = "/home/fabian/drives/datasets/results/nnUNet/3d_fullres/Task056_VerSe/nnUNetTrainerV2__nnUNetPlansv2.1/cv_niftis_raw"
    out_json = "/home/fabian/Task056_VerSe_3d_fullres_summary.json"
    evaluate_verse_folder(folder_pred, folder_gt, out_json)

    folder_pred = "/home/fabian/drives/datasets/results/nnUNet/3d_lowres/Task056_VerSe/nnUNetTrainerV2__nnUNetPlansv2.1/cv_niftis_raw"
    out_json = "/home/fabian/Task056_VerSe_3d_lowres_summary.json"
    evaluate_verse_folder(folder_pred, folder_gt, out_json)

    folder_pred = "/home/fabian/drives/datasets/results/nnUNet/3d_cascade_fullres/Task056_VerSe/nnUNetTrainerV2CascadeFullRes__nnUNetPlansv2.1/cv_niftis_raw"
    out_json = "/home/fabian/Task056_VerSe_3d_cascade_fullres_summary.json"
    evaluate_verse_folder(folder_pred, folder_gt, out_json)
| 7,616
| 43.028902
| 157
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task061_CREMI.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
from nnunet.paths import nnUNet_raw_data, preprocessing_output_dir
import shutil
import SimpleITK as sitk
try:
import h5py
except ImportError:
h5py = None
def load_sample(filename):
    """Load raw EM data (and cleft labels, if present) from a CREMI hdf5 file.

    :param filename: path to the hdf5 file
    :return: tuple (data, labels) where labels is a binary uint8 map
        (1 = cleft) or None for the unlabeled test samples
    """
    # we need raw data and seg; use a context manager so the file handle is
    # closed even on error (previously the handle was never closed)
    with h5py.File(filename, 'r') as f:
        data = np.array(f['volumes']['raw'])
        if 'labels' in f['volumes'].keys():
            labels = np.array(f['volumes']['labels']['clefts'])
            # clefts are low values, background is high
            labels = (labels < 100000).astype(np.uint8)
        else:
            labels = None
    return data, labels
def save_as_nifti(arr, filename, spacing):
    """Write a numpy array to a nifti file with the given voxel spacing."""
    image = sitk.GetImageFromArray(arr)
    image.SetSpacing(spacing)
    sitk.WriteImage(image, filename)
def prepare_submission():
    """Convert the nnU-Net test set predictions into the CREMI submission format.

    For each test sample the predicted nifti is loaded, background (0) is
    replaced by the CREMI background sentinel 0xffffffffffffffff and the result
    is written as a 'clefts' volume with the official voxel spacing.
    """
    from cremi.io import CremiFile
    from cremi.Volume import Volume

    base = "/home/fabian/drives/datasets/results/nnUNet/test_sets/Task061_CREMI/"

    # (prediction file stem, submission file name); the copy-pasted per-sample
    # stanzas were folded into one loop — behavior is unchanged
    for sample, out_name in (("a+", "sample_A+_20160601.hdf"),
                             ("b+", "sample_B+_20160601.hdf"),
                             ("c+", "sample_C+_20160601.hdf")):
        pred = sitk.GetArrayFromImage(sitk.ReadImage(
            join(base, 'results_3d_fullres', "sample_%s.nii.gz" % sample))).astype(np.uint64)
        # 0xffffffffffffffff marks background in the CREMI cleft format
        pred[pred == 0] = 0xffffffffffffffff
        out = CremiFile(join(base, out_name), 'w')
        # spacing is (z, y, x) in nm as required by the challenge
        clefts = Volume(pred, (40., 4., 4.))
        out.write_clefts(clefts)
        out.close()
if __name__ == "__main__":
    assert h5py is not None, "you need h5py for this. Install with 'pip install h5py'"

    foldername = "Task061_CREMI"

    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    maybe_mkdir_p(imagestr)
    maybe_mkdir_p(imagests)
    maybe_mkdir_p(labelstr)

    base = "/media/fabian/My Book/datasets/CREMI"

    # train
    img, label = load_sample(join(base, "sample_A_20160501.hdf"))
    save_as_nifti(img, join(imagestr, "sample_a_0000.nii.gz"), (4, 4, 40))
    save_as_nifti(label, join(labelstr, "sample_a.nii.gz"), (4, 4, 40))
    img, label = load_sample(join(base, "sample_B_20160501.hdf"))
    save_as_nifti(img, join(imagestr, "sample_b_0000.nii.gz"), (4, 4, 40))
    save_as_nifti(label, join(labelstr, "sample_b.nii.gz"), (4, 4, 40))
    img, label = load_sample(join(base, "sample_C_20160501.hdf"))
    save_as_nifti(img, join(imagestr, "sample_c_0000.nii.gz"), (4, 4, 40))
    save_as_nifti(label, join(labelstr, "sample_c.nii.gz"), (4, 4, 40))

    # NOTE(review): samples d and e are written from the still-bound sample C
    # data. This looks deliberate (the splits below only use a/b/c and nnU-Net's
    # 5-fold CV needs 5 training cases) — confirm before changing.
    save_as_nifti(img, join(imagestr, "sample_d_0000.nii.gz"), (4, 4, 40))
    save_as_nifti(label, join(labelstr, "sample_d.nii.gz"), (4, 4, 40))

    save_as_nifti(img, join(imagestr, "sample_e_0000.nii.gz"), (4, 4, 40))
    save_as_nifti(label, join(labelstr, "sample_e.nii.gz"), (4, 4, 40))

    # test (labels are None here, only the images are stored)
    img, label = load_sample(join(base, "sample_A+_20160601.hdf"))
    save_as_nifti(img, join(imagests, "sample_a+_0000.nii.gz"), (4, 4, 40))
    img, label = load_sample(join(base, "sample_B+_20160601.hdf"))
    save_as_nifti(img, join(imagests, "sample_b+_0000.nii.gz"), (4, 4, 40))
    img, label = load_sample(join(base, "sample_C+_20160601.hdf"))
    save_as_nifti(img, join(imagests, "sample_c+_0000.nii.gz"), (4, 4, 40))

    json_dict = OrderedDict()
    json_dict['name'] = foldername
    json_dict['description'] = foldername
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see challenge website"
    json_dict['licence'] = "see challenge website"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "EM",
    }
    json_dict['labels'] = {i: str(i) for i in range(2)}
    json_dict['numTraining'] = 5
    json_dict['numTest'] = 1
    json_dict['training'] = [{'image': "./imagesTr/sample_%s.nii.gz" % i, "label": "./labelsTr/sample_%s.nii.gz" % i} for i in
                             ['a', 'b', 'c', 'd', 'e']]
    json_dict['test'] = ["./imagesTs/sample_a+.nii.gz", "./imagesTs/sample_b+.nii.gz", "./imagesTs/sample_c+.nii.gz"]
    save_json(json_dict, os.path.join(out_base, "dataset.json"))

    out_preprocessed = join(preprocessing_output_dir, foldername)
    maybe_mkdir_p(out_preprocessed)
    # manual splits. we train 5 models on all three datasets
    splits = [{'train': ["sample_a", "sample_b", "sample_c"], 'val': ["sample_a", "sample_b", "sample_c"]},
              {'train': ["sample_a", "sample_b", "sample_c"], 'val': ["sample_a", "sample_b", "sample_c"]},
              {'train': ["sample_a", "sample_b", "sample_c"], 'val': ["sample_a", "sample_b", "sample_c"]},
              {'train': ["sample_a", "sample_b", "sample_c"], 'val': ["sample_a", "sample_b", "sample_c"]},
              {'train': ["sample_a", "sample_b", "sample_c"], 'val': ["sample_a", "sample_b", "sample_c"]}]
    save_pickle(splits, join(out_preprocessed, "splits_final.pkl"))
| 6,124
| 40.952055
| 126
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/__init__.py
|
from __future__ import absolute_import
from . import *
| 55
| 17.666667
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task056_Verse_normalize_orientation.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is copied from https://gist.github.com/nlessmann/24d405eaa82abba6676deb6be839266c. All credits go to the
original author (user nlessmann on GitHub)
"""
import numpy as np
import SimpleITK as sitk
def reverse_axes(image):
    """Reverse the axis order of *image* (e.g. zyx -> xyz)."""
    # np.transpose with no axes argument reverses the axis order
    return np.transpose(image)
def read_image(imagefile):
    """Read a nifti and return (xyz-ordered array, header dict)."""
    itk_image = sitk.ReadImage(imagefile)
    # SimpleITK returns arrays in zyx order; bring them into xyz order
    data = reverse_axes(sitk.GetArrayFromImage(itk_image))
    header = {
        'spacing': itk_image.GetSpacing(),
        'origin': itk_image.GetOrigin(),
        'direction': itk_image.GetDirection()
    }
    return data, header
def save_image(img: np.ndarray, header: dict, output_file: str):
    """Write an xyz-ordered array to disk using the geometry from *header*.

    CAREFUL you need to restore_original_slice_orientation before saving!

    :param img: xyz-ordered image volume
    :param header: dict with 'spacing', 'origin' and 'direction'
    """
    # sitk expects zyx order, so undo the xyz ordering used by read_image
    itk_image = sitk.GetImageFromArray(reverse_axes(img))
    itk_image.SetSpacing(header['spacing'])
    itk_image.SetOrigin(header['origin'])
    direction = header['direction']
    if isinstance(direction, tuple):
        itk_image.SetDirection(direction)
    else:
        # numpy matrix (e.g. set by swap_flip_dimensions) -> flatten for sitk
        itk_image.SetDirection(direction.flatten())
    sitk.WriteImage(itk_image, output_file)
def swap_flip_dimensions(cosine_matrix, image, header=None):
    """Transpose/flip image axes according to a rounded direction cosine matrix.

    :param cosine_matrix: 3x3 matrix, assumed to contain only 0/±1 entries
    :param image: volume to transform
    :param header: optional header dict; its spacing is permuted accordingly and
        its direction set to identity (mutated in place)
    :return: transformed image, or (image, header) if a header was given
    """
    # axis permutation: for each output axis, which input axis feeds it
    axis_order = np.argmax(abs(cosine_matrix), axis=0)
    # column sums are +1 (keep direction) or -1 (reverse) per axis
    signs = np.sum(cosine_matrix, axis=0)

    transformed = np.transpose(image, tuple(axis_order))
    slicer = tuple(slice(None, None, int(s)) for s in signs)
    transformed = transformed[slicer]

    if header is None:
        return transformed

    header['spacing'] = tuple(header['spacing'][ax] for ax in axis_order)
    header['direction'] = np.eye(3)
    return transformed, header
def normalize_slice_orientation(image, header):
    """Bring *image* into canonical orientation (identity direction cosines).

    The untouched header is stashed under header['original'] so the transform
    can be undone later by restore_original_slice_orientation.
    """
    # preserve original header so that we can easily transform back
    header['original'] = header.copy()

    # round the cosine matrix (we assume 0/1 values only) and invert it to find
    # the transpose/flip that maps the image onto identity cosines
    cosine = np.asarray(header['direction']).reshape(3, 3)
    inverse_cosine = np.linalg.inv(np.round(cosine))
    return swap_flip_dimensions(inverse_cosine, image, header)
def restore_original_slice_orientation(mask, header):
    """Undo normalize_slice_orientation using the stashed original header.

    :return: (reoriented mask, original header)
    """
    # the mask is assumed to be in normalized orientation (identity cosines),
    # so applying the rounded original cosine matrix maps it back
    original_cosine = np.round(np.asarray(header['original']['direction']).reshape(3, 3))
    return swap_flip_dimensions(original_cosine, mask), header['original']
| 3,396
| 33.313131
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task059_EPFL_EM_MITO_SEG.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import subprocess
from collections import OrderedDict
from nnunet.paths import nnUNet_raw_data
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
from skimage import io
import SimpleITK as sitk
import shutil
if __name__ == "__main__":
    # Convert the EPFL EM mitochondria segmentation dataset to nnU-Net format.
    # Download the data first: https://www.epfl.ch/labs/cvlab/data/data-em/
    base = "/media/fabian/My Book/datasets/EPFL_MITO_SEG"

    # Load the tif stacks and binarize the ground truth (255 -> 1).
    train_volume = io.imread(join(base, "training.tif"))
    train_labels = io.imread(join(base, "training_groundtruth.tif"))
    train_labels[train_labels == 255] = 1
    test_volume = io.imread(join(base, "testing.tif"))
    test_labels = io.imread(join(base, "testing_groundtruth.tif"))
    test_labels[test_labels == 255] = 1

    task_id = 59
    task_name = "EPFL_EM_MITO_SEG"
    foldername = "Task%03.0d_%s" % (task_id, task_name)

    # Target directory layout expected by nnU-Net.
    out_base = join(nnUNet_raw_data, foldername)
    imagestr = join(out_base, "imagesTr")
    imagests = join(out_base, "imagesTs")
    labelstr = join(out_base, "labelsTr")
    labelste = join(out_base, "labelsTs")
    for folder in (imagestr, imagests, labelstr, labelste):
        maybe_mkdir_p(folder)

    # Wrap the arrays as ITK images; spacing is set to 5 in every dimension
    # (units as shipped with the original dataset).
    img_tr_itk = sitk.GetImageFromArray(train_volume.astype(np.float32))
    lab_tr_itk = sitk.GetImageFromArray(train_labels.astype(np.uint8))
    img_te_itk = sitk.GetImageFromArray(test_volume.astype(np.float32))
    lab_te_itk = sitk.GetImageFromArray(test_labels.astype(np.uint8))
    for itk_image in (img_tr_itk, lab_tr_itk, img_te_itk, lab_te_itk):
        itk_image.SetSpacing((5, 5, 5))

    # nnU-Net's 5-fold cross-validation needs at least 5 training cases, so
    # the single training volume is written once and then duplicated 4 times.
    sitk.WriteImage(img_tr_itk, join(imagestr, "training0_0000.nii.gz"))
    for i in range(1, 5):
        shutil.copy(join(imagestr, "training0_0000.nii.gz"),
                    join(imagestr, "training%d_0000.nii.gz" % i))
    sitk.WriteImage(lab_tr_itk, join(labelstr, "training0.nii.gz"))
    for i in range(1, 5):
        shutil.copy(join(labelstr, "training0.nii.gz"),
                    join(labelstr, "training%d.nii.gz" % i))
    sitk.WriteImage(img_te_itk, join(imagests, "testing.nii.gz"))
    sitk.WriteImage(lab_te_itk, join(labelste, "testing.nii.gz"))

    # Assemble dataset.json describing the task for nnU-Net.
    json_dict = OrderedDict()
    json_dict['name'] = task_name
    json_dict['description'] = task_name
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see challenge website"
    json_dict['licence'] = "see challenge website"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "EM",
    }
    json_dict['labels'] = {i: str(i) for i in range(2)}
    json_dict['numTraining'] = 5
    json_dict['numTest'] = 1
    json_dict['training'] = [{'image': "./imagesTr/training%d.nii.gz" % i,
                              "label": "./labelsTr/training%d.nii.gz" % i} for i in range(5)]
    json_dict['test'] = ["./imagesTs/testing.nii.gz"]
    save_json(json_dict, os.path.join(out_base, "dataset.json"))
| 4,324
| 42.686869
| 128
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/dataset_conversion/Task043_BraTS_2019.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import nnUNet_raw_data
import SimpleITK as sitk
import shutil
def copy_BraTS_segmentation_and_convert_labels(in_file, out_file):
    """Read a BraTS segmentation, remap its labels to be consecutive, and save it.

    Use this for segmentation files only! nnU-Net requires consecutive integer
    labels; BraTS ships 0, 1, 2, 4, which is remapped to 0, 2, 1, 3
    respectively (note that labels 1 and 2 swap meaning in the process).
    Raises RuntimeError if any other label value is encountered.
    """
    source_img = sitk.ReadImage(in_file)
    seg = sitk.GetArrayFromImage(source_img)
    # Reject anything outside the expected BraTS label set.
    unexpected = set(np.unique(seg)) - {0, 1, 2, 4}
    if unexpected:
        raise RuntimeError('unexpected label')
    remapped = np.zeros_like(seg)
    remapped[seg == 4] = 3
    remapped[seg == 2] = 1
    remapped[seg == 1] = 2
    out_img = sitk.GetImageFromArray(remapped)
    # Carry over origin/spacing/direction from the source image.
    out_img.CopyInformation(source_img)
    sitk.WriteImage(out_img, out_file)
if __name__ == "__main__":
    """
    REMEMBER TO CONVERT LABELS BACK TO BRATS CONVENTION AFTER PREDICTION!
    """
    # Converts the BraTS 2019 training + validation downloads into nnU-Net's
    # raw-data layout (imagesTr/labelsTr/imagesVal + dataset.json).
    task_name = "Task043_BraTS2019"
    downloaded_data_dir = "/home/sdp/MLPERF/Brats2019_DATA/MICCAI_BraTS_2019_Data_Training"
    target_base = join(nnUNet_raw_data, task_name)
    target_imagesTr = join(target_base, "imagesTr")
    target_imagesVal = join(target_base, "imagesVal")
    target_imagesTs = join(target_base, "imagesTs")
    target_labelsTr = join(target_base, "labelsTr")
    maybe_mkdir_p(target_imagesTr)
    maybe_mkdir_p(target_imagesVal)
    maybe_mkdir_p(target_imagesTs)
    maybe_mkdir_p(target_labelsTr)
    # Training cases live in HGG/ and LGG/ subfolders; the grade is encoded
    # into the case name so the two pools cannot collide.
    patient_names = []
    for tpe in ["HGG", "LGG"]:
        cur = join(downloaded_data_dir, tpe)
        for p in subdirs(cur, join=False):
            patdir = join(cur, p)
            patient_name = tpe + "__" + p
            patient_names.append(patient_name)
            t1 = join(patdir, p + "_t1.nii.gz")
            t1c = join(patdir, p + "_t1ce.nii.gz")
            t2 = join(patdir, p + "_t2.nii.gz")
            flair = join(patdir, p + "_flair.nii.gz")
            seg = join(patdir, p + "_seg.nii.gz")
            assert all([
                isfile(t1),
                isfile(t1c),
                isfile(t2),
                isfile(flair),
                isfile(seg)
            ]), "%s" % patient_name
            # The _000X suffix encodes the modality index declared in
            # json_dict['modality'] below.
            shutil.copy(t1, join(target_imagesTr, patient_name + "_0000.nii.gz"))
            shutil.copy(t1c, join(target_imagesTr, patient_name + "_0001.nii.gz"))
            shutil.copy(t2, join(target_imagesTr, patient_name + "_0002.nii.gz"))
            shutil.copy(flair, join(target_imagesTr, patient_name + "_0003.nii.gz"))
            # Segmentations additionally get their labels remapped to 0..3.
            copy_BraTS_segmentation_and_convert_labels(seg, join(target_labelsTr, patient_name + ".nii.gz"))
    # dataset.json describing the task for nnU-Net.
    json_dict = OrderedDict()
    json_dict['name'] = "BraTS2019"
    json_dict['description'] = "nothing"
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "see BraTS2019"
    json_dict['licence'] = "see BraTS2019 license"
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "T1",
        "1": "T1ce",
        "2": "T2",
        "3": "FLAIR"
    }
    json_dict['labels'] = {
        "0": "background",
        "1": "edema",
        "2": "non-enhancing",
        "3": "enhancing",
    }
    json_dict['numTraining'] = len(patient_names)
    json_dict['numTest'] = 0
    json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
                             patient_names]
    json_dict['test'] = []
    save_json(json_dict, join(target_base, "dataset.json"))
    # Validation set: images only (no ground truth shipped), no grade prefix.
    downloaded_data_dir = "/home/sdp/MLPERF/Brats2019_DATA/MICCAI_BraTS_2019_Data_Validation"
    for p in subdirs(downloaded_data_dir, join=False):
        patdir = join(downloaded_data_dir, p)
        patient_name = p
        t1 = join(patdir, p + "_t1.nii.gz")
        t1c = join(patdir, p + "_t1ce.nii.gz")
        t2 = join(patdir, p + "_t2.nii.gz")
        flair = join(patdir, p + "_flair.nii.gz")
        assert all([
            isfile(t1),
            isfile(t1c),
            isfile(t2),
            isfile(flair),
        ]), "%s" % patient_name
        shutil.copy(t1, join(target_imagesVal, patient_name + "_0000.nii.gz"))
        shutil.copy(t1c, join(target_imagesVal, patient_name + "_0001.nii.gz"))
        shutil.copy(t2, join(target_imagesVal, patient_name + "_0002.nii.gz"))
        shutil.copy(flair, join(target_imagesVal, patient_name + "_0003.nii.gz"))
    # Disabled test-set conversion kept below as an inert string literal.
    """
    #I dont have the testing data
    downloaded_data_dir = "/home/fabian/Downloads/BraTS2018_train_val_test_data/MICCAI_BraTS_2018_Data_Testing_FIsensee"

    for p in subdirs(downloaded_data_dir, join=False):
        patdir = join(downloaded_data_dir, p)
        patient_name = p
        t1 = join(patdir, p + "_t1.nii.gz")
        t1c = join(patdir, p + "_t1ce.nii.gz")
        t2 = join(patdir, p + "_t2.nii.gz")
        flair = join(patdir, p + "_flair.nii.gz")
        assert all([
            isfile(t1),
            isfile(t1c),
            isfile(t2),
            isfile(flair),
        ]), "%s" % patient_name
        shutil.copy(t1, join(target_imagesTs, patient_name + "_0000.nii.gz"))
        shutil.copy(t1c, join(target_imagesTs, patient_name + "_0001.nii.gz"))
        shutil.copy(t2, join(target_imagesTs, patient_name + "_0002.nii.gz"))
        shutil.copy(flair, join(target_imagesTs, patient_name + "_0003.nii.gz"))"""
| 6,075
| 35.824242
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/generic_UNet_DP.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.loss_functions.dice_loss import get_tp_fp_fn_tn
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from torch import nn
class Generic_UNet_DP(Generic_UNet):
    # Variant of Generic_UNet that evaluates parts of the loss inside forward()
    # so that the computation parallelizes under nn.DataParallel (see __init__
    # docstring). The external interface of forward() changes depending on
    # whether targets y are supplied.
    def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,
                 feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,
                 norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
                 weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
                 conv_kernel_sizes=None,
                 upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,
                 max_num_features=None):
        """
        As opposed to the Generic_UNet, this class will compute parts of the loss function in the forward pass. This is
        useful for GPU parallelization. The batch DICE loss, if used, must be computed over the whole batch. Therefore, in a
        naive implementation, all softmax outputs must be copied to a single GPU which will then
        do the loss computation all by itself. In the context of 3D Segmentation, this results in a lot of overhead AND
        is inefficient because the DICE computation is also kinda expensive (Think 8 GPUs with a result of shape
        2x4x128x128x128 each.). The DICE is a global metric, but its parts can be computed locally (TP, FP, FN). Thus,
        this implementation will compute all the parts of the loss function in the forward pass (and thus in a
        parallelized way). The results are very small (batch_size x num_classes for TP, FN and FP, respectively; scalar for CE) and
        copied easily. Also the final steps of the loss function (computing batch dice and average CE values) are easy
        and very quick on the one GPU they need to run on. BAM.
        final_nonlin is lambda x:x here!
        """
        # Note: the final nonlinearity is forced to identity (lambda x: x)
        # because the loss parts computed below apply softmax themselves.
        super(Generic_UNet_DP, self).__init__(input_channels, base_num_features, num_classes, num_pool,
                                              num_conv_per_stage,
                                              feat_map_mul_on_downscale, conv_op,
                                              norm_op, norm_op_kwargs,
                                              dropout_op, dropout_op_kwargs,
                                              nonlin, nonlin_kwargs, deep_supervision, dropout_in_localization,
                                              lambda x: x, weightInitializer, pool_op_kernel_sizes,
                                              conv_kernel_sizes,
                                              upscale_logits, convolutional_pooling, convolutional_upsampling,
                                              max_num_features)
        self.ce_loss = RobustCrossEntropyLoss()
    def forward(self, x, y=None, return_hard_tp_fp_fn=False):
        """Run the U-Net; if targets y are given, also return loss components.

        Without y: returns the plain Generic_UNet output.
        With y: returns (ce_loss(es), tp, fp, fn) — lists per deep-supervision
        level when deep supervision is active, single tensors otherwise.
        With return_hard_tp_fp_fn additionally appends hard (argmax-based)
        per-class tp/fp/fn counts computed on the full-resolution output.
        """
        res = super(Generic_UNet_DP, self).forward(x)  # regular Generic_UNet forward pass
        if y is None:
            return res
        else:
            # compute ce loss
            if self._deep_supervision and self.do_ds:
                # One CE value and one (tp, fp, fn) triple per resolution level.
                ce_losses = [self.ce_loss(res[0], y[0]).unsqueeze(0)]
                tps = []
                fps = []
                fns = []
                res_softmax = softmax_helper(res[0])
                tp, fp, fn, _ = get_tp_fp_fn_tn(res_softmax, y[0])
                tps.append(tp)
                fps.append(fp)
                fns.append(fn)
                for i in range(1, len(y)):
                    ce_losses.append(self.ce_loss(res[i], y[i]).unsqueeze(0))
                    res_softmax = softmax_helper(res[i])
                    tp, fp, fn, _ = get_tp_fp_fn_tn(res_softmax, y[i])
                    tps.append(tp)
                    fps.append(fp)
                    fns.append(fn)
                ret = ce_losses, tps, fps, fns
            else:
                ce_loss = self.ce_loss(res, y).unsqueeze(0)
                # tp fp and fn need the output to be softmax
                res_softmax = softmax_helper(res)
                tp, fp, fn, _ = get_tp_fp_fn_tn(res_softmax, y)
                ret = ce_loss, tp, fp, fn
            if return_hard_tp_fp_fn:
                # Hard metrics are always computed on the highest-resolution
                # output (index 0 under deep supervision).
                if self._deep_supervision and self.do_ds:
                    output = res[0]
                    target = y[0]
                else:
                    target = y
                    output = res
                with torch.no_grad():
                    num_classes = output.shape[1]
                    output_softmax = softmax_helper(output)
                    output_seg = output_softmax.argmax(1)
                    # target is assumed (b, 1, ...) with the label in channel 0
                    # — TODO confirm against the trainer that supplies y.
                    target = target[:, 0]
                    axes = tuple(range(1, len(target.shape)))
                    # Per-class counts, skipping the background class 0.
                    tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
                    fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
                    fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
                    for c in range(1, num_classes):
                        tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes)
                        fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes)
                        fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes)
                    # Sum over the batch; keep a leading axis of size 1 so the
                    # DataParallel gather can concatenate across GPUs.
                    tp_hard = tp_hard.sum(0, keepdim=False)[None]
                    fp_hard = fp_hard.sum(0, keepdim=False)[None]
                    fn_hard = fn_hard.sum(0, keepdim=False)[None]
                    ret = *ret, tp_hard, fp_hard, fn_hard
            return ret
| 6,839
| 53.72
| 131
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/neural_network.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
from nnunet.utilities.random_stuff import no_op
from nnunet.utilities.to_torch import to_cuda, maybe_to_torch
from torch import nn
import torch
from scipy.ndimage.filters import gaussian_filter
from typing import Union, Tuple, List
from torch.cuda.amp import autocast
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
super(NeuralNetwork, self).__init__()
# if we have 5 pooling then our patch size must be divisible by 2**5
self.input_shape_must_be_divisible_by = None # for example in a 2d network that does 5 pool in x and 6 pool
# in y this would be (32, 64)
# we need to know this because we need to know if we are a 2d or a 3d netowrk
self.conv_op = None # nn.Conv2d or nn.Conv3d
# this tells us how many channely we have in the output. Important for preallocation in inference
self.num_classes = None # number of channels in the output
# depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions
# during inference, we need to apply the nonlinearity, however. So it is important to let the newtork know what
# to apply in inference. For the most part this will be softmax
self.inference_apply_nonlin = lambda x: x # softmax_helper
# This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the
# center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians
# can be expensive, so it makes sense to save and reuse them.
self._gaussian_3d = self._patch_size_for_gaussian_3d = None
self._gaussian_2d = self._patch_size_for_gaussian_2d = None
    def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2),
                   use_sliding_window: bool = False,
                   step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None,
                   use_gaussian: bool = False, pad_border_mode: str = "constant",
                   pad_kwargs: dict = None, all_in_gpu: bool = False,
                   verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """
        Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will
        detect that automatically and run the appropriate code.
        When running predictions, you need to specify whether you want to run fully convolutional of sliding window
        based inference. We very strongly recommend you use sliding window with the default settings.
        It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
        the network is not in eval mode it will print a warning.
        :param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z).
        :param do_mirroring: If True, use test time data augmentation in the form of mirroring
        :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three
        axes
        :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
        :param step_size: When running sliding window prediction, the step size determines the distance between adjacent
        predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
        as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between
        predictions. step_size cannot be larger than 1!
        :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
        this will either crash or give potentially less accurate segmentations
        :param regions_class_order: Fabian only
        :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
        to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
        behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
        :param pad_border_mode: leave this alone
        :param pad_kwargs: leave this alone
        :param all_in_gpu: experimental. You probably want to leave this as is it
        :param verbose: Do you want a wall of text? If yes then set this to True
        :param mixed_precision: if True, will run inference in mixed precision with autocast()
        :return:
        """
        # Free cached GPU memory before a potentially large inference run.
        torch.cuda.empty_cache()
        assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \
                               'predictions'
        if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)
        assert self.get_device() != "cpu", "CPU not implemented"
        if pad_kwargs is None:
            pad_kwargs = {'constant_values': 0}
        # A very long time ago the mirror axes were (2, 3, 4) for a 3d network. This is just to intercept any old
        # code that uses this convention
        if len(mirror_axes):
            if self.conv_op == nn.Conv2d:
                if max(mirror_axes) > 1:
                    raise ValueError("mirror axes. duh")
            if self.conv_op == nn.Conv3d:
                if max(mirror_axes) > 2:
                    raise ValueError("mirror axes. duh")
        if self.training:
            print('WARNING! Network is in train mode during inference. This may be intended, or not...')
        assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
        # Run everything under autocast (mixed precision) or a no-op context.
        if mixed_precision:
            context = autocast
        else:
            context = no_op
        with context():
            with torch.no_grad():
                # Dispatch on network dimensionality (conv_op) and inference
                # mode (sliding window vs. fully convolutional).
                if self.conv_op == nn.Conv3d:
                    if use_sliding_window:
                        res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
                                                                     regions_class_order, use_gaussian, pad_border_mode,
                                                                     pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
                                                                     verbose=verbose)
                    else:
                        res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
                                                               pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose)
                elif self.conv_op == nn.Conv2d:
                    # 2d network on a 3d volume: predict slice by slice.
                    if use_sliding_window:
                        res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size,
                                                                     regions_class_order, use_gaussian, pad_border_mode,
                                                                     pad_kwargs, all_in_gpu, False)
                    else:
                        res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
                                                               pad_border_mode, pad_kwargs, all_in_gpu, False)
                else:
                    raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
        return res
def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False,
step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None,
use_gaussian: bool = False, pad_border_mode: str = "constant",
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D
image with that (you dummy).
When running predictions, you need to specify whether you want to run fully convolutional of sliding window
based inference. We very strongly recommend you use sliding window with the default settings.
It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
the network is not in eval mode it will print a warning.
:param x: Your input data. Must be a nd.ndarray of shape (c, x, y).
:param do_mirroring: If True, use test time data augmentation in the form of mirroring
:param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three
axes
:param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
:param step_size: When running sliding window prediction, the step size determines the distance between adjacent
predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between
predictions. step_size cannot be larger than 1!
:param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
this will either crash or give potentially less accurate segmentations
:param regions_class_order: Fabian only
:param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
:param pad_border_mode: leave this alone
:param pad_kwargs: leave this alone
:param all_in_gpu: experimental. You probably want to leave this as is it
:param verbose: Do you want a wall of text? If yes then set this to True
:return:
"""
torch.cuda.empty_cache()
assert step_size <= 1, 'step_size must be smaler than 1. Otherwise there will be a gap between consecutive ' \
'predictions'
if self.conv_op == nn.Conv3d:
raise RuntimeError("Cannot predict 2d if the network is 3d. Dummy.")
if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)
assert self.get_device() != "cpu", "CPU not implemented"
if pad_kwargs is None:
pad_kwargs = {'constant_values': 0}
# A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old
# code that uses this convention
if len(mirror_axes):
if max(mirror_axes) > 1:
raise ValueError("mirror axes. duh")
if self.training:
print('WARNING! Network is in train mode during inference. This may be intended, or not...')
assert len(x.shape) == 3, "data must have shape (c,x,y)"
if mixed_precision:
context = autocast
else:
context = no_op
with context():
with torch.no_grad():
if self.conv_op == nn.Conv2d:
if use_sliding_window:
res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
regions_class_order, use_gaussian, pad_border_mode,
pad_kwargs, all_in_gpu, verbose)
else:
res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
pad_border_mode, pad_kwargs, verbose)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
return res
@staticmethod
def _get_gaussian(patch_size, sigma_scale=1. / 8) -> np.ndarray:
tmp = np.zeros(patch_size)
center_coords = [i // 2 for i in patch_size]
sigmas = [i * sigma_scale for i in patch_size]
tmp[tuple(center_coords)] = 1
gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)
gaussian_importance_map = gaussian_importance_map / np.max(gaussian_importance_map) * 1
gaussian_importance_map = gaussian_importance_map.astype(np.float32)
# gaussian_importance_map cannot be 0, otherwise we may end up with nans!
gaussian_importance_map[gaussian_importance_map == 0] = np.min(
gaussian_importance_map[gaussian_importance_map != 0])
return gaussian_importance_map
@staticmethod
def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]:
assert [i >= j for i, j in zip(image_size, patch_size)], "image size must be as large or larger than patch_size"
assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'
# our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of
# 110, patch size of 32 and step_size of 0.5, then we want to make 4 steps starting at coordinate 0, 27, 55, 78
target_step_sizes_in_voxels = [i * step_size for i in patch_size]
num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, patch_size)]
steps = []
for dim in range(len(patch_size)):
# the highest step value for this dimension is
max_step_value = image_size[dim] - patch_size[dim]
if num_steps[dim] > 1:
actual_step_size = max_step_value / (num_steps[dim] - 1)
else:
actual_step_size = 99999999999 # does not matter because there is only one step at 0
steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])]
steps.append(steps_here)
return steps
    def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
                                          patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
                                          pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
                                          verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
        """
        Sliding-window inference for a 3d network on a (c, x, y, z) array.

        Pads the input to at least patch_size, predicts every tile (optionally
        with mirroring and Gaussian importance weighting), accumulates the
        tile outputs and renormalizes by the accumulated weights. Returns
        (predicted_segmentation, class_probabilities) as numpy arrays.
        """
        # better safe than sorry
        assert len(x.shape) == 4, "x must be (c, x, y, z)"
        assert self.get_device() != "cpu"
        if verbose: print("step_size:", step_size)
        if verbose: print("do mirror:", do_mirroring)
        assert patch_size is not None, "patch_size cannot be None for tiled prediction"
        # for sliding window inference the image must at least be as large as the patch size. It does not matter
        # whether the shape is divisible by 2**num_pool as long as the patch size is
        data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None)
        data_shape = data.shape  # still c, x, y, z
        # compute the steps for sliding window
        steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size)
        num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])
        if verbose:
            print("data shape:", data_shape)
            print("patch size:", patch_size)
            print("steps (x, y, and z):", steps)
            print("number of tiles:", num_tiles)
        # we only need to compute that once. It can take a while to compute this due to the large sigma in
        # gaussian_filter
        if use_gaussian and num_tiles > 1:
            # Reuse the cached Gaussian if the patch size has not changed.
            if self._gaussian_3d is None or not all(
                    [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]):
                if verbose: print('computing Gaussian')
                gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8)
                self._gaussian_3d = gaussian_importance_map
                self._patch_size_for_gaussian_3d = patch_size
            else:
                if verbose: print("using precomputed Gaussian")
                gaussian_importance_map = self._gaussian_3d
            gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
                                                                                    non_blocking=True)
        else:
            gaussian_importance_map = None
        if all_in_gpu:
            # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces
            # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU
            if use_gaussian and num_tiles > 1:
                # half precision for the outputs should be good enough. If the outputs here are half, the
                # gaussian_importance_map should be as well
                gaussian_importance_map = gaussian_importance_map.half()
                # make sure we did not round anything to 0
                gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
                    gaussian_importance_map != 0].min()
                add_for_nb_of_preds = gaussian_importance_map
            else:
                add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device())
            if verbose: print("initializing result array (on GPU)")
            aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                             device=self.get_device())
            if verbose: print("moving data to GPU")
            data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True)
            if verbose: print("initializing result_numsamples (on GPU)")
            aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                                       device=self.get_device())
        else:
            # CPU accumulation buffers (numpy); tile outputs are copied back.
            if use_gaussian and num_tiles > 1:
                add_for_nb_of_preds = self._gaussian_3d
            else:
                add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
            aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
            aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
        # NOTE: this loop reuses the name x, shadowing the input array (which
        # has already been consumed by the padding step above).
        for x in steps[0]:
            lb_x = x
            ub_x = x + patch_size[0]
            for y in steps[1]:
                lb_y = y
                ub_y = y + patch_size[1]
                for z in steps[2]:
                    lb_z = z
                    ub_z = z + patch_size[2]
                    predicted_patch = self._internal_maybe_mirror_and_pred_3D(
                        data[None, :, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z], mirror_axes, do_mirroring,
                        gaussian_importance_map)[0]
                    if all_in_gpu:
                        predicted_patch = predicted_patch.half()
                    else:
                        predicted_patch = predicted_patch.cpu().numpy()
                    # Accumulate weighted tile prediction and its weights.
                    aggregated_results[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += predicted_patch
                    aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds
        # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size
        slicer = tuple(
            [slice(0, aggregated_results.shape[i]) for i in
             range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
        aggregated_results = aggregated_results[slicer]
        aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]
        # computing the class_probabilities by dividing the aggregated result with result_numsamples
        class_probabilities = aggregated_results / aggregated_nb_of_predictions
        if regions_class_order is None:
            predicted_segmentation = class_probabilities.argmax(0)
        else:
            # Region-based output: threshold each channel at 0.5 and write the
            # class labels in the given order (later entries overwrite earlier).
            if all_in_gpu:
                class_probabilities_here = class_probabilities.detach().cpu().numpy()
            else:
                class_probabilities_here = class_probabilities
            predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32)
            for i, c in enumerate(regions_class_order):
                predicted_segmentation[class_probabilities_here[i] > 0.5] = c
        if all_in_gpu:
            if verbose: print("copying results to CPU")
            if regions_class_order is None:
                predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
            class_probabilities = class_probabilities.detach().cpu().numpy()
        if verbose: print("prediction done")
        return predicted_segmentation, class_probabilities
def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
                                mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
                                pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Fully convolutional inference on one 2D image (no sliding window).

    The image is padded to min_size and to a shape divisible by
    self.input_shape_must_be_divisible_by, run through the network (with
    optional test-time mirroring), and the padding is cropped off again.

    :return: (predicted_segmentation, predicted_probabilities), both numpy
    """
    assert len(x.shape) == 3, "x must be (c, x, y)"
    assert self.get_device() != "cpu"
    assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
                                                              'run _internal_predict_2D_2Dconv'
    if verbose: print("do mirror:", do_mirroring)

    data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
                                self.input_shape_must_be_divisible_by)

    probs = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring, None)[0]

    # undo the padding: keep the class axis untouched, crop only the spatial axes
    n_untouched_axes = len(probs.shape) - (len(slicer) - 1)
    slicer = tuple([slice(0, probs.shape[i]) for i in range(n_untouched_axes)] + slicer[1:])
    probs = probs[slicer]

    if regions_class_order is None:
        segmentation = probs.argmax(0)
        segmentation = segmentation.detach().cpu().numpy()
        probs = probs.detach().cpu().numpy()
    else:
        # region-based training: threshold each channel independently
        probs = probs.detach().cpu().numpy()
        segmentation = np.zeros(probs.shape[1:], dtype=np.float32)
        for i, c in enumerate(regions_class_order):
            segmentation[probs[i] > 0.5] = c

    return segmentation, probs
def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool,
                                mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
                                pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Fully convolutional inference on one 3D volume (no sliding window).

    Pads to min_size / network-divisible shape, predicts (optionally with
    test-time mirroring), crops the padding back off.

    :return: (predicted_segmentation, predicted_probabilities), both numpy
    """
    assert len(x.shape) == 4, "x must be (c, x, y, z)"
    assert self.get_device() != "cpu"
    assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
                                                              'run _internal_predict_3D_3Dconv'
    if verbose: print("do mirror:", do_mirroring)

    data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
                                self.input_shape_must_be_divisible_by)

    probs = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring, None)[0]

    # undo the padding: keep the class axis untouched, crop only the spatial axes
    n_untouched_axes = len(probs.shape) - (len(slicer) - 1)
    slicer = tuple([slice(0, probs.shape[i]) for i in range(n_untouched_axes)] + slicer[1:])
    probs = probs[slicer]

    if regions_class_order is None:
        segmentation = probs.argmax(0)
        segmentation = segmentation.detach().cpu().numpy()
        probs = probs.detach().cpu().numpy()
    else:
        # region-based training: threshold each channel independently
        probs = probs.detach().cpu().numpy()
        segmentation = np.zeros(probs.shape[1:], dtype=np.float32)
        for i, c in enumerate(regions_class_order):
            segmentation[probs[i] > 0.5] = c

    return segmentation, probs
def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,
                                       do_mirroring: bool = True,
                                       mult: np.ndarray or torch.tensor = None) -> torch.tensor:
    """
    Run the network on x and average the softmax over all requested flip
    (test-time mirroring) combinations. Optionally multiplies the result by
    `mult` (e.g. a Gaussian importance map).

    Note: everything happens on the GPU; a cuda tensor is returned, not a
    numpy array.
    """
    assert len(x.shape) == 5, 'x must be (b, c, x, y, z)'

    x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
    result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]),
                               dtype=torch.float).cuda(self.get_device(), non_blocking=True)

    if mult is not None:
        mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())

    if do_mirroring:
        mirror_idx = 8
        num_results = 2 ** len(mirror_axes)
    else:
        mirror_idx = 1
        num_results = 1

    # each entry: (tensor dims to flip, mirror axes that must be requested for this combo);
    # mirror axis 0/1/2 corresponds to tensor dim 2/3/4
    flip_specs = (
        ((), ()),
        ((4,), (2,)),
        ((3,), (1,)),
        ((4, 3), (2, 1)),
        ((2,), (0,)),
        ((4, 2), (0, 2)),
        ((3, 2), (0, 1)),
        ((4, 3, 2), (0, 1, 2)),
    )
    for flip_dims, required_axes in flip_specs[:mirror_idx]:
        if any(ax not in mirror_axes for ax in required_axes):
            continue
        if flip_dims:
            pred = self.inference_apply_nonlin(self(torch.flip(x, flip_dims)))
            result_torch += 1 / num_results * torch.flip(pred, flip_dims)
        else:
            pred = self.inference_apply_nonlin(self(x))
            result_torch += 1 / num_results * pred

    if mult is not None:
        result_torch[:, :] *= mult

    return result_torch
def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,
                                       do_mirroring: bool = True,
                                       mult: np.ndarray or torch.tensor = None) -> torch.tensor:
    """
    2D counterpart of _internal_maybe_mirror_and_pred_3D: averages the
    softmax over all requested flip combinations, optionally multiplying by
    `mult`. Returns a cuda tensor.
    """
    assert len(x.shape) == 4, 'x must be (b, c, x, y)'

    x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
    result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]),
                               dtype=torch.float).cuda(self.get_device(), non_blocking=True)

    if mult is not None:
        mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())

    if do_mirroring:
        mirror_idx = 4
        num_results = 2 ** len(mirror_axes)
    else:
        mirror_idx = 1
        num_results = 1

    # each entry: (tensor dims to flip, mirror axes that must be requested);
    # mirror axis 0/1 corresponds to tensor dim 2/3
    flip_specs = (
        ((), ()),
        ((3,), (1,)),
        ((2,), (0,)),
        ((3, 2), (0, 1)),
    )
    for flip_dims, required_axes in flip_specs[:mirror_idx]:
        if any(ax not in mirror_axes for ax in required_axes):
            continue
        if flip_dims:
            pred = self.inference_apply_nonlin(self(torch.flip(x, flip_dims)))
            result_torch += 1 / num_results * torch.flip(pred, flip_dims)
        else:
            pred = self.inference_apply_nonlin(self(x))
            result_torch += 1 / num_results * pred

    if mult is not None:
        result_torch[:, :] *= mult

    return result_torch
def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
                                      patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
                                      pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
                                      verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
    """
    Sliding-window (tiled) inference on one 2D image.

    The image is padded to at least patch_size, the network is run on every
    tile position returned by _compute_steps_for_sliding_window, and the tile
    predictions (optionally weighted by a Gaussian importance map) are
    accumulated and normalized by the per-pixel number of predictions.

    :param x: input image, shape (c, x, y)
    :param step_size: step between tiles as a fraction of patch_size
    :param all_in_gpu: if True, accumulation buffers live on the GPU (half precision)
    :return: (predicted_segmentation, class_probabilities)
    """
    # better safe than sorry
    assert len(x.shape) == 3, "x must be (c, x, y)"
    assert self.get_device() != "cpu"
    if verbose: print("step_size:", step_size)
    if verbose: print("do mirror:", do_mirroring)

    assert patch_size is not None, "patch_size cannot be None for tiled prediction"

    # for sliding window inference the image must at least be as large as the patch size. It does not matter
    # whether the shape is divisible by 2**num_pool as long as the patch size is
    data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None)
    data_shape = data.shape  # still c, x, y

    # compute the steps for sliding window
    steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size)
    num_tiles = len(steps[0]) * len(steps[1])

    if verbose:
        print("data shape:", data_shape)
        print("patch size:", patch_size)
        print("steps (x, y, and z):", steps)
        print("number of tiles:", num_tiles)

    # we only need to compute that once. It can take a while to compute this due to the large sigma in
    # gaussian_filter
    if use_gaussian and num_tiles > 1:
        # cache the Gaussian in self._gaussian_2d; recompute only when the patch size changed
        if self._gaussian_2d is None or not all(
                [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]):
            if verbose: print('computing Gaussian')
            gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8)

            self._gaussian_2d = gaussian_importance_map
            self._patch_size_for_gaussian_2d = patch_size
        else:
            if verbose: print("using precomputed Gaussian")
            gaussian_importance_map = self._gaussian_2d

        gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
                                                                                non_blocking=True)
    else:
        gaussian_importance_map = None

    if all_in_gpu:
        # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces
        # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU
        if use_gaussian and num_tiles > 1:
            # half precision for the outputs should be good enough. If the outputs here are half, the
            # gaussian_importance_map should be as well
            gaussian_importance_map = gaussian_importance_map.half()

            # make sure we did not round anything to 0
            gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
                gaussian_importance_map != 0].min()

            add_for_nb_of_preds = gaussian_importance_map
        else:
            add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device())

        if verbose: print("initializing result array (on GPU)")
        aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                         device=self.get_device())

        if verbose: print("moving data to GPU")
        data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True)

        if verbose: print("initializing result_numsamples (on GPU)")
        aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                                   device=self.get_device())
    else:
        if use_gaussian and num_tiles > 1:
            # NOTE(review): numpy copy of the Gaussian is used here (the cuda copy above is only the
            # multiplier passed to the network); assumes _get_gaussian returned float32 — confirm
            add_for_nb_of_preds = self._gaussian_2d
        else:
            add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
        aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
        aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)

    # NOTE(review): the loop variable shadows the input image `x`; harmless because only `data`
    # is used from this point on
    for x in steps[0]:
        lb_x = x
        ub_x = x + patch_size[0]
        for y in steps[1]:
            lb_y = y
            ub_y = y + patch_size[1]

            predicted_patch = self._internal_maybe_mirror_and_pred_2D(
                data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring,
                gaussian_importance_map)[0]

            if all_in_gpu:
                predicted_patch = predicted_patch.half()
            else:
                predicted_patch = predicted_patch.cpu().numpy()

            aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch
            aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y] += add_for_nb_of_preds

    # we reverse the padding here (remember that we padded the input to be at least as large as the patch size)
    slicer = tuple(
        [slice(0, aggregated_results.shape[i]) for i in
         range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
    aggregated_results = aggregated_results[slicer]
    aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]

    # computing the class_probabilities by dividing the aggregated result with result_numsamples
    class_probabilities = aggregated_results / aggregated_nb_of_predictions

    if regions_class_order is None:
        predicted_segmentation = class_probabilities.argmax(0)
    else:
        # region-based training: threshold each channel independently
        if all_in_gpu:
            class_probabilities_here = class_probabilities.detach().cpu().numpy()
        else:
            class_probabilities_here = class_probabilities
        predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32)
        for i, c in enumerate(regions_class_order):
            predicted_segmentation[class_probabilities_here[i] > 0.5] = c

    if all_in_gpu:
        if verbose: print("copying results to CPU")

        if regions_class_order is None:
            predicted_segmentation = predicted_segmentation.detach().cpu().numpy()

        class_probabilities = class_probabilities.detach().cpu().numpy()

    if verbose: print("prediction done")
    return predicted_segmentation, class_probabilities
def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
                                mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
                                pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Predict a 3D volume slice by slice with the fully convolutional 2D
    predictor and stack the per-slice results back into a volume.
    """
    if all_in_gpu:
        raise NotImplementedError
    assert len(x.shape) == 4, "data must be c, x, y, z"

    seg_slices = []
    softmax_slices = []
    for slice_idx in range(x.shape[1]):
        seg, softmax = self._internal_predict_2D_2Dconv(
            x[:, slice_idx], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode,
            pad_kwargs, verbose)
        seg_slices.append(seg[None])
        softmax_slices.append(softmax[None])

    predicted_segmentation = np.vstack(seg_slices)
    # move the class axis in front of the slice axis: (c, z, x, y)
    softmax_pred = np.vstack(softmax_slices).transpose((1, 0, 2, 3))
    return predicted_segmentation, softmax_pred
def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
                               mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
                               pseudo3D_slices: int = 5, all_in_gpu: bool = False,
                               pad_border_mode: str = "constant", pad_kwargs: dict = None,
                               verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Pseudo-3D prediction: for every z position, feed the surrounding
    `pseudo3D_slices` slices as extra channels to the 2D predictor and stack
    the per-slice results back into a volume.
    """
    if all_in_gpu:
        raise NotImplementedError
    assert len(x.shape) == 4, "data must be c, x, y, z"
    assert pseudo3D_slices % 2 == 1, "pseudo3D_slices must be odd"
    extra_slices = (pseudo3D_slices - 1) // 2

    # zero-pad along z so the first/last slice also have enough neighbours
    pad_shape = np.array(x.shape)
    pad_shape[1] = extra_slices
    zero_pad = np.zeros(pad_shape, dtype=np.float32)
    data = np.concatenate((zero_pad, x, zero_pad), 1)

    seg_slices = []
    softmax_slices = []
    for center in range(extra_slices, data.shape[1] - extra_slices):
        # fold the neighbouring slices into the channel axis
        stack = data[:, (center - extra_slices):(center + extra_slices + 1)]
        stack = stack.reshape((-1, stack.shape[-2], stack.shape[-1]))
        seg, softmax = self._internal_predict_2D_2Dconv(stack, min_size, do_mirroring, mirror_axes,
                                                        regions_class_order, pad_border_mode, pad_kwargs,
                                                        verbose)
        seg_slices.append(seg[None])
        softmax_slices.append(softmax[None])

    predicted_segmentation = np.vstack(seg_slices)
    softmax_pred = np.vstack(softmax_slices).transpose((1, 0, 2, 3))
    return predicted_segmentation, softmax_pred
def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool,
                                      mirror_axes: tuple = (0, 1), step_size: float = 0.5,
                                      regions_class_order: tuple = None, use_gaussian: bool = False,
                                      pad_border_mode: str = "edge", pad_kwargs: dict = None,
                                      all_in_gpu: bool = False,
                                      verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Predict a 3D volume slice by slice with the tiled (sliding-window) 2D
    predictor and stack the per-slice results back into a volume.
    """
    if all_in_gpu:
        raise NotImplementedError
    assert len(x.shape) == 4, "data must be c, x, y, z"

    seg_slices = []
    softmax_slices = []
    for slice_idx in range(x.shape[1]):
        seg, softmax = self._internal_predict_2D_2Dconv_tiled(
            x[:, slice_idx], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order,
            use_gaussian, pad_border_mode, pad_kwargs, all_in_gpu, verbose)
        seg_slices.append(seg[None])
        softmax_slices.append(softmax[None])

    predicted_segmentation = np.vstack(seg_slices)
    # move the class axis in front of the slice axis: (c, z, x, y)
    softmax_pred = np.vstack(softmax_slices).transpose((1, 0, 2, 3))
    return predicted_segmentation, softmax_pred
if __name__ == '__main__':
    # (patch_size, image_size, step_size) triples to sanity-check the sliding-window step computation
    examples = [
        ((30, 224, 224), (162, 529, 529), 0.5),
        ((30, 224, 224), (162, 529, 529), 1),
        ((30, 224, 224), (162, 529, 529), 0.1),
        ((30, 224, 224), (60, 448, 224), 1),
        ((30, 224, 224), (60, 448, 224), 0.5),
        ((30, 224, 224), (30, 224, 224), 1),
        ((30, 224, 224), (30, 224, 224), 0.125),
        ((123, 54, 123), (246, 162, 369), 0.25),
    ]
    for patch_size, image_size, step_size in examples:
        print(SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size))
| 43,801
| 51.964933
| 137
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/initialization.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
class InitWeights_He(object):
    """
    He (Kaiming) normal weight initializer for conv / transposed-conv layers;
    biases are reset to zero. Meant to be used via ``network.apply(InitWeights_He(slope))``.
    """

    def __init__(self, neg_slope=1e-2):
        # negative slope passed to kaiming_normal_ (matches a following LeakyReLU)
        self.neg_slope = neg_slope

    def __call__(self, module):
        conv_types = (nn.Conv3d, nn.Conv2d, nn.ConvTranspose2d, nn.ConvTranspose3d)
        if isinstance(module, conv_types):
            module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)
class InitWeights_XavierUniform(object):
    """
    Xavier (Glorot) uniform weight initializer for conv / transposed-conv
    layers; biases are reset to zero. Use via ``network.apply(...)``.
    """

    def __init__(self, gain=1):
        # gain factor forwarded to xavier_uniform_
        self.gain = gain

    def __call__(self, module):
        conv_types = (nn.Conv3d, nn.Conv2d, nn.ConvTranspose2d, nn.ConvTranspose3d)
        if isinstance(module, conv_types):
            module.weight = nn.init.xavier_uniform_(module.weight, self.gain)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)
| 1,673
| 41.923077
| 158
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/generic_UNet.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
import torch
import numpy as np
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
import torch.nn.functional
class ConvDropoutNormNonlin(nn.Module):
    """
    One conv block: conv -> (optional dropout) -> norm -> nonlinearity, with
    every operation and its kwargs configurable.

    fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. Bad.
    """

    def __init__(self, input_channels, output_channels,
                 conv_op=nn.Conv2d, conv_kwargs=None,
                 norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU, nonlin_kwargs=None):
        super(ConvDropoutNormNonlin, self).__init__()
        # fall back to the nnU-Net defaults where no kwargs are given
        nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if nonlin_kwargs is None else nonlin_kwargs
        dropout_op_kwargs = {'p': 0.5, 'inplace': True} if dropout_op_kwargs is None else dropout_op_kwargs
        norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if norm_op_kwargs is None else norm_op_kwargs
        conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1,
                       'bias': True} if conv_kwargs is None else conv_kwargs

        # keep the configuration around so that users of this block can inspect it
        self.nonlin_kwargs = nonlin_kwargs
        self.nonlin = nonlin
        self.dropout_op = dropout_op
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.conv_kwargs = conv_kwargs
        self.conv_op = conv_op
        self.norm_op = norm_op

        self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
        # dropout is only instantiated for a strictly positive dropout probability
        use_dropout = (self.dropout_op is not None
                       and self.dropout_op_kwargs['p'] is not None
                       and self.dropout_op_kwargs['p'] > 0)
        self.dropout = self.dropout_op(**self.dropout_op_kwargs) if use_dropout else None
        self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
        self.lrelu = self.nonlin(**self.nonlin_kwargs)

    def forward(self, x):
        x = self.conv(x)
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.instnorm(x)
        return self.lrelu(x)
class ConvDropoutNonlinNorm(ConvDropoutNormNonlin):
    """Same block as ConvDropoutNormNonlin but applies the nonlinearity before normalization."""

    def forward(self, x):
        out = self.conv(x)
        if self.dropout is not None:
            out = self.dropout(out)
        out = self.lrelu(out)
        return self.instnorm(out)
class StackedConvLayers(nn.Module):
    def __init__(self, input_feature_channels, output_feature_channels, num_convs,
                 conv_op=nn.Conv2d, conv_kwargs=None,
                 norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):
        '''
        Stacks ``num_convs`` conv blocks (``basic_block``). ``first_stride`` is
        applied only to the first block in the stack; all other parameters
        affect every block.

        :param input_feature_channels: channels entering the first block
        :param output_feature_channels: channels produced by every block
        :param num_convs: number of stacked blocks
        :param first_stride: stride for the first conv only (downsampling)
        :param basic_block: block class to instantiate (ConvDropoutNormNonlin-style)
        '''
        self.input_channels = input_feature_channels
        self.output_channels = output_feature_channels

        # fall back to the nnU-Net defaults where no kwargs are given
        nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if nonlin_kwargs is None else nonlin_kwargs
        dropout_op_kwargs = {'p': 0.5, 'inplace': True} if dropout_op_kwargs is None else dropout_op_kwargs
        norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if norm_op_kwargs is None else norm_op_kwargs
        conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1,
                       'bias': True} if conv_kwargs is None else conv_kwargs

        self.nonlin_kwargs = nonlin_kwargs
        self.nonlin = nonlin
        self.dropout_op = dropout_op
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.conv_kwargs = conv_kwargs
        self.conv_op = conv_op
        self.norm_op = norm_op

        # the first block may downsample; it gets its own kwargs copy with the stride patched in
        if first_stride is not None:
            self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
            self.conv_kwargs_first_conv['stride'] = first_stride
        else:
            self.conv_kwargs_first_conv = conv_kwargs

        super(StackedConvLayers, self).__init__()

        layers = [basic_block(input_feature_channels, output_feature_channels, self.conv_op,
                              self.conv_kwargs_first_conv,
                              self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
                              self.nonlin, self.nonlin_kwargs)]
        for _ in range(num_convs - 1):
            layers.append(basic_block(output_feature_channels, output_feature_channels, self.conv_op,
                                      self.conv_kwargs,
                                      self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
                                      self.nonlin, self.nonlin_kwargs))
        self.blocks = nn.Sequential(*layers)

    def forward(self, x):
        return self.blocks(x)
def print_module_training_status(module):
    """Debug helper: print repr and training flag for conv / dropout / norm layers."""
    monitored_types = (nn.Conv2d, nn.Conv3d, nn.Dropout3d, nn.Dropout2d, nn.Dropout,
                       nn.InstanceNorm3d, nn.InstanceNorm2d, nn.InstanceNorm1d,
                       nn.BatchNorm2d, nn.BatchNorm3d, nn.BatchNorm1d)
    if isinstance(module, monitored_types):
        print(str(module), module.training)
class Upsample(nn.Module):
    """
    Thin module wrapper around ``nn.functional.interpolate``.

    :param size: target spatial size (mutually exclusive with scale_factor)
    :param scale_factor: upsampling factor per spatial dim
    :param mode: interpolation mode ('nearest', 'bilinear', 'trilinear', ...)
    :param align_corners: forwarded to interpolate for the modes that support it
    """

    # modes for which F.interpolate accepts the align_corners argument
    _ALIGN_CORNERS_MODES = ('linear', 'bilinear', 'bicubic', 'trilinear')

    def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):
        super(Upsample, self).__init__()
        self.align_corners = align_corners
        self.mode = mode
        self.scale_factor = scale_factor
        self.size = size

    def forward(self, x):
        # Bug fix: F.interpolate raises a ValueError when align_corners is not None
        # for modes that do not support it (e.g. the default mode='nearest'), so
        # only forward the flag when the mode can actually use it.
        align_corners = self.align_corners if self.mode in self._ALIGN_CORNERS_MODES else None
        return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode,
                                         align_corners=align_corners)
class Generic_UNet(SegmentationNetwork):
DEFAULT_BATCH_SIZE_3D = 2
DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
SPACING_FACTOR_BETWEEN_STAGES = 2
BASE_NUM_FEATURES_3D = 30
MAX_NUMPOOL_3D = 999
MAX_NUM_FILTERS_3D = 320
DEFAULT_PATCH_SIZE_2D = (256, 256)
BASE_NUM_FEATURES_2D = 30
DEFAULT_BATCH_SIZE_2D = 50
MAX_NUMPOOL_2D = 999
MAX_FILTERS_2D = 480
use_this_for_batch_size_computation_2D = 19739648
use_this_for_batch_size_computation_3D = 520000000 # 505789440
def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,
             feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,
             norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
             dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
             nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
             final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
             conv_kernel_sizes=None,
             upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,
             max_num_features=None, basic_block=ConvDropoutNormNonlin,
             seg_output_use_bias=False):
    """
    basically more flexible than v1, architecture is the same

    Builds a U-Net: encoder (conv_blocks_context + td), bottleneck, decoder
    (tu + conv_blocks_localization) and one segmentation head per decoder
    stage (seg_outputs) for deep supervision.

    Does this look complicated? Nah bro. Functionality > usability

    This does everything you need, including world peace.

    Questions? -> f.isensee@dkfz.de
    """
    super(Generic_UNet, self).__init__()
    self.convolutional_upsampling = convolutional_upsampling
    self.convolutional_pooling = convolutional_pooling
    self.upscale_logits = upscale_logits
    # fall back to the nnU-Net defaults where no kwargs are given
    if nonlin_kwargs is None:
        nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
    if dropout_op_kwargs is None:
        dropout_op_kwargs = {'p': 0.5, 'inplace': True}
    if norm_op_kwargs is None:
        norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}

    # NOTE: kernel_size/padding are patched into this shared dict per stage below
    self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}

    self.nonlin = nonlin
    self.nonlin_kwargs = nonlin_kwargs
    self.dropout_op_kwargs = dropout_op_kwargs
    self.norm_op_kwargs = norm_op_kwargs
    self.weightInitializer = weightInitializer
    self.conv_op = conv_op
    self.norm_op = norm_op
    self.dropout_op = dropout_op
    self.num_classes = num_classes
    self.final_nonlin = final_nonlin
    self._deep_supervision = deep_supervision
    self.do_ds = deep_supervision

    # pick the 2D or 3D flavours of pooling/upsampling and default kernel sizes
    if conv_op == nn.Conv2d:
        upsample_mode = 'bilinear'
        pool_op = nn.MaxPool2d
        transpconv = nn.ConvTranspose2d
        if pool_op_kernel_sizes is None:
            pool_op_kernel_sizes = [(2, 2)] * num_pool
        if conv_kernel_sizes is None:
            conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
    elif conv_op == nn.Conv3d:
        upsample_mode = 'trilinear'
        pool_op = nn.MaxPool3d
        transpconv = nn.ConvTranspose3d
        if pool_op_kernel_sizes is None:
            pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
        if conv_kernel_sizes is None:
            conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
    else:
        raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))

    # product of all pooling kernels per axis = required divisibility of the input shape
    self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)
    self.pool_op_kernel_sizes = pool_op_kernel_sizes
    self.conv_kernel_sizes = conv_kernel_sizes

    # 'same' padding: pad 1 for kernel 3, pad 0 for kernel 1
    self.conv_pad_sizes = []
    for krnl in self.conv_kernel_sizes:
        self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])

    if max_num_features is None:
        if self.conv_op == nn.Conv3d:
            self.max_num_features = self.MAX_NUM_FILTERS_3D
        else:
            self.max_num_features = self.MAX_FILTERS_2D
    else:
        self.max_num_features = max_num_features

    self.conv_blocks_context = []
    self.conv_blocks_localization = []
    self.td = []
    self.tu = []
    self.seg_outputs = []

    output_features = base_num_features
    input_features = input_channels

    # ---- encoder ----
    for d in range(num_pool):
        # determine the first stride
        if d != 0 and self.convolutional_pooling:
            first_stride = pool_op_kernel_sizes[d - 1]
        else:
            first_stride = None

        self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]
        self.conv_kwargs['padding'] = self.conv_pad_sizes[d]
        # add convolutions
        self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
                                                          self.conv_op, self.conv_kwargs, self.norm_op,
                                                          self.norm_op_kwargs, self.dropout_op,
                                                          self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
                                                          first_stride, basic_block=basic_block))
        if not self.convolutional_pooling:
            self.td.append(pool_op(pool_op_kernel_sizes[d]))
        input_features = output_features
        output_features = int(np.round(output_features * feat_map_mul_on_downscale))

        output_features = min(output_features, self.max_num_features)

    # now the bottleneck.
    # determine the first stride
    if self.convolutional_pooling:
        first_stride = pool_op_kernel_sizes[-1]
    else:
        first_stride = None

    # the output of the last conv must match the number of features from the skip connection if we are not using
    # convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be
    # done by the transposed conv
    if self.convolutional_upsampling:
        final_num_features = output_features
    else:
        final_num_features = self.conv_blocks_context[-1].output_channels

    self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
    self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
    self.conv_blocks_context.append(nn.Sequential(
        StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
                          self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
                          self.nonlin_kwargs, first_stride, basic_block=basic_block),
        StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
                          self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
                          self.nonlin_kwargs, basic_block=basic_block)))

    # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here
    if not dropout_in_localization:
        old_dropout_p = self.dropout_op_kwargs['p']
        self.dropout_op_kwargs['p'] = 0.0

    # now lets build the localization pathway (decoder)
    for u in range(num_pool):
        nfeatures_from_down = final_num_features
        nfeatures_from_skip = self.conv_blocks_context[
            -(2 + u)].output_channels  # self.conv_blocks_context[-1] is bottleneck, so start with -2
        n_features_after_tu_and_concat = nfeatures_from_skip * 2

        # the first conv reduces the number of features to match those of skip
        # the following convs work on that number of features
        # if not convolutional upsampling then the final conv reduces the num of features again
        if u != num_pool - 1 and not self.convolutional_upsampling:
            final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
        else:
            final_num_features = nfeatures_from_skip

        if not self.convolutional_upsampling:
            self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))
        else:
            self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],
                                      pool_op_kernel_sizes[-(u + 1)], bias=False))

        self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]
        self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]
        self.conv_blocks_localization.append(nn.Sequential(
            StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,
                              self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,
                              self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),
            StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
                              self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
                              self.nonlin, self.nonlin_kwargs, basic_block=basic_block)
        ))

    # one 1x1(x1) segmentation head per decoder stage (for deep supervision)
    for ds in range(len(self.conv_blocks_localization)):
        self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,
                                        1, 1, 0, 1, 1, seg_output_use_bias))

    # ops that upscale the auxiliary logits to full resolution (identity if upscale_logits is False)
    self.upscale_logits_ops = []
    cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
    for usl in range(num_pool - 1):
        if self.upscale_logits:
            self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
                                                    mode=upsample_mode))
        else:
            self.upscale_logits_ops.append(lambda x: x)

    # restore the dropout probability we temporarily zeroed for the decoder
    if not dropout_in_localization:
        self.dropout_op_kwargs['p'] = old_dropout_p

    # register all modules properly
    self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
    self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
    self.td = nn.ModuleList(self.td)
    self.tu = nn.ModuleList(self.tu)
    self.seg_outputs = nn.ModuleList(self.seg_outputs)
    if self.upscale_logits:
        self.upscale_logits_ops = nn.ModuleList(
            self.upscale_logits_ops)  # lambda x:x is not a Module so we need to distinguish here

    if self.weightInitializer is not None:
        self.apply(self.weightInitializer)
        # self.apply(print_module_training_status)
def forward(self, x):
    """Encoder-decoder pass; returns either the final segmentation logits or,
    with deep supervision enabled, a tuple of maps at decreasing resolutions."""
    skips = []
    # encoder: every stage except the bottleneck contributes a skip connection
    for depth in range(len(self.conv_blocks_context) - 1):
        x = self.conv_blocks_context[depth](x)
        skips.append(x)
        if not self.convolutional_pooling:
            x = self.td[depth](x)
    x = self.conv_blocks_context[-1](x)

    # decoder: upsample, concat matching skip, convolve, emit a seg head per stage
    seg_outputs = []
    for stage, upsample in enumerate(self.tu):
        x = upsample(x)
        x = torch.cat((x, skips[-(stage + 1)]), dim=1)
        x = self.conv_blocks_localization[stage](x)
        seg_outputs.append(self.final_nonlin(self.seg_outputs[stage](x)))

    if self._deep_supervision and self.do_ds:
        # full-resolution output first, then the lower resolutions (optionally upscaled)
        lower = [op(seg) for op, seg in zip(list(self.upscale_logits_ops)[::-1],
                                            seg_outputs[:-1][::-1])]
        return tuple([seg_outputs[-1]] + lower)
    return seg_outputs[-1]
@staticmethod
def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,
num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,
conv_per_stage=2):
"""
This only applies for num_conv_per_stage and convolutional_upsampling=True
not real vram consumption. just a constant term to which the vram consumption will be approx proportional
(+ offset for parameter storage)
:param deep_supervision:
:param patch_size:
:param num_pool_per_axis:
:param base_num_features:
:param max_num_features:
:param num_modalities:
:param num_classes:
:param pool_op_kernel_sizes:
:return:
"""
if not isinstance(num_pool_per_axis, np.ndarray):
num_pool_per_axis = np.array(num_pool_per_axis)
npool = len(pool_op_kernel_sizes)
map_size = np.array(patch_size)
tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +
num_modalities * np.prod(map_size, dtype=np.int64) +
num_classes * np.prod(map_size, dtype=np.int64))
num_feat = base_num_features
for p in range(npool):
for pi in range(len(num_pool_per_axis)):
map_size[pi] /= pool_op_kernel_sizes[p][pi]
num_feat = min(num_feat * 2, max_num_features)
num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv
tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
if deep_supervision and p < (npool - 2):
tmp += np.prod(map_size, dtype=np.int64) * num_classes
# print(p, map_size, num_feat, tmp)
return tmp
| 20,989
| 45.644444
| 180
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/generic_modular_residual_UNet.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from nnunet.network_architecture.custom_modules.conv_blocks import BasicResidualBlock, ResidualLayer
from nnunet.network_architecture.generic_UNet import Upsample
from nnunet.network_architecture.generic_modular_UNet import PlainConvUNetDecoder, get_default_network_config
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
from torch import nn
from torch.optim import SGD
from torch.backends import cudnn
class ResidualUNetEncoder(nn.Module):
    """Residual U-Net encoder, including the bottleneck stage.

    A plain conv->norm->nonlin stem is followed by one ResidualLayer per stage.
    Per-stage output features / kernel sizes are recorded so a matching decoder
    can be built from this instance.
    """

    def __init__(self, input_channels, base_num_features, num_blocks_per_stage, feat_map_mul_on_downscale,
                 pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True,
                 max_num_features=480, block=BasicResidualBlock):
        """
        Following UNet building blocks can be added by utilizing the properties this class exposes (TODO)

        this one includes the bottleneck layer!

        :param input_channels: number of input channels (modalities)
        :param base_num_features: features of the first stage; multiplied by
            feat_map_mul_on_downscale**stage and capped at max_num_features
        :param num_blocks_per_stage: int or per-stage list/tuple of residual block counts
        :param feat_map_mul_on_downscale: feature multiplier per stage
        :param pool_op_kernel_sizes: per-stage strides (first stage may be [1, ...])
        :param conv_kernel_sizes: per-stage conv kernel sizes
        :param props: op dict as produced by get_default_network_config
        :param default_return_skips: whether forward() returns all stage outputs by default
        :param max_num_features: cap on per-stage feature count
        :param block: residual block class used inside ResidualLayer
        """
        super(ResidualUNetEncoder, self).__init__()

        self.default_return_skips = default_return_skips
        self.props = props

        self.stages = []
        self.stage_output_features = []
        self.stage_pool_kernel_size = []
        self.stage_conv_op_kernel_size = []

        assert len(pool_op_kernel_sizes) == len(conv_kernel_sizes)

        num_stages = len(conv_kernel_sizes)

        # a plain int means the same number of residual blocks in every stage
        if not isinstance(num_blocks_per_stage, (list, tuple)):
            num_blocks_per_stage = [num_blocks_per_stage] * num_stages
        else:
            assert len(num_blocks_per_stage) == num_stages

        self.num_blocks_per_stage = num_blocks_per_stage  # decoder may need this

        # stem: single conv -> norm -> nonlin before the residual stages
        self.initial_conv = props['conv_op'](input_channels, base_num_features, 3, padding=1, **props['conv_op_kwargs'])
        self.initial_norm = props['norm_op'](base_num_features, **props['norm_op_kwargs'])
        self.initial_nonlin = props['nonlin'](**props['nonlin_kwargs'])

        current_input_features = base_num_features
        for stage in range(num_stages):
            current_output_features = min(base_num_features * feat_map_mul_on_downscale ** stage, max_num_features)
            current_kernel_size = conv_kernel_sizes[stage]
            current_pool_kernel_size = pool_op_kernel_sizes[stage]

            current_stage = ResidualLayer(current_input_features, current_output_features, current_kernel_size, props,
                                          self.num_blocks_per_stage[stage], current_pool_kernel_size, block)

            self.stages.append(current_stage)
            self.stage_output_features.append(current_output_features)
            self.stage_conv_op_kernel_size.append(current_kernel_size)
            self.stage_pool_kernel_size.append(current_pool_kernel_size)

            # update current_input_features
            current_input_features = current_output_features

        self.stages = nn.ModuleList(self.stages)

    def forward(self, x, return_skips=None):
        """
        :param x: input tensor
        :param return_skips: if None then self.default_return_skips is used
        :return: list of all stage outputs (bottleneck last) when skips are
            requested, otherwise only the bottleneck output
        """
        # BUGFIX: resolve the flag BEFORE collecting skips. Previously the skips
        # were appended based on self.default_return_skips only, so calling
        # forward(x, return_skips=True) on an encoder constructed with
        # default_return_skips=False returned an empty list.
        if return_skips is None:
            return_skips = self.default_return_skips

        skips = []

        x = self.initial_nonlin(self.initial_norm(self.initial_conv(x)))
        for s in self.stages:
            x = s(x)
            if return_skips:
                skips.append(x)

        if return_skips:
            return skips
        else:
            return x

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_modalities, pool_op_kernel_sizes, num_conv_per_stage_encoder,
                                        feat_map_mul_on_downscale, batch_size):
        """Constant proxy proportional to activation memory of the encoder
        (not real VRAM); used by the experiment planner to size configurations."""
        npool = len(pool_op_kernel_sizes) - 1

        current_shape = np.array(patch_size)

        # first stage + input; *2+1 accounts for the two convs per residual block
        # and the conv in the skip of the first block
        tmp = (num_conv_per_stage_encoder[0] * 2 + 1) * np.prod(current_shape) * base_num_features \
              + num_modalities * np.prod(current_shape)

        num_feat = base_num_features

        for p in range(1, npool + 1):
            current_shape = current_shape / np.array(pool_op_kernel_sizes[p])
            num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features)
            num_convs = num_conv_per_stage_encoder[p] * 2 + 1  # + 1 for conv in skip in first block
            print(p, num_feat, num_convs, current_shape)
            tmp += num_convs * np.prod(current_shape) * num_feat
        return tmp * batch_size
class ResidualUNetDecoder(nn.Module):
    """Residual U-Net decoder mirroring a ResidualUNetEncoder (`previous`)."""

    def __init__(self, previous, num_classes, num_blocks_per_stage=None, network_props=None, deep_supervision=False,
                 upscale_logits=False, block=BasicResidualBlock):
        """
        :param previous: the ResidualUNetEncoder instance this decoder attaches to
        :param num_classes: number of segmentation output channels
        :param num_blocks_per_stage: residual blocks per decoder stage; if None it is
            derived from the encoder (its stages reversed, without the bottleneck)
        :param network_props: op dict; if None the encoder's props are reused
        :param deep_supervision: also emit segmentation maps from intermediate stages
        :param upscale_logits: upsample deep-supervision outputs to input resolution
        :param block: residual block class
        """
        super(ResidualUNetDecoder, self).__init__()
        self.num_classes = num_classes
        self.deep_supervision = deep_supervision
        """
        We assume the bottleneck is part of the encoder, so we can start with upsample -> concat here
        """
        previous_stages = previous.stages
        previous_stage_output_features = previous.stage_output_features
        previous_stage_pool_kernel_size = previous.stage_pool_kernel_size
        previous_stage_conv_op_kernel_size = previous.stage_conv_op_kernel_size

        if network_props is None:
            self.props = previous.props
        else:
            self.props = network_props

        # choose transposed-conv and interpolation flavor matching the conv dimensionality
        if self.props['conv_op'] == nn.Conv2d:
            transpconv = nn.ConvTranspose2d
            upsample_mode = "bilinear"
        elif self.props['conv_op'] == nn.Conv3d:
            transpconv = nn.ConvTranspose3d
            upsample_mode = "trilinear"
        else:
            raise ValueError("unknown convolution dimensionality, conv op: %s" % str(self.props['conv_op']))

        if num_blocks_per_stage is None:
            num_blocks_per_stage = previous.num_blocks_per_stage[:-1][::-1]

        assert len(num_blocks_per_stage) == len(previous.num_blocks_per_stage) - 1

        self.stage_pool_kernel_size = previous_stage_pool_kernel_size
        self.stage_output_features = previous_stage_output_features
        self.stage_conv_op_kernel_size = previous_stage_conv_op_kernel_size

        num_stages = len(previous_stages) - 1  # we have one less as the first stage here is what comes after the
        # bottleneck

        self.tus = []
        self.stages = []
        self.deep_supervision_outputs = []

        # only used for upsample_logits
        cum_upsample = np.cumprod(np.vstack(self.stage_pool_kernel_size), axis=0).astype(int)

        # iterate encoder stages from just-below-bottleneck up to full resolution
        for i, s in enumerate(np.arange(num_stages)[::-1]):
            features_below = previous_stage_output_features[s + 1]
            features_skip = previous_stage_output_features[s]

            # transposed conv with kernel == stride: exact inverse of the pooling step
            self.tus.append(transpconv(features_below, features_skip, previous_stage_pool_kernel_size[s + 1],
                                       previous_stage_pool_kernel_size[s + 1], bias=False))
            # after we tu we concat features so now we have 2xfeatures_skip
            self.stages.append(ResidualLayer(2 * features_skip, features_skip, previous_stage_conv_op_kernel_size[s],
                                             self.props, num_blocks_per_stage[i], None, block))

            # intermediate seg heads for deep supervision (not on the final stage s == 0,
            # which is covered by self.segmentation_output)
            if deep_supervision and s != 0:
                seg_layer = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False)
                if upscale_logits:
                    upsample = Upsample(scale_factor=cum_upsample[s], mode=upsample_mode)
                    self.deep_supervision_outputs.append(nn.Sequential(seg_layer, upsample))
                else:
                    self.deep_supervision_outputs.append(seg_layer)

        # final 1x1(x1) seg head at full resolution
        self.segmentation_output = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False)

        self.tus = nn.ModuleList(self.tus)
        self.stages = nn.ModuleList(self.stages)
        self.deep_supervision_outputs = nn.ModuleList(self.deep_supervision_outputs)

    def forward(self, skips):
        # skips come from the encoder. They are sorted so that the bottleneck is last in the list
        # what is maybe not perfect is that the TUs and stages here are sorted the other way around
        # so let's just reverse the order of skips
        skips = skips[::-1]
        seg_outputs = []

        x = skips[0]  # this is the bottleneck

        for i in range(len(self.tus)):
            x = self.tus[i](x)
            x = torch.cat((x, skips[i + 1]), dim=1)
            x = self.stages[i](x)
            if self.deep_supervision and (i != len(self.tus) - 1):
                seg_outputs.append(self.deep_supervision_outputs[i](x))

        segmentation = self.segmentation_output(x)

        if self.deep_supervision:
            seg_outputs.append(segmentation)
            return seg_outputs[
                   ::-1]  # seg_outputs are ordered so that the seg from the highest layer is first, the seg from
            # the bottleneck of the UNet last
        else:
            return segmentation

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_classes, pool_op_kernel_sizes, num_blocks_per_stage_decoder,
                                        feat_map_mul_on_downscale, batch_size):
        """
        This only applies for num_conv_per_stage and convolutional_upsampling=True
        not real vram consumption. just a constant term to which the vram consumption will be approx proportional
        (+ offset for parameter storage)
        :param patch_size: spatial size of the input patch
        :param base_num_features: features of the highest-resolution stage
        :param max_num_features: feature-count cap
        :param num_classes: segmentation output channels
        :param pool_op_kernel_sizes: per-stage downsampling factors
        :param num_blocks_per_stage_decoder: residual blocks per decoder stage
        :param feat_map_mul_on_downscale: feature multiplier per downsampling
        :param batch_size: training batch size
        :return: proxy value proportional to decoder activation memory
        """
        npool = len(pool_op_kernel_sizes) - 1

        current_shape = np.array(patch_size)
        # full-resolution stage plus the output logits
        tmp = (num_blocks_per_stage_decoder[-1] * 2 + 1) * np.prod(
            current_shape) * base_num_features + num_classes * np.prod(current_shape)

        num_feat = base_num_features

        for p in range(1, npool):
            current_shape = current_shape / np.array(pool_op_kernel_sizes[p])
            num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features)
            num_convs = num_blocks_per_stage_decoder[-(p + 1)] * 2 + 1 + 1  # +1 for transpconv and +1 for conv in skip
            print(p, num_feat, num_convs, current_shape)
            tmp += num_convs * np.prod(current_shape) * num_feat
        return tmp * batch_size
class ResidualUNet(SegmentationNetwork):
    """U-Net with residual encoder AND residual decoder."""

    # reference memory-budget constants used by the experiment planner
    use_this_for_batch_size_computation_2D = 858931200.0  # 1167982592.0
    use_this_for_batch_size_computation_3D = 727842816.0  # 1152286720.0
    default_base_num_features = 24
    default_conv_per_stage = (2, 2, 2, 2, 2, 2, 2, 2)

    def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale,
                 pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder,
                 deep_supervision=False, upscale_logits=False, max_features=512, initializer=None,
                 block=BasicResidualBlock):
        """Wire a ResidualUNetEncoder to a ResidualUNetDecoder; optionally apply
        a weight initializer to the whole network."""
        super(ResidualUNet, self).__init__()
        self.conv_op = props['conv_op']
        self.num_classes = num_classes

        self.encoder = ResidualUNetEncoder(
            input_channels, base_num_features, num_blocks_per_stage_encoder,
            feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes,
            props, default_return_skips=True, max_num_features=max_features, block=block)
        self.decoder = ResidualUNetDecoder(
            self.encoder, num_classes, num_blocks_per_stage_decoder, props,
            deep_supervision, upscale_logits, block=block)

        if initializer is not None:
            self.apply(initializer)

    def forward(self, x):
        """Encode (collecting skips), then decode to segmentation output(s)."""
        return self.decoder(self.encoder(x))

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_modalities, num_classes, pool_op_kernel_sizes, num_conv_per_stage_encoder,
                                        num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size):
        """Combined encoder + decoder memory proxy (see the part estimators)."""
        encoder_estimate = ResidualUNetEncoder.compute_approx_vram_consumption(
            patch_size, base_num_features, max_num_features, num_modalities,
            pool_op_kernel_sizes, num_conv_per_stage_encoder,
            feat_map_mul_on_downscale, batch_size)
        decoder_estimate = ResidualUNetDecoder.compute_approx_vram_consumption(
            patch_size, base_num_features, max_num_features, num_classes,
            pool_op_kernel_sizes, num_conv_per_stage_decoder,
            feat_map_mul_on_downscale, batch_size)
        return encoder_estimate + decoder_estimate
class FabiansUNet(SegmentationNetwork):
    """
    Residual Encoder, Plain conv decoder
    """
    # reference memory-budget values produced by find_2d/3d_configuration() below
    use_this_for_2D_configuration = 1244233721.0  # 1167982592.0
    use_this_for_3D_configuration = 1230348801.0
    default_blocks_per_stage_encoder = (1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4)
    default_blocks_per_stage_decoder = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
    default_min_batch_size = 2  # this is what works with the numbers above

    def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale,
                 pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder,
                 deep_supervision=False, upscale_logits=False, max_features=512, initializer=None,
                 block=BasicResidualBlock,
                 props_decoder=None):
        """Residual encoder + plain convolutional decoder.

        :param props_decoder: separate op dict for the decoder; if None the
            (modified) encoder props are reused
        """
        super().__init__()
        self.conv_op = props['conv_op']
        self.num_classes = num_classes
        self.encoder = ResidualUNetEncoder(input_channels, base_num_features, num_blocks_per_stage_encoder,
                                           feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes,
                                           props, default_return_skips=True, max_num_features=max_features, block=block)
        # NOTE(review): this mutates the caller's props dict in place to disable
        # dropout in the decoder — presumably intentional, but verify callers do
        # not reuse the same props dict elsewhere expecting the original value
        props['dropout_op_kwargs']['p'] = 0
        if props_decoder is None:
            props_decoder = props
        self.decoder = PlainConvUNetDecoder(self.encoder, num_classes, num_blocks_per_stage_decoder, props_decoder,
                                            deep_supervision, upscale_logits)
        if initializer is not None:
            self.apply(initializer)

    def forward(self, x):
        # encoder returns the list of skip connections (bottleneck last)
        skips = self.encoder(x)
        return self.decoder(skips)

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_modalities, num_classes, pool_op_kernel_sizes, num_conv_per_stage_encoder,
                                        num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size):
        """Combined memory proxy: residual encoder part + plain conv decoder part."""
        enc = ResidualUNetEncoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                                                  num_modalities, pool_op_kernel_sizes,
                                                                  num_conv_per_stage_encoder,
                                                                  feat_map_mul_on_downscale, batch_size)
        dec = PlainConvUNetDecoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                                                   num_classes, pool_op_kernel_sizes,
                                                                   num_conv_per_stage_decoder,
                                                                   feat_map_mul_on_downscale, batch_size)
        return enc + dec
def find_3d_configuration():
    """Dev utility (requires a CUDA GPU): empirically probe GPU memory for the
    3D FabiansUNet reference configuration.

    Builds a network with hand-picked hyperparameters, runs a few training
    iterations to verify the configuration fits in memory, then prints the value
    of compute_approx_vram_consumption that should be stored (incremented by 1)
    in FabiansUNet.use_this_for_3D_configuration.
    """
    # lets compute a reference for 3D
    # we select hyperparameters here so that we get approximately the same patch size as we would get with the
    # regular unet. This is just my choice. You can do whatever you want
    # These default hyperparemeters will then be used by the experiment planner

    # since this is more parameter intensive than the UNet, we will test a configuration that has a lot of parameters
    # herefore we copy the UNet configuration for Task005_Prostate
    cudnn.deterministic = False
    cudnn.benchmark = True

    patch_size = (20, 320, 256)
    max_num_features = 320
    num_modalities = 2
    num_classes = 3
    batch_size = 2

    # now we fiddle with the network specific hyperparameters until everything just barely fits into a titanx
    blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder
    blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder
    initial_num_features = 32

    # we neeed to add a [1, 1, 1] for the res unet because in this implementation all stages of the encoder can have a stride
    pool_op_kernel_sizes = [[1, 1, 1],
                            [1, 2, 2],
                            [1, 2, 2],
                            [2, 2, 2],
                            [2, 2, 2],
                            [1, 2, 2],
                            [1, 2, 2]]

    conv_op_kernel_sizes = [[1, 3, 3],
                            [1, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3]]

    unet = FabiansUNet(num_modalities, initial_num_features, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], 2,
                       pool_op_kernel_sizes, conv_op_kernel_sizes,
                       get_default_network_config(3, dropout_p=None), num_classes,
                       blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], False, False,
                       max_features=max_num_features).cuda()

    optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95)
    loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})

    dummy_input = torch.rand((batch_size, num_modalities, *patch_size)).cuda()
    dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * num_classes).round().clamp_(0, 2).cuda().long()

    # a handful of full train iterations to make sure fwd+bwd+step fit in memory
    for _ in range(20):
        optimizer.zero_grad()
        skips = unet.encoder(dummy_input)
        print([i.shape for i in skips])
        output = unet.decoder(skips)

        l = loss(output, dummy_gt)
        l.backward()

        optimizer.step()
        if _ == 0:
            # empty the cache once so the steady-state allocation is measured
            torch.cuda.empty_cache()

    # that should do. Now take the network hyperparameters and insert them in FabiansUNet.compute_approx_vram_consumption
    # whatever number this spits out, save it to FabiansUNet.use_this_for_batch_size_computation_3D
    print(FabiansUNet.compute_approx_vram_consumption(patch_size, initial_num_features, max_num_features, num_modalities,
                                                      num_classes, pool_op_kernel_sizes,
                                                      blocks_per_stage_encoder[:len(conv_op_kernel_sizes)],
                                                      blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], 2, batch_size))
    # the output is 1230348800.0 for me
    # I increment that number by 1 to allow this configuration be be chosen
def find_2d_configuration():
    """Dev utility (requires a CUDA GPU): empirically probe GPU memory for the
    2D FabiansUNet reference configuration.

    Same procedure as find_3d_configuration, but for the 2D network; prints the
    value that should be stored (incremented by 1) in
    FabiansUNet.use_this_for_2D_configuration.
    """
    # lets compute a reference for 3D
    # we select hyperparameters here so that we get approximately the same patch size as we would get with the
    # regular unet. This is just my choice. You can do whatever you want
    # These default hyperparemeters will then be used by the experiment planner

    # since this is more parameter intensive than the UNet, we will test a configuration that has a lot of parameters
    # herefore we copy the UNet configuration for Task003_Liver
    cudnn.deterministic = False
    cudnn.benchmark = True

    patch_size = (512, 512)
    max_num_features = 512
    num_modalities = 1
    num_classes = 3
    batch_size = 12

    # now we fiddle with the network specific hyperparameters until everything just barely fits into a titanx
    blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder
    blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder
    initial_num_features = 30

    # we neeed to add a [1, 1, 1] for the res unet because in this implementation all stages of the encoder can have a stride
    pool_op_kernel_sizes = [[1, 1],
                            [2, 2],
                            [2, 2],
                            [2, 2],
                            [2, 2],
                            [2, 2],
                            [2, 2],
                            [2, 2]]

    conv_op_kernel_sizes = [[3, 3],
                            [3, 3],
                            [3, 3],
                            [3, 3],
                            [3, 3],
                            [3, 3],
                            [3, 3],
                            [3, 3]]

    unet = FabiansUNet(num_modalities, initial_num_features, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], 2,
                       pool_op_kernel_sizes, conv_op_kernel_sizes,
                       get_default_network_config(2, dropout_p=None), num_classes,
                       blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], False, False,
                       max_features=max_num_features).cuda()

    optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95)
    loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})

    dummy_input = torch.rand((batch_size, num_modalities, *patch_size)).cuda()
    dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * num_classes).round().clamp_(0, 2).cuda().long()

    # a handful of full train iterations to make sure fwd+bwd+step fit in memory
    for _ in range(20):
        optimizer.zero_grad()
        skips = unet.encoder(dummy_input)
        print([i.shape for i in skips])
        output = unet.decoder(skips)

        l = loss(output, dummy_gt)
        l.backward()

        optimizer.step()
        if _ == 0:
            # empty the cache once so the steady-state allocation is measured
            torch.cuda.empty_cache()

    # that should do. Now take the network hyperparameters and insert them in FabiansUNet.compute_approx_vram_consumption
    # whatever number this spits out, save it to FabiansUNet.use_this_for_batch_size_computation_2D
    print(FabiansUNet.compute_approx_vram_consumption(patch_size, initial_num_features, max_num_features, num_modalities,
                                                      num_classes, pool_op_kernel_sizes,
                                                      blocks_per_stage_encoder[:len(conv_op_kernel_sizes)],
                                                      blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], 2, batch_size))
    # the output is 1244233728.0 for me
    # I increment that number by 1 to allow this configuration be be chosen

    # This will not fit with 32 filters, but so will the regular U-net. We still use 32 filters in training.
    # This does not matter because we are using mixed precision training now, so a rough memory approximation is OK
# no CLI behavior; find_2d/3d_configuration above are meant to be invoked manually
if __name__ == "__main__":
    pass
| 24,392
| 46.829412
| 125
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/generic_modular_UNet.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.network_architecture.custom_modules.conv_blocks import StackedConvLayers
from nnunet.network_architecture.generic_UNet import Upsample
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
from torch import nn
import numpy as np
from torch.optim import SGD
"""
The idea behind this modular U-net ist that we decouple encoder and decoder and thus make things a) a lot more easy to
combine and b) enable easy swapping between segmentation or classification mode of the same architecture
"""
def get_default_network_config(dim=2, dropout_p=None, nonlin="LeakyReLU", norm_type="bn"):
    """
    returns a dictionary that contains pointers to conv, nonlin and norm ops and the default kwargs I like to use

    :param dim: 2 or 3 (selects the Conv2d/Conv3d op families)
    :param dropout_p: dropout probability; None disables dropout entirely
    :param nonlin: "LeakyReLU" or "ReLU"
    :param norm_type: "bn" (batch norm) or "in" (instance norm)
    :return: op/kwargs dict; the conv kernel size is intentionally absent and is
        set later by the network itself
    """
    dim_ops = {2: (nn.Conv2d, nn.Dropout2d), 3: (nn.Conv3d, nn.Dropout3d)}
    if dim not in dim_ops:
        raise NotImplementedError
    conv_op, dropout_op = dim_ops[dim]

    norm_ops = {
        ("bn", 2): nn.BatchNorm2d, ("bn", 3): nn.BatchNorm3d,
        ("in", 2): nn.InstanceNorm2d, ("in", 3): nn.InstanceNorm3d,
    }
    if (norm_type, dim) not in norm_ops:
        raise NotImplementedError

    if nonlin == "LeakyReLU":
        nonlin_op, nonlin_kwargs = nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True}
    elif nonlin == "ReLU":
        nonlin_op, nonlin_kwargs = nn.ReLU, {'inplace': True}
    else:
        raise ValueError

    # dropout_p=None means "no dropout module at all", but the kwargs are still
    # populated so downstream code can read them unconditionally
    if dropout_p is None:
        dropout_op = None
        dropout_kwargs = {'p': 0, 'inplace': True}
    else:
        dropout_kwargs = {'p': dropout_p, 'inplace': True}

    return {
        'conv_op': conv_op,
        'dropout_op': dropout_op,
        'norm_op': norm_ops[(norm_type, dim)],
        'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
        'dropout_op_kwargs': dropout_kwargs,
        'conv_op_kwargs': {'stride': 1, 'dilation': 1, 'bias': True},  # kernel size will be set by network!
        'nonlin': nonlin_op,
        'nonlin_kwargs': nonlin_kwargs,
    }
class PlainConvUNetEncoder(nn.Module):
    """Plain convolutional U-Net encoder, including the bottleneck stage.

    One StackedConvLayers per stage; per-stage output features / kernel sizes
    are recorded so a matching decoder can be built from this instance.
    """

    def __init__(self, input_channels, base_num_features, num_blocks_per_stage, feat_map_mul_on_downscale,
                 pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True,
                 max_num_features=480):
        """
        Following UNet building blocks can be added by utilizing the properties this class exposes (TODO)

        this one includes the bottleneck layer!

        :param input_channels: number of input channels (modalities)
        :param base_num_features: features of the first stage; multiplied by
            feat_map_mul_on_downscale**stage and capped at max_num_features
        :param num_blocks_per_stage: int or per-stage list/tuple of conv block counts
        :param feat_map_mul_on_downscale: feature multiplier per stage
        :param pool_op_kernel_sizes: per-stage strides (first stage may be [1, ...])
        :param conv_kernel_sizes: per-stage conv kernel sizes
        :param props: op dict as produced by get_default_network_config
        :param default_return_skips: whether forward() returns all stage outputs by default
        :param max_num_features: cap on per-stage feature count
        """
        super(PlainConvUNetEncoder, self).__init__()

        self.default_return_skips = default_return_skips
        self.props = props

        self.stages = []
        self.stage_output_features = []
        self.stage_pool_kernel_size = []
        self.stage_conv_op_kernel_size = []

        assert len(pool_op_kernel_sizes) == len(conv_kernel_sizes)

        num_stages = len(conv_kernel_sizes)

        # a plain int means the same number of conv blocks in every stage
        if not isinstance(num_blocks_per_stage, (list, tuple)):
            num_blocks_per_stage = [num_blocks_per_stage] * num_stages
        else:
            assert len(num_blocks_per_stage) == num_stages

        self.num_blocks_per_stage = num_blocks_per_stage  # decoder may need this

        current_input_features = input_channels
        for stage in range(num_stages):
            current_output_features = min(int(base_num_features * feat_map_mul_on_downscale ** stage), max_num_features)
            current_kernel_size = conv_kernel_sizes[stage]
            current_pool_kernel_size = pool_op_kernel_sizes[stage]

            current_stage = StackedConvLayers(current_input_features, current_output_features, current_kernel_size,
                                              props, num_blocks_per_stage[stage], current_pool_kernel_size)

            self.stages.append(current_stage)
            self.stage_output_features.append(current_output_features)
            self.stage_conv_op_kernel_size.append(current_kernel_size)
            self.stage_pool_kernel_size.append(current_pool_kernel_size)

            # update current_input_features
            current_input_features = current_output_features

        self.stages = nn.ModuleList(self.stages)

    def forward(self, x, return_skips=None):
        """
        :param x: input tensor
        :param return_skips: if None then self.default_return_skips is used
        :return: list of all stage outputs (bottleneck last) when skips are
            requested, otherwise only the bottleneck output
        """
        # BUGFIX: resolve the flag BEFORE collecting skips. Previously the skips
        # were appended based on self.default_return_skips only, so calling
        # forward(x, return_skips=True) on an encoder constructed with
        # default_return_skips=False returned an empty list.
        if return_skips is None:
            return_skips = self.default_return_skips

        skips = []

        for s in self.stages:
            x = s(x)
            if return_skips:
                skips.append(x)

        if return_skips:
            return skips
        else:
            return x

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_modalities, pool_op_kernel_sizes, num_blocks_per_stage_encoder,
                                        feat_map_mul_on_downscale, batch_size):
        """Constant proxy proportional to activation memory of the encoder
        (not real VRAM); used by the experiment planner to size configurations."""
        npool = len(pool_op_kernel_sizes) - 1

        current_shape = np.array(patch_size)

        # first stage plus the input itself
        tmp = num_blocks_per_stage_encoder[0] * np.prod(current_shape) * base_num_features \
              + num_modalities * np.prod(current_shape)

        num_feat = base_num_features

        for p in range(1, npool + 1):
            current_shape = current_shape / np.array(pool_op_kernel_sizes[p])
            num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features)
            num_convs = num_blocks_per_stage_encoder[p]
            print(p, num_feat, num_convs, current_shape)
            tmp += num_convs * np.prod(current_shape) * num_feat
        return tmp * batch_size
class PlainConvUNetDecoder(nn.Module):
    """Plain convolutional U-Net decoder mirroring a PlainConvUNetEncoder (`previous`)."""

    def __init__(self, previous, num_classes, num_blocks_per_stage=None, network_props=None, deep_supervision=False,
                 upscale_logits=False):
        """
        :param previous: the encoder instance this decoder attaches to
        :param num_classes: number of segmentation output channels
        :param num_blocks_per_stage: conv blocks per decoder stage; if None it is
            derived from the encoder (its stages reversed, without the bottleneck)
        :param network_props: op dict; if None the encoder's props are reused
        :param deep_supervision: also emit segmentation maps from intermediate stages
        :param upscale_logits: upsample deep-supervision outputs to input resolution
        """
        super(PlainConvUNetDecoder, self).__init__()
        self.num_classes = num_classes
        self.deep_supervision = deep_supervision
        """
        We assume the bottleneck is part of the encoder, so we can start with upsample -> concat here
        """
        previous_stages = previous.stages
        previous_stage_output_features = previous.stage_output_features
        previous_stage_pool_kernel_size = previous.stage_pool_kernel_size
        previous_stage_conv_op_kernel_size = previous.stage_conv_op_kernel_size

        if network_props is None:
            self.props = previous.props
        else:
            self.props = network_props

        # choose transposed-conv and interpolation flavor matching the conv dimensionality
        if self.props['conv_op'] == nn.Conv2d:
            transpconv = nn.ConvTranspose2d
            upsample_mode = "bilinear"
        elif self.props['conv_op'] == nn.Conv3d:
            transpconv = nn.ConvTranspose3d
            upsample_mode = "trilinear"
        else:
            raise ValueError("unknown convolution dimensionality, conv op: %s" % str(self.props['conv_op']))

        if num_blocks_per_stage is None:
            num_blocks_per_stage = previous.num_blocks_per_stage[:-1][::-1]

        assert len(num_blocks_per_stage) == len(previous.num_blocks_per_stage) - 1

        self.stage_pool_kernel_size = previous_stage_pool_kernel_size
        self.stage_output_features = previous_stage_output_features
        self.stage_conv_op_kernel_size = previous_stage_conv_op_kernel_size

        num_stages = len(previous_stages) - 1  # we have one less as the first stage here is what comes after the
        # bottleneck

        self.tus = []
        self.stages = []
        self.deep_supervision_outputs = []

        # only used for upsample_logits
        cum_upsample = np.cumprod(np.vstack(self.stage_pool_kernel_size), axis=0).astype(int)

        # iterate encoder stages from just-below-bottleneck up to full resolution
        for i, s in enumerate(np.arange(num_stages)[::-1]):
            features_below = previous_stage_output_features[s + 1]
            features_skip = previous_stage_output_features[s]

            # transposed conv with kernel == stride: exact inverse of the pooling step
            self.tus.append(transpconv(features_below, features_skip, previous_stage_pool_kernel_size[s + 1],
                                       previous_stage_pool_kernel_size[s + 1], bias=False))
            # after we tu we concat features so now we have 2xfeatures_skip
            self.stages.append(StackedConvLayers(2 * features_skip, features_skip,
                                                 previous_stage_conv_op_kernel_size[s], self.props,
                                                 num_blocks_per_stage[i]))

            # intermediate seg heads for deep supervision (not on the final stage s == 0,
            # which is covered by self.segmentation_output)
            if deep_supervision and s != 0:
                seg_layer = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False)
                if upscale_logits:
                    upsample = Upsample(scale_factor=cum_upsample[s], mode=upsample_mode)
                    self.deep_supervision_outputs.append(nn.Sequential(seg_layer, upsample))
                else:
                    self.deep_supervision_outputs.append(seg_layer)

        # final 1x1(x1) seg head at full resolution
        self.segmentation_output = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False)

        self.tus = nn.ModuleList(self.tus)
        self.stages = nn.ModuleList(self.stages)
        self.deep_supervision_outputs = nn.ModuleList(self.deep_supervision_outputs)

    def forward(self, skips, gt=None, loss=None):
        # skips come from the encoder. They are sorted so that the bottleneck is last in the list
        # what is maybe not perfect is that the TUs and stages here are sorted the other way around
        # so let's just reverse the order of skips
        # NOTE: when gt and loss are given, the deep-supervision outputs returned
        # are the LOSS VALUES instead of the segmentation maps
        skips = skips[::-1]
        seg_outputs = []

        x = skips[0]  # this is the bottleneck

        for i in range(len(self.tus)):
            x = self.tus[i](x)
            x = torch.cat((x, skips[i + 1]), dim=1)
            x = self.stages[i](x)
            if self.deep_supervision and (i != len(self.tus) - 1):
                tmp = self.deep_supervision_outputs[i](x)
                if gt is not None:
                    tmp = loss(tmp, gt)
                seg_outputs.append(tmp)

        segmentation = self.segmentation_output(x)

        if self.deep_supervision:
            tmp = segmentation
            if gt is not None:
                tmp = loss(tmp, gt)
            seg_outputs.append(tmp)
            return seg_outputs[::-1]  # seg_outputs are ordered so that the seg from the highest layer is first, the seg from
            # the bottleneck of the UNet last
        else:
            return segmentation

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_classes, pool_op_kernel_sizes, num_blocks_per_stage_decoder,
                                        feat_map_mul_on_downscale, batch_size):
        """
        This only applies for num_blocks_per_stage and convolutional_upsampling=True
        not real vram consumption. just a constant term to which the vram consumption will be approx proportional
        (+ offset for parameter storage)
        :param patch_size: spatial size of the input patch
        :param base_num_features: features of the highest-resolution stage
        :param max_num_features: feature-count cap
        :param num_classes: segmentation output channels
        :param pool_op_kernel_sizes: per-stage downsampling factors
        :param num_blocks_per_stage_decoder: conv blocks per decoder stage
        :param feat_map_mul_on_downscale: feature multiplier per downsampling
        :param batch_size: training batch size
        :return: proxy value proportional to decoder activation memory
        """
        npool = len(pool_op_kernel_sizes) - 1

        current_shape = np.array(patch_size)
        # full-resolution stage (+1 for the transposed conv) plus the output logits
        tmp = (num_blocks_per_stage_decoder[-1] + 1) * np.prod(current_shape) * base_num_features + num_classes * np.prod(current_shape)

        num_feat = base_num_features

        for p in range(1, npool):
            current_shape = current_shape / np.array(pool_op_kernel_sizes[p])
            num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features)
            num_convs = num_blocks_per_stage_decoder[-(p+1)] + 1
            print(p, num_feat, num_convs, current_shape)
            tmp += num_convs * np.prod(current_shape) * num_feat
        return tmp * batch_size
class PlainConvUNet(SegmentationNetwork):
    """
    Plain (non-residual) U-Net assembled from a PlainConvUNetEncoder and a
    PlainConvUNetDecoder. The two class attributes are reference constants used
    elsewhere for batch size computation.
    """
    use_this_for_batch_size_computation_2D = 1167982592.0
    use_this_for_batch_size_computation_3D = 1152286720.0

    def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale,
                 pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder,
                 deep_supervision=False, upscale_logits=False, max_features=512, initializer=None):
        super(PlainConvUNet, self).__init__()
        self.num_classes = num_classes
        self.conv_op = props['conv_op']
        self.encoder = PlainConvUNetEncoder(input_channels, base_num_features, num_blocks_per_stage_encoder,
                                            feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes,
                                            props, default_return_skips=True, max_num_features=max_features)
        self.decoder = PlainConvUNetDecoder(self.encoder, num_classes, num_blocks_per_stage_decoder, props,
                                            deep_supervision, upscale_logits)
        if initializer is not None:
            self.apply(initializer)

    def forward(self, x):
        # encoder produces the skip connections, decoder consumes them
        return self.decoder(self.encoder(x))

    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
                                        num_modalities, num_classes, pool_op_kernel_sizes, num_blocks_per_stage_encoder,
                                        num_blocks_per_stage_decoder, feat_map_mul_on_downscale, batch_size):
        """Sum of the encoder and decoder VRAM proxy estimates."""
        encoder_estimate = PlainConvUNetEncoder.compute_approx_vram_consumption(patch_size, base_num_features,
                                                                                max_num_features, num_modalities,
                                                                                pool_op_kernel_sizes,
                                                                                num_blocks_per_stage_encoder,
                                                                                feat_map_mul_on_downscale, batch_size)
        decoder_estimate = PlainConvUNetDecoder.compute_approx_vram_consumption(patch_size, base_num_features,
                                                                                max_num_features, num_classes,
                                                                                pool_op_kernel_sizes,
                                                                                num_blocks_per_stage_decoder,
                                                                                feat_map_mul_on_downscale, batch_size)
        return encoder_estimate + decoder_estimate

    @staticmethod
    def compute_reference_for_vram_consumption_3d():
        """VRAM proxy for the reference 3d configuration (160x128x128, 6 levels)."""
        patch_size = (160, 128, 128)
        pool_op_kernel_sizes = ((1, 1, 1),
                                (2, 2, 2),
                                (2, 2, 2),
                                (2, 2, 2),
                                (2, 2, 2),
                                (2, 2, 2))
        conv_per_stage_encoder = (2, 2, 2, 2, 2, 2)
        conv_per_stage_decoder = (2, 2, 2, 2, 2)
        return PlainConvUNet.compute_approx_vram_consumption(patch_size, 32, 512, 4, 3, pool_op_kernel_sizes,
                                                             conv_per_stage_encoder, conv_per_stage_decoder, 2, 2)

    @staticmethod
    def compute_reference_for_vram_consumption_2d():
        """VRAM proxy for the reference 2d configuration (256x256, 7 levels)."""
        patch_size = (256, 256)
        pool_op_kernel_sizes = (
            (1, 1),  # (256, 256)
            (2, 2),  # (128, 128)
            (2, 2),  # (64, 64)
            (2, 2),  # (32, 32)
            (2, 2),  # (16, 16)
            (2, 2),  # (8, 8)
            (2, 2)  # (4, 4)
        )
        conv_per_stage_encoder = (2, 2, 2, 2, 2, 2, 2)
        conv_per_stage_decoder = (2, 2, 2, 2, 2, 2)
        return PlainConvUNet.compute_approx_vram_consumption(patch_size, 32, 512, 4, 3, pool_op_kernel_sizes,
                                                             conv_per_stage_encoder, conv_per_stage_decoder, 2, 56)
if __name__ == "__main__":
    # Manual smoke test / debugging entry point: builds the 2D reference
    # configuration, runs one forward/backward/step on random data on the GPU
    # and renders the network graph with hiddenlayer.
    conv_op_kernel_sizes = ((3, 3),
                            (3, 3),
                            (3, 3),
                            (3, 3),
                            (3, 3),
                            (3, 3),
                            (3, 3))
    pool_op_kernel_sizes = ((1, 1),
                            (2, 2),
                            (2, 2),
                            (2, 2),
                            (2, 2),
                            (2, 2),
                            (2, 2))
    patch_size = (256, 256)
    batch_size = 56
    # 4 input modalities, 32 base features, 7 encoder stages (2 blocks each),
    # 4 output classes, no deep supervision / logit upscaling
    unet = PlainConvUNet(4, 32, (2, 2, 2, 2, 2, 2, 2), 2, pool_op_kernel_sizes, conv_op_kernel_sizes,
                         get_default_network_config(2, dropout_p=None), 4, (2, 2, 2, 2, 2, 2), False, False, max_features=512).cuda()
    optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95)
    # print the VRAM proxy constants for both reference configurations
    unet.compute_reference_for_vram_consumption_3d()
    unet.compute_reference_for_vram_consumption_2d()
    # random image batch and random integer segmentation with labels in [0, 3]
    dummy_input = torch.rand((batch_size, 4, *patch_size)).cuda()
    dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * 4).round().clamp_(0, 3).cuda().long()
    # one full optimization step to verify everything is wired up correctly
    optimizer.zero_grad()
    skips = unet.encoder(dummy_input)
    print([i.shape for i in skips])
    loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'smooth_in_nom': True,
                           'do_bg': False, 'rebalance_weights': None, 'background_weight': 1}, {})
    output = unet.decoder(skips)
    l = loss(output, dummy_gt)
    l.backward()
    optimizer.step()
    # graph visualization (third-party, imported lazily on purpose)
    import hiddenlayer as hl
    g = hl.build_graph(unet, dummy_input)
    g.save("/home/fabian/test.pdf")
    # NOTE: the string literal below is the 3D variant of the same smoke test,
    # kept by the original author as dead code; left byte-identical here.
    """conv_op_kernel_sizes = ((3, 3, 3),
                            (3, 3, 3),
                            (3, 3, 3),
                            (3, 3, 3),
                            (3, 3, 3),
                            (3, 3, 3))
    pool_op_kernel_sizes = ((1, 1, 1),
                            (2, 2, 2),
                            (2, 2, 2),
                            (2, 2, 2),
                            (2, 2, 2),
                            (2, 2, 2))
    patch_size = (160, 128, 128)
    unet = PlainConvUNet(4, 32, (2, 2, 2, 2, 2, 2), 2, pool_op_kernel_sizes, conv_op_kernel_sizes,
                         get_default_network_config(3, dropout_p=None), 4, (2, 2, 2, 2, 2), False, False, max_features=512).cuda()
    optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95)
    unet.compute_reference_for_vram_consumption_3d()
    unet.compute_reference_for_vram_consumption_2d()
    dummy_input = torch.rand((2, 4, *patch_size)).cuda()
    dummy_gt = (torch.rand((2, 1, *patch_size)) * 4).round().clamp_(0, 3).cuda().long()
    optimizer.zero_grad()
    skips = unet.encoder(dummy_input)
    print([i.shape for i in skips])
    loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'smooth_in_nom': True,
                           'do_bg': False, 'rebalance_weights': None, 'background_weight': 1}, {})
    output = unet.decoder(skips)
    l = loss(output, dummy_gt)
    l.backward()
    optimizer.step()
    import hiddenlayer as hl
    g = hl.build_graph(unet, dummy_input)
    g.save("/home/fabian/test.pdf")"""
| 19,951
| 41.451064
| 136
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/custom_modules/helperModules.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
class Identity(nn.Module):
    """
    Pass-through module: forward(x) returns x unchanged.
    The constructor accepts and ignores arbitrary arguments so this class can
    stand in for any layer factory (e.g. a dropout or norm op set to None).
    """

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, input):
        # identity mapping; the input object itself is returned
        return input
class MyGroupNorm(nn.GroupNorm):
    """
    GroupNorm variant whose constructor takes num_channels as the first
    positional argument and defaults num_groups to 8.
    """

    def __init__(self, num_channels, eps=1e-5, affine=True, num_groups=8):
        super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps, affine=affine)
| 1,051
| 34.066667
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/custom_modules/conv_blocks.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from nnunet.network_architecture.custom_modules.helperModules import Identity
from torch import nn
class ConvDropoutNormReLU(nn.Module):
    """
    Conv -> (optional) Dropout -> (optional) Norm -> Nonlinearity.

    If network_props['dropout_op'] is None there is no dropout; if
    network_props['norm_op'] is None there is no norm (Identity is used in
    both cases). Padding is chosen so that 'same' output size results for odd
    kernel sizes.
    """

    def __init__(self, input_channels, output_channels, kernel_size, network_props):
        """
        :param input_channels: number of input feature maps
        :param output_channels: number of output feature maps
        :param kernel_size: per-dimension kernel size sequence
        :param network_props: dict with conv/dropout/norm/nonlin ops and their kwargs
        """
        super().__init__()
        # network_props is a dict and mutable, so we deepcopy to be safe
        props = deepcopy(network_props)

        padding = [(k - 1) // 2 for k in kernel_size]
        self.conv = props['conv_op'](input_channels, output_channels, kernel_size,
                                     padding=padding, **props['conv_op_kwargs'])
        self.do = Identity() if props['dropout_op'] is None else props['dropout_op'](**props['dropout_op_kwargs'])
        self.norm = Identity() if props['norm_op'] is None else props['norm_op'](output_channels,
                                                                                **props['norm_op_kwargs'])
        self.nonlin = props['nonlin'](**props['nonlin_kwargs'])
        self.all = nn.Sequential(self.conv, self.do, self.norm, self.nonlin)

    def forward(self, x):
        return self.all(x)
class StackedConvLayers(nn.Module):
    """
    num_convs ConvDropoutNormReLU blocks in sequence. Only the first block may
    change the channel count and apply first_stride; the rest keep shape.
    If network_props['dropout_op'] is None there is no dropout; if
    network_props['norm_op'] is None there is no norm.
    """

    def __init__(self, input_channels, output_channels, kernel_size, network_props, num_convs, first_stride=None):
        """
        :param input_channels: channels entering the first conv
        :param output_channels: channels produced by every conv in the stack
        :param kernel_size: per-dimension kernel size sequence
        :param network_props: dict with conv/dropout/norm/nonlin ops and their kwargs
        :param num_convs: total number of ConvDropoutNormReLU blocks
        :param first_stride: optional stride applied by the first conv only
        """
        super().__init__()
        # deepcopy twice: props are mutable dicts and the first block gets its
        # own copy with the stride override
        props_rest = deepcopy(network_props)
        props_first = deepcopy(network_props)
        if first_stride is not None:
            props_first['conv_op_kwargs']['stride'] = first_stride

        blocks = [ConvDropoutNormReLU(input_channels, output_channels, kernel_size, props_first)]
        blocks += [ConvDropoutNormReLU(output_channels, output_channels, kernel_size, props_rest)
                   for _ in range(num_convs - 1)]
        self.convs = nn.Sequential(*blocks)

    def forward(self, x):
        return self.convs(x)
class BasicResidualBlock(nn.Module):
    """
    conv-norm-nonlin, conv-norm, additive skip, final nonlin.

    The skip is projected with a 1x1 conv + norm whenever the stride is not all
    ones or the channel count changes; otherwise it is the identity.
    """

    def __init__(self, in_planes, out_planes, kernel_size, props, stride=None):
        """
        :param in_planes: input channels
        :param out_planes: output channels
        :param kernel_size: per-dimension kernel size sequence
        :param props: dict with conv/dropout/norm/nonlin ops and their kwargs
                      (mutated in place: conv stride is reset to 1, as in the original)
        :param stride: optional stride for the first conv and the skip projection
        """
        super().__init__()
        self.kernel_size = kernel_size
        props['conv_op_kwargs']['stride'] = 1
        self.stride = stride
        self.props = props
        self.out_planes = out_planes
        self.in_planes = in_planes

        padding = [(k - 1) // 2 for k in kernel_size]

        # only the first conv carries the (optional) stride
        if stride is None:
            kwargs_conv1 = props['conv_op_kwargs']
        else:
            kwargs_conv1 = deepcopy(props['conv_op_kwargs'])
            kwargs_conv1['stride'] = stride

        self.conv1 = props['conv_op'](in_planes, out_planes, kernel_size, padding=padding, **kwargs_conv1)
        self.norm1 = props['norm_op'](out_planes, **props['norm_op_kwargs'])
        self.nonlin1 = props['nonlin'](**props['nonlin_kwargs'])

        # dropout only if a nonzero probability is configured
        self.dropout = Identity() if props['dropout_op_kwargs']['p'] == 0 else \
            props['dropout_op'](**props['dropout_op_kwargs'])

        self.conv2 = props['conv_op'](out_planes, out_planes, kernel_size, padding=padding,
                                      **props['conv_op_kwargs'])
        self.norm2 = props['norm_op'](out_planes, **props['norm_op_kwargs'])
        self.nonlin2 = props['nonlin'](**props['nonlin_kwargs'])

        needs_projection = (stride is not None and any(s != 1 for s in stride)) or in_planes != out_planes
        if needs_projection:
            stride_here = 1 if stride is None else stride
            self.downsample_skip = nn.Sequential(props['conv_op'](in_planes, out_planes, 1, stride_here, bias=False),
                                                 props['norm_op'](out_planes, **props['norm_op_kwargs']))
        else:
            self.downsample_skip = lambda x: x

    def forward(self, x):
        out = self.nonlin1(self.norm1(self.dropout(self.conv1(x))))
        out = self.norm2(self.conv2(out))
        out = out + self.downsample_skip(x)
        return self.nonlin2(out)
class ResidualBottleneckBlock(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, props, stride=None):
        """
        Residual bottleneck block: 1x1 reduce (to out_planes // 4) -> kxk conv ->
        1x1 expand, each followed by norm + nonlin, with an additive (projected
        if necessary) skip connection.

        :param in_planes: input channels
        :param out_planes: output channels (bottleneck width is out_planes // 4)
        :param kernel_size: kernel of the middle convolution
        :param props: dict with conv/norm/nonlin ops and their kwargs
                      (mutated in place: conv stride is reset to 1, as elsewhere)
        :param stride: optional stride for the first conv and the skip projection
        :raises NotImplementedError: if a nonzero dropout probability is configured
        """
        super().__init__()
        # Bugfix: the original guard was
        #     if props['dropout_op_kwargs'] is None and props['dropout_op_kwargs'] > 0:
        # which can never be True (and compares the kwargs dict itself instead of
        # its 'p' entry). The intent is to reject nonzero dropout, which this
        # block does not implement.
        dropout_kwargs = props.get('dropout_op_kwargs')
        if dropout_kwargs is not None and dropout_kwargs.get('p', 0) != 0:
            raise NotImplementedError("ResidualBottleneckBlock does not yet support dropout!")

        self.kernel_size = kernel_size
        props['conv_op_kwargs']['stride'] = 1
        self.stride = stride
        self.props = props
        self.out_planes = out_planes
        self.in_planes = in_planes
        self.bottleneck_planes = out_planes // 4

        # only the first conv carries the (optional) stride
        if stride is not None:
            kwargs_conv1 = deepcopy(props['conv_op_kwargs'])
            kwargs_conv1['stride'] = stride
        else:
            kwargs_conv1 = props['conv_op_kwargs']

        # 1x1 reduction to the bottleneck width
        self.conv1 = props['conv_op'](in_planes, self.bottleneck_planes, [1 for _ in kernel_size],
                                      padding=[0 for i in kernel_size], **kwargs_conv1)
        self.norm1 = props['norm_op'](self.bottleneck_planes, **props['norm_op_kwargs'])
        self.nonlin1 = props['nonlin'](**props['nonlin_kwargs'])

        # spatial convolution at bottleneck width
        self.conv2 = props['conv_op'](self.bottleneck_planes, self.bottleneck_planes, kernel_size,
                                      padding=[(i - 1) // 2 for i in kernel_size], **props['conv_op_kwargs'])
        self.norm2 = props['norm_op'](self.bottleneck_planes, **props['norm_op_kwargs'])
        self.nonlin2 = props['nonlin'](**props['nonlin_kwargs'])

        # 1x1 expansion back to out_planes
        self.conv3 = props['conv_op'](self.bottleneck_planes, out_planes, [1 for _ in kernel_size],
                                      padding=[0 for i in kernel_size], **props['conv_op_kwargs'])
        self.norm3 = props['norm_op'](out_planes, **props['norm_op_kwargs'])
        self.nonlin3 = props['nonlin'](**props['nonlin_kwargs'])

        # project the skip when the spatial size or channel count changes
        if (self.stride is not None and any((i != 1 for i in self.stride))) or (in_planes != out_planes):
            stride_here = stride if stride is not None else 1
            self.downsample_skip = nn.Sequential(props['conv_op'](in_planes, out_planes, 1, stride_here, bias=False),
                                                 props['norm_op'](out_planes, **props['norm_op_kwargs']))
        else:
            self.downsample_skip = lambda x: x

    def forward(self, x):
        residual = x
        out = self.nonlin1(self.norm1(self.conv1(x)))
        out = self.nonlin2(self.norm2(self.conv2(out)))
        out = self.norm3(self.conv3(out))
        residual = self.downsample_skip(residual)
        out += residual
        return self.nonlin3(out)
class ResidualLayer(nn.Module):
    """
    A sequence of num_blocks residual blocks. The first block may change the
    channel count and apply first_stride; the remaining blocks keep the shape.
    """

    def __init__(self, input_channels, output_channels, kernel_size, network_props, num_blocks, first_stride=None, block=BasicResidualBlock):
        super().__init__()
        # network_props is a dict and mutable, so we deepcopy to be safe
        props = deepcopy(network_props)

        blocks = [block(input_channels, output_channels, kernel_size, props, first_stride)]
        for _ in range(num_blocks - 1):
            blocks.append(block(output_channels, output_channels, kernel_size, props))
        self.convs = nn.Sequential(*blocks)

    def forward(self, x):
        return self.convs(x)
| 9,127
| 38.860262
| 143
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/custom_modules/feature_response_normalization.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.utilities.tensor_utilities import mean_tensor
from torch import nn
import torch
from torch.nn.parameter import Parameter
import torch.jit
class FRN3D(nn.Module):
    """
    Filter Response Normalization for 5-D (N, C, X, Y, Z) inputs: normalizes by
    the root mean square over the spatial axes, then applies a learned affine
    transform and a learned thresholded activation (max with tau).
    """

    def __init__(self, num_features: int, eps=1e-6, **kwargs):
        super().__init__()
        self.eps = eps
        self.num_features = num_features
        # one scale/shift/threshold per channel, broadcastable over N and space
        param_shape = (1, num_features, 1, 1, 1)
        self.weight = Parameter(torch.ones(*param_shape), True)
        self.bias = Parameter(torch.zeros(*param_shape), True)
        self.tau = Parameter(torch.zeros(*param_shape), True)

    def forward(self, x: torch.Tensor):
        # nu2 = mean of x^2 over the spatial dims (2, 3, 4)
        nu2 = mean_tensor(x * x, [2, 3, 4], keepdim=True)
        x = x * torch.rsqrt(nu2 + self.eps)
        return torch.max(self.weight * x + self.bias, self.tau)
if __name__ == "__main__":
    # smoke test: push a random 5-D batch through FRN3D
    dummy = torch.rand((3, 32, 16, 16, 16))
    layer = FRN3D(32)
    result = layer(dummy)
| 1,547
| 34.181818
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/network_architecture/custom_modules/mish.py
|
############
# https://github.com/lessw2020/mish/blob/master/mish.py
# This code was taken from the repo above and was not created by me (Fabian)! Full credit goes to the original authors
############
import torch
import torch.nn as nn
import torch.nn.functional as F
# Mish - "Mish: A Self Regularized Non-Monotonic Neural Activation Function"
# https://arxiv.org/abs/1908.08681v1
# implemented for PyTorch / FastAI by lessw2020
# github: https://github.com/lessw2020/mish
class Mish(nn.Module):
    """
    Mish activation: x * tanh(softplus(x)).
    "Mish: A Self Regularized Non-Monotonic Neural Activation Function"
    (https://arxiv.org/abs/1908.08681v1), implementation by lessw2020.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # kept inline on purpose: the original author measured ~1 s/epoch
        # saved (V100) versus using a temporary variable
        return x * torch.tanh(F.softplus(x))
| 730
| 29.458333
| 118
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/file_conversions.py
|
from typing import Tuple, List
from skimage import io
import SimpleITK as sitk
import numpy as np
import tifffile
def convert_2d_image_to_nifti(filename: str, output_name: str, spacing=(999, 1, 1), transform=None, is_seg: bool = False) -> None:
    """
    Read an image (any format skimage.io.imread understands) and export it as
    nifti(s): one file per color channel for images (_0000.nii.gz, _0001.nii.gz,
    ...), a single .nii.gz for segmentations.

    Spacing can be ignored most of the time. 2D images are often natural images
    without a meaningful voxel spacing - resample them yourself before
    conversion. Datasets converted this way only work with the 2d U-Net
    configuration of nnU-Net.

    :param filename: input image path
    :param output_name: output path WITHOUT file ending; suffix (_0000) and
           ending (.nii.gz) are appended here
    :param spacing: spacing written into the nifti (reversed, sitk convention)
    :param transform: optional callable applied to the image after loading
    :param is_seg: treat the image as a segmentation (single channel, no suffix)
    """
    img = io.imread(filename)
    if transform is not None:
        img = transform(img)

    if len(img.shape) == 2:
        # grayscale: fake a channel axis and a z axis
        img = img[None, None]
    else:
        assert len(img.shape) == 3, "image should be 3d with color channel last but has shape %s" % str(img.shape)
        # color channel is assumed last; move it to the front and add a z axis
        img = img.transpose((2, 0, 1))[:, None]
    # image is now (c, x, x, z) where x=1 since it's 2d

    if is_seg:
        assert img.shape[0] == 1, 'segmentations can only have one color channel, not sure what happened here'

    for channel_id, channel in enumerate(img):
        itk_img = sitk.GetImageFromArray(channel)
        itk_img.SetSpacing(list(spacing)[::-1])
        if is_seg:
            sitk.WriteImage(itk_img, output_name + ".nii.gz")
        else:
            sitk.WriteImage(itk_img, output_name + "_%04.0d.nii.gz" % channel_id)
def convert_3d_tiff_to_nifti(filenames: List[str], output_name: str, spacing: Tuple[tuple, list], transform=None, is_seg=False) -> None:
    """
    Convert one 3d tiff file per modality into niftis (_0000.nii.gz, ... for
    images, a single .nii.gz for segmentations). For single-modality data pass a
    one-element list. Each file is read individually with tifffile.imread.

    :param filenames: list of 3d tiff paths, one per modality
    :param output_name: output path WITHOUT file ending
    :param spacing: spacing written into the nifti (reversed, sitk convention)
    :param transform: optional callable applied to each image after loading
    :param is_seg: treat the (single) file as a segmentation
    """
    if is_seg:
        assert len(filenames) == 1

    for mod_id, fname in enumerate(filenames):
        img = tifffile.imread(fname)
        if transform is not None:
            img = transform(img)

        itk_img = sitk.GetImageFromArray(img)
        itk_img.SetSpacing(list(spacing)[::-1])
        if is_seg:
            sitk.WriteImage(itk_img, output_name + ".nii.gz")
        else:
            sitk.WriteImage(itk_img, output_name + "_%04.0d.nii.gz" % mod_id)
def convert_2d_segmentation_nifti_to_img(nifti_file: str, output_filename: str, transform=None, export_dtype=np.uint8):
    """
    Export a 2d segmentation nifti (shape (1, x, y)) back to an image file.

    :param nifti_file: input nifti path
    :param output_filename: output image path (format chosen by skimage from the ending)
    :param transform: optional callable applied to the 2d array before saving
    :param export_dtype: dtype the array is cast to before writing
    """
    arr = sitk.GetArrayFromImage(sitk.ReadImage(nifti_file))
    assert arr.shape[0] == 1, "This function can only export 2D segmentations!"
    arr = arr[0]
    if transform is not None:
        arr = transform(arr)
    io.imsave(output_filename, arr.astype(export_dtype), check_contrast=False)
def convert_3d_segmentation_nifti_to_tiff(nifti_file: str, output_filename: str, transform=None, export_dtype=np.uint8):
    """
    Export a 3d segmentation nifti to a 3d tiff file.

    :param nifti_file: input nifti path
    :param output_filename: output tiff path
    :param transform: optional callable applied to the 3d array before saving
    :param export_dtype: dtype the array is cast to before writing
    """
    arr = sitk.GetArrayFromImage(sitk.ReadImage(nifti_file))
    assert len(arr.shape) == 3, "This function can only export 3D segmentations!"
    if transform is not None:
        arr = transform(arr)
    tifffile.imsave(output_filename, arr.astype(export_dtype))
| 4,314
| 38.587156
| 136
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/sitk_stuff.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import SimpleITK as sitk
def copy_geometry(image: sitk.Image, ref: sitk.Image):
    """Copy origin, direction and spacing from ref onto image (in place) and return image."""
    for prop in ("Origin", "Direction", "Spacing"):
        getattr(image, "Set" + prop)(getattr(ref, "Get" + prop)())
    return image
| 908
| 36.875
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/recursive_delete_npz.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
import argparse
import os
def recursive_delete_npz(current_directory: str):
    """
    Recursively delete all .npz files below current_directory, sparing
    *segFromPrevStage.npz files and skipping 'pred_next_stage' subfolders.
    DANGEROUS: this permanently removes files.
    """
    for npz_file in subfiles(current_directory, join=True, suffix=".npz"):
        if npz_file.endswith("segFromPrevStage.npz"):
            continue  # to be extra safe
        os.remove(npz_file)
    for sub in subdirs(current_directory, join=False):
        if sub != "pred_next_stage":
            recursive_delete_npz(join(current_directory, sub))
if __name__ == "__main__":
    # CLI entry point: recursively deletes .npz files below the given folder.
    # Intended for cleaning up after figure_out_what_to_submit; irreversible.
    parser = argparse.ArgumentParser(usage="USE THIS RESPONSIBLY! DANGEROUS! I (Fabian) use this to remove npz files "
                                           "after I ran figure_out_what_to_submit")
    parser.add_argument("-f", help="folder", required=True)
    args = parser.parse_args()
    recursive_delete_npz(args.f)
| 1,554
| 39.921053
| 118
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/one_hot_encoding.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def to_one_hot(seg, all_seg_labels=None):
    """
    One-hot encode a segmentation array.

    :param seg: integer segmentation array of arbitrary shape
    :param all_seg_labels: labels to encode (one output channel each, in order);
           defaults to the unique values present in seg
    :return: array of shape (len(labels), *seg.shape) and dtype seg.dtype with
             1 where seg equals the channel's label, 0 elsewhere
    """
    labels = np.unique(seg) if all_seg_labels is None else all_seg_labels
    onehot = np.zeros((len(labels), *seg.shape), dtype=seg.dtype)
    for channel, label in enumerate(labels):
        onehot[channel][seg == label] = 1
    return onehot
| 990
| 38.64
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/file_endings.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
def remove_trailing_slash(filename: str):
    """Strip all trailing '/' characters from filename and return the result."""
    return filename.rstrip('/')
def maybe_add_0000_to_all_niigz(folder):
    """
    Rename every *.nii.gz file in folder to *_0000.nii.gz unless it already
    carries the _0000 modality suffix.
    """
    for fname in subfiles(folder, suffix='.nii.gz'):
        fname = remove_trailing_slash(fname)
        if fname.endswith('_0000.nii.gz'):
            continue
        os.rename(fname, fname[:-7] + '_0000.nii.gz')
| 1,130
| 35.483871
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/nd_softmax.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
import torch.nn.functional as F
def softmax_helper(x):
    """Apply softmax over dim 1 (the channel dimension of NCHW/NCDHW tensors).

    Replaces the original ``softmax_helper = lambda x: ...`` assignment with a
    proper def (PEP 8 E731); call signature and behavior are unchanged.
    """
    return F.softmax(x, 1)
| 801
| 35.454545
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/tensor_utilities.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
def sum_tensor(inp, axes, keepdim=False):
    """
    Sum a tensor over several axes.

    :param inp: torch tensor
    :param axes: iterable of axis indices (deduplicated internally)
    :param keepdim: keep reduced dims as size 1
    :return: reduced tensor
    """
    reduction_axes = np.unique(axes).astype(int)
    if keepdim:
        for axis in reduction_axes:
            inp = inp.sum(int(axis), keepdim=True)
        return inp
    # without keepdim, reduce highest axis first so indices stay valid
    for axis in sorted(reduction_axes, reverse=True):
        inp = inp.sum(int(axis))
    return inp
def mean_tensor(inp, axes, keepdim=False):
    """
    Average a tensor over several axes.

    :param inp: torch tensor
    :param axes: iterable of axis indices (deduplicated internally)
    :param keepdim: keep reduced dims as size 1
    :return: reduced tensor
    """
    reduction_axes = np.unique(axes).astype(int)
    if keepdim:
        for axis in reduction_axes:
            inp = inp.mean(int(axis), keepdim=True)
        return inp
    # without keepdim, reduce highest axis first so indices stay valid
    for axis in sorted(reduction_axes, reverse=True):
        inp = inp.mean(int(axis))
    return inp
def flip(x, dim):
    """
    Flip (mirror) the tensor along dimension dim.

    :param x: torch tensor
    :param dim: dimension to mirror (negative indices allowed)
    :return: a new tensor with dim reversed
    """
    return torch.flip(x, (dim,))
| 1,624
| 28.545455
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/folder_names.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import network_training_output_dir
def get_output_folder_name(model: str, task: str = None, trainer: str = None, plans: str = None, fold: int = None,
                           overwrite_training_output_dir: str = None):
    """
    Build the output directory for the nnU-Net model described by the inputs.
    Each non-None argument appends one more path component:
    <base>/<model>[/<task>][/<trainer>__<plans>][/fold_<fold>]

    :param model: one of '2d', '3d_cascade_fullres', '3d_fullres', '3d_lowres'
    :param task: task folder name
    :param trainer: trainer class name
    :param plans: plans identifier (joined to trainer with '__')
    :param fold: fold number
    :param overwrite_training_output_dir: replaces network_training_output_dir as base
    :return: the assembled path
    """
    assert model in ["2d", "3d_cascade_fullres", '3d_fullres', '3d_lowres']

    tr_dir = network_training_output_dir if overwrite_training_output_dir is None \
        else overwrite_training_output_dir

    current = join(tr_dir, model)
    if task is not None:
        current = join(current, task)
    if trainer is not None and plans is not None:
        current = join(current, trainer + "__" + plans)
    if fold is not None:
        current = join(current, "fold_%d" % fold)
    return current
| 1,810
| 36.729167
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/to_torch.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def maybe_to_torch(d):
    """
    Convert d to a float torch tensor unless it already is one. Lists are
    processed element-wise (existing tensors are passed through untouched).

    :param d: tensor, numpy array, or list thereof
    :return: tensor or list of tensors
    """
    if isinstance(d, list):
        return [i if isinstance(i, torch.Tensor) else maybe_to_torch(i) for i in d]
    if isinstance(d, torch.Tensor):
        return d
    return torch.from_numpy(d).float()
def to_cuda(data, non_blocking=True, gpu_id=0):
    """
    Move a tensor (or list of tensors) to the given GPU.

    Bugfix: the single-tensor branch previously hard-coded non_blocking=True,
    silently ignoring the non_blocking argument; it is now forwarded in both
    branches.

    :param data: torch tensor or list of torch tensors
    :param non_blocking: passed through to Tensor.cuda
    :param gpu_id: target device index
    :return: tensor(s) on the requested GPU
    """
    if isinstance(data, list):
        data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data]
    else:
        data = data.cuda(gpu_id, non_blocking=non_blocking)
    return data
| 1,167
| 35.5
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/task_name_id_conversion.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.paths import nnUNet_raw_data, preprocessing_output_dir, nnUNet_cropped_data, network_training_output_dir
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
def convert_id_to_task_name(task_id: int):
    """
    Resolve an integer task id (e.g. 4) to the full task folder name
    (e.g. 'Task004_...') by searching the preprocessed, raw, cropped and
    trained-model directories for folders starting with 'TaskXXX'.

    :param task_id: integer id of the task
    :return: the unique matching task folder name
    :raises RuntimeError: if zero or more than one distinct match is found
    """
    startswith = "Task%03.0d" % task_id
    # each known data location may be unset (None); collect candidates from
    # whichever ones are configured
    if preprocessing_output_dir is not None:
        candidates_preprocessed = subdirs(preprocessing_output_dir, prefix=startswith, join=False)
    else:
        candidates_preprocessed = []
    if nnUNet_raw_data is not None:
        candidates_raw = subdirs(nnUNet_raw_data, prefix=startswith, join=False)
    else:
        candidates_raw = []
    if nnUNet_cropped_data is not None:
        candidates_cropped = subdirs(nnUNet_cropped_data, prefix=startswith, join=False)
    else:
        candidates_cropped = []
    candidates_trained_models = []
    if network_training_output_dir is not None:
        # trained models are nested one level deeper, per configuration
        for m in ['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres']:
            if isdir(join(network_training_output_dir, m)):
                candidates_trained_models += subdirs(join(network_training_output_dir, m), prefix=startswith, join=False)
    all_candidates = candidates_cropped + candidates_preprocessed + candidates_raw + candidates_trained_models
    # the same task may legitimately appear in several locations; it must
    # resolve to exactly one distinct name
    unique_candidates = np.unique(all_candidates)
    if len(unique_candidates) > 1:
        raise RuntimeError("More than one task name found for task id %d. Please correct that. (I looked in the "
                           "following folders:\n%s\n%s\n%s" % (task_id, nnUNet_raw_data, preprocessing_output_dir,
                                                               nnUNet_cropped_data))
    if len(unique_candidates) == 0:
        raise RuntimeError("Could not find a task with the ID %d. Make sure the requested task ID exists and that "
                           "nnU-Net knows where raw and preprocessed data are located (see Documentation - "
                           "Installation). Here are your currently defined folders:\nnnUNet_preprocessed=%s\nRESULTS_"
                           "FOLDER=%s\nnnUNet_raw_data_base=%s\nIf something is not right, adapt your environemnt "
                           "variables." %
                           (task_id,
                            os.environ.get('nnUNet_preprocessed') if os.environ.get('nnUNet_preprocessed') is not None else 'None',
                            os.environ.get('RESULTS_FOLDER') if os.environ.get('RESULTS_FOLDER') is not None else 'None',
                            os.environ.get('nnUNet_raw_data_base') if os.environ.get('nnUNet_raw_data_base') is not None else 'None',
                            ))
    return unique_candidates[0]
def convert_task_name_to_id(task_name: str):
    """Extract the integer task id from a full task name.

    :param task_name: name such as 'Task004_Hippocampus' (must start with 'Task')
    :return: the integer id, e.g. 4
    :raises AssertionError: if task_name does not start with 'Task'
    """
    assert task_name.startswith("Task"), "task_name must start with 'Task'"
    # Parse every digit between the 'Task' prefix and the first underscore
    # instead of the original fixed task_name[4:7] slice, so that task ids
    # with more than three digits (e.g. Task1000_X) are handled correctly.
    # Three-digit names parse identically to before.
    task_id = int(task_name[4:].split("_")[0])
    return task_id
| 3,514
| 50.691176
| 133
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/random_stuff.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class no_op(object):
    """A do-nothing context manager.

    Entering and exiting have no effect; useful wherever a context manager
    is syntactically required but no setup/teardown is wanted.
    """

    def __enter__(self):
        # Nothing to acquire; bind target (if any) to None.
        return None

    def __exit__(self, *args):
        # Nothing to release; returning None lets exceptions propagate.
        return None
| 794
| 35.136364
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/distributed.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import distributed
from torch import autograd
from torch.nn.parallel import DistributedDataParallel as DDP
def print_if_rank0(*args):
    """Print *args* only on rank 0 of the default process group.

    Avoids duplicated console output when running under torch.distributed.
    """
    rank = distributed.get_rank()
    if rank != 0:
        return
    print(*args)
class awesome_allgather_function(autograd.Function):
    """Differentiable all_gather.

    Forward concatenates every rank's tensor along dim 0; backward returns to
    each rank only the gradient slice corresponding to its own forward input,
    mimicking what DataParallel would do.
    """

    @staticmethod
    def forward(ctx, input):
        n_ranks = distributed.get_world_size()
        # Destination buffers, one per rank, each shaped like the local input.
        gathered = [torch.empty_like(input) for _ in range(n_ranks)]
        distributed.all_gather(gathered, input)
        return torch.cat(gathered, dim=0)

    @staticmethod
    def backward(ctx, grad_output):
        # grad_output spans the full concatenated forward output; carve out
        # the contiguous chunk that belongs to this process.
        per_rank = grad_output.shape[0] // distributed.get_world_size()
        rank = distributed.get_rank()
        lo = rank * per_rank
        hi = lo + per_rank
        return grad_output[lo:hi]
if __name__ == "__main__":
    # Manual smoke test for awesome_allgather_function. Presumably meant to be
    # launched with torch.distributed.launch, which supplies --local_rank and
    # the env:// rendezvous variables — TODO confirm launch method.
    import torch.distributed as dist
    import argparse
    from torch import nn
    from torch.optim import Adam
    argumentparser = argparse.ArgumentParser()
    argumentparser.add_argument("--local_rank", type=int)
    args = argumentparser.parse_args()
    # Pin this process to its own GPU before initializing NCCL.
    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend='nccl', init_method='env://')
    # Test 1: gather a plain random tensor from every rank.
    rnd = torch.rand((5, 2)).cuda()
    rnd_gathered = awesome_allgather_function.apply(rnd)
    # NOTE(review): "\b" in the string below looks like a typo for "\n" — confirm.
    print("gathering random tensors\nbefore\b", rnd, "\nafter\n", rnd_gathered)
    # so far this works as expected
    # Test 2: gather the output of a DDP-wrapped conv and backprop through it.
    print("now running a DDP model")
    c = nn.Conv2d(2, 3, 3, 1, 1, 1, 1, True).cuda()
    c = DDP(c)
    opt = Adam(c.parameters())
    # Rank 0 deliberately uses a different batch size to exercise uneven
    # per-rank input sizes in the gather/backward path.
    bs = 5
    if dist.get_rank() == 0:
        bs = 4
    inp = torch.rand((bs, 2, 5, 5)).cuda()
    out = c(inp)
    print("output_shape", out.shape)
    out_gathered = awesome_allgather_function.apply(out)
    print("output_shape_after_gather", out_gathered.shape)
    # this also works
    # Sum over the gathered output so every rank's loss depends on all ranks'
    # activations; backward routes each rank its own gradient slice.
    loss = out_gathered.sum()
    loss.backward()
    opt.step()
| 3,172
| 34.255556
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.