from dynamic_network_architectures.architectures.unet import PlainConvUNet, ResidualEncoderUNet
from dynamic_network_architectures.building_blocks.helper import get_matching_instancenorm, convert_dim_to_conv_op
from dynamic_network_architectures.initialization.weight_init import init_last_bn_before_add_to_0
from nnunetv2.utilities.network_initialization import InitWeights_He
from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager
from torch import nn
def get_network_from_plans(plans_manager: PlansManager,
                           dataset_json: dict,
                           configuration_manager: ConfigurationManager,
                           num_input_channels: int,
                           deep_supervision: bool = True):
    """
    Instantiate the segmentation network described by the plans/configuration.

    we may have to change this in the future to accommodate other plans -> network mappings
    num_input_channels can differ depending on whether we do cascade. Its best to make this info available in the
    trainer rather than inferring it again from the plans here.

    :param plans_manager: used to resolve the label manager for this dataset
    :param dataset_json: content of dataset.json, needed by the label manager
    :param configuration_manager: supplies kernel sizes, strides, feature counts and the UNet class name
    :param num_input_channels: number of input channels (may include extra cascade channels)
    :param deep_supervision: whether the network should produce deep supervision outputs
    :return: the instantiated, weight-initialized network (PlainConvUNet or ResidualEncoderUNet)
    :raises AssertionError: if the plans specify an architecture this function does not know
    """
    num_stages = len(configuration_manager.conv_kernel_sizes)
    # dimensionality (2d/3d) is implied by the length of the first kernel size
    dim = len(configuration_manager.conv_kernel_sizes[0])
    conv_op = convert_dim_to_conv_op(dim)

    label_manager = plans_manager.get_label_manager(dataset_json)

    segmentation_network_class_name = configuration_manager.UNet_class_name
    mapping = {
        'PlainConvUNet': PlainConvUNet,
        'ResidualEncoderUNet': ResidualEncoderUNet
    }
    kwargs = {
        'PlainConvUNet': {
            'conv_bias': True,
            'norm_op': get_matching_instancenorm(conv_op),
            'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
            'dropout_op': None, 'dropout_op_kwargs': None,
            'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
        },
        'ResidualEncoderUNet': {
            'conv_bias': True,
            'norm_op': get_matching_instancenorm(conv_op),
            'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
            'dropout_op': None, 'dropout_op_kwargs': None,
            'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
        }
    }
    assert segmentation_network_class_name in mapping.keys(), \
        'The network architecture specified by the plans file is non-standard (maybe your own?). You\'ll have to ' \
        'dive into either this function (get_network_from_plans) or the init of your nnUNetModule to ' \
        'accommodate that.'
    network_class = mapping[segmentation_network_class_name]

    # ResidualEncoderUNet names its encoder depth argument 'n_blocks_per_stage' instead of 'n_conv_per_stage'
    if network_class is ResidualEncoderUNet:
        conv_or_blocks_per_stage = {
            'n_blocks_per_stage': configuration_manager.n_conv_per_stage_encoder,
            'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder
        }
    else:
        conv_or_blocks_per_stage = {
            'n_conv_per_stage': configuration_manager.n_conv_per_stage_encoder,
            'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder
        }

    model = network_class(
        input_channels=num_input_channels,
        n_stages=num_stages,
        # features double per stage but are capped at unet_max_num_features
        features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i,
                                configuration_manager.unet_max_num_features) for i in range(num_stages)],
        conv_op=conv_op,
        kernel_sizes=configuration_manager.conv_kernel_sizes,
        strides=configuration_manager.pool_op_kernel_sizes,
        num_classes=label_manager.num_segmentation_heads,
        deep_supervision=deep_supervision,
        **conv_or_blocks_per_stage,
        **kwargs[segmentation_network_class_name]
    )
    model.apply(InitWeights_He(1e-2))
    if network_class is ResidualEncoderUNet:
        # zero-init the last norm of each residual block so blocks start as identity mappings
        model.apply(init_last_bn_before_add_to_0)
    return model