| """ |
| This script is modified from from torchvision to support N-D images, |
| by overriding the definition of convolutional layers and pooling layers. |
| |
| https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/backbone_utils.py |
| """ |

from __future__ import annotations

from torch import Tensor, nn

from monai.networks.nets import resnet
from monai.utils import optional_import

from .feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool

torchvision_models, _ = optional_import("torchvision.models")

__all__ = ["BackboneWithFPN"]

class BackboneWithFPN(nn.Module):
    """
    Adds an FPN on top of a model.
    Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
    extract a submodel that returns the feature maps specified in ``return_layers``.
    The same limitations of ``IntermediateLayerGetter`` apply here.

    Same code as https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/backbone_utils.py,
    except that this class uses ``spatial_dims`` to support both 2D and 3D inputs.

    Args:
        backbone: backbone network.
        return_layers: a dict whose keys are the names of the modules whose
            activations will be returned, and whose values are the
            (user-specified) names of the returned activations.
        in_channels_list: number of channels for each feature map that is
            returned, in the order they are present in the OrderedDict.
        out_channels: number of channels in the FPN.
        spatial_dims: number of spatial dimensions of the input images (2 or 3).
            If not provided, it is inferred from the backbone.
        extra_blocks: extra operations to be performed on top of the original
            feature maps. Defaults to ``LastLevelMaxPool(spatial_dims)``.
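
    Example (a minimal sketch; the ``resnet18`` backbone, its layer names, and
    the channel counts below are illustrative assumptions, not part of this file):

        .. code-block:: python

            import torch
            from monai.networks.nets import resnet

            backbone = resnet.resnet18(spatial_dims=3, n_input_channels=1)
            # return the outputs of layer1..layer4 under the keys "0".."3"
            return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
            # resnet18 feature maps are assumed to carry 64, 128, 256, 512 channels
            net = BackboneWithFPN(
                backbone,
                return_layers,
                in_channels_list=[64, 128, 256, 512],
                out_channels=256,
                spatial_dims=3,
            )
            out = net(torch.rand(1, 1, 64, 64, 64))  # dict of FPN feature maps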
| """ |

    def __init__(
        self,
        backbone: nn.Module,
        return_layers: dict[str, str],
        in_channels_list: list[int],
        out_channels: int,
        spatial_dims: int | None = None,
        extra_blocks: ExtraFPNBlock | None = None,
    ) -> None:
        super().__init__()

        if spatial_dims is None:
            if hasattr(backbone, "spatial_dims") and isinstance(backbone.spatial_dims, int):
                spatial_dims = backbone.spatial_dims
            elif isinstance(backbone.conv1, nn.Conv2d):
                spatial_dims = 2
            elif isinstance(backbone.conv1, nn.Conv3d):
                spatial_dims = 3
            else:
                raise ValueError("Could not find spatial_dims of backbone, please specify it.")

        if extra_blocks is None:
            extra_blocks = LastLevelMaxPool(spatial_dims)

        self.body = torchvision_models._utils.IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.fpn = FeaturePyramidNetwork(
            spatial_dims=spatial_dims,
            in_channels_list=in_channels_list,
            out_channels=out_channels,
            extra_blocks=extra_blocks,
        )
        self.out_channels = out_channels

    def forward(self, x: Tensor) -> dict[str, Tensor]:
        """
        Computes the resulting feature maps of the network.

        Args:
            x: input images

        Returns:
            feature maps after the FPN layers, ordered from highest to lowest resolution.
        """
        x = self.body(x)
        y: dict[str, Tensor] = self.fpn(x)
        return y


def _resnet_fpn_extractor(
    backbone: resnet.ResNet,
    spatial_dims: int,
    trainable_layers: int = 5,
    returned_layers: list[int] | None = None,
    extra_blocks: ExtraFPNBlock | None = None,
) -> BackboneWithFPN:
| """ |
| Same code as https://github.com/pytorch/vision/blob/release/0.12/torchvision/models/detection/backbone_utils.py |
| Except that ``in_channels_stage2 = backbone.in_planes // 8`` instead of ``in_channels_stage2 = backbone.inplanes // 8``, |
| and it requires spatial_dims: 2D or 3D images. |
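
    Example (a minimal sketch; the 3D ``resnet50`` backbone and its constructor
    arguments are illustrative assumptions, not part of this file):

        .. code-block:: python

            from monai.networks.nets import resnet

            backbone = resnet.resnet50(spatial_dims=3, n_input_channels=1)
            # freeze all but the three topmost backbone layers and attach an FPN
            net = _resnet_fpn_extractor(backbone, spatial_dims=3, trainable_layers=3)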
| """ |

    if trainable_layers < 0 or trainable_layers > 5:
        raise ValueError(f"Trainable layers should be in the range [0,5], got {trainable_layers}")
    layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers]
    if trainable_layers == 5:
        layers_to_train.append("bn1")
    for name, parameter in backbone.named_parameters():
        if all(not name.startswith(layer) for layer in layers_to_train):
            parameter.requires_grad_(False)

    if extra_blocks is None:
        extra_blocks = LastLevelMaxPool(spatial_dims)

    if returned_layers is None:
        returned_layers = [1, 2, 3, 4]
    if min(returned_layers) <= 0 or max(returned_layers) >= 5:
        raise ValueError(f"Each returned layer should be in the range [1,4]. Got {returned_layers}")
    return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)}

    in_channels_stage2 = backbone.in_planes // 8
    in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
    out_channels = 256
    return BackboneWithFPN(
        backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, spatial_dims=spatial_dims
    )