# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from collections.abc import Callable, Sequence
from typing import cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.config import NdarrayTensor
from monai.transforms import ScaleIntensity
from monai.utils import ensure_tuple, pytorch_after
from monai.visualize.visualizer import default_upsampler
__all__ = ["CAM", "GradCAM", "GradCAMpp", "ModelWithHooks", "default_normalizer"]
def default_normalizer(x: NdarrayTensor) -> NdarrayTensor:
"""
    A linear intensity scaling that maps (min, max) to (1, 0).
    If the input data is a PyTorch Tensor, the output will be a Tensor on the same device;
    otherwise, the output will be a numpy array.
    Note: this scaling flips magnitudes (the smallest value becomes the largest and vice versa).
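
    Example (a minimal sketch with assumed toy values):

    .. code-block:: python

        import numpy as np
        from monai.visualize.class_activation_maps import default_normalizer

        x = np.array([[[0.0, 0.5, 1.0]]])  # one batch item, one channel
        default_normalizer(x)  # [[[1.0, 0.5, 0.0]]] -- min maps to 1.0, max maps to 0.0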
"""
def _compute(data: np.ndarray) -> np.ndarray:
scaler = ScaleIntensity(minv=1.0, maxv=0.0)
return np.stack([scaler(i) for i in data], axis=0)
if isinstance(x, torch.Tensor):
return torch.as_tensor(_compute(x.detach().cpu().numpy()), device=x.device) # type: ignore
return _compute(x) # type: ignore
class ModelWithHooks:
"""
    A model wrapper that runs model forward/backward steps and stores intermediate feature/gradient information.
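
    Example (a minimal sketch; the layer name assumes the 2D DenseNet121 used in the ``CAM`` example below):

    .. code-block:: python

        import torch
        from monai.networks.nets import DenseNet121

        net = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
        wrapped = ModelWithHooks(net, "class_layers.relu", register_forward=True, register_backward=True)
        logits, acti, grad = wrapped(torch.rand(1, 1, 48, 64))  # cached activations and gradients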
"""
def __init__(
self,
nn_module: nn.Module,
target_layer_names: str | Sequence[str],
register_forward: bool = False,
register_backward: bool = False,
):
"""
Args:
nn_module: the model to be wrapped.
            target_layer_names: the names of the layers to cache.
register_forward: whether to cache the forward pass output corresponding to `target_layer_names`.
register_backward: whether to cache the backward pass output corresponding to `target_layer_names`.
"""
self.model = nn_module
self.target_layers = ensure_tuple(target_layer_names)
self.gradients: dict[str, torch.Tensor] = {}
self.activations: dict[str, torch.Tensor] = {}
self.score: torch.Tensor | None = None
self.class_idx: int | None = None
self.register_backward = register_backward
self.register_forward = register_forward
_registered = []
for name, mod in nn_module.named_modules():
if name not in self.target_layers:
continue
_registered.append(name)
if self.register_backward:
if pytorch_after(1, 8):
if "inplace" in mod.__dict__ and mod.__dict__["inplace"]:
# inplace=True causes errors for register_full_backward_hook
mod.__dict__["inplace"] = False
mod.register_full_backward_hook(self.backward_hook(name))
else:
mod.register_backward_hook(self.backward_hook(name))
if self.register_forward:
mod.register_forward_hook(self.forward_hook(name))
if self.target_layers and (len(_registered) != len(self.target_layers)):
warnings.warn(f"Not all target_layers exist in the network module: targets: {self.target_layers}.")
def backward_hook(self, name):
def _hook(_module, _grad_input, grad_output):
self.gradients[name] = grad_output[0]
return _hook
def forward_hook(self, name):
def _hook(_module, _input, output):
self.activations[name] = output
return _hook
def get_layer(self, layer_id: str | Callable[[nn.Module], nn.Module]) -> nn.Module:
"""
Args:
layer_id: a layer name string or a callable. If it is a callable such as `lambda m: m.fc`,
this method will return the module `self.model.fc`.
Returns:
a submodule from self.model.
"""
if callable(layer_id):
return layer_id(self.model)
if isinstance(layer_id, str):
for name, mod in self.model.named_modules():
if name == layer_id:
return cast(nn.Module, mod)
raise NotImplementedError(f"Could not find {layer_id}.")
def class_score(self, logits: torch.Tensor, class_idx: int) -> torch.Tensor:
return logits[:, class_idx].squeeze()
def __call__(self, x, class_idx=None, retain_graph=False, **kwargs):
train = self.model.training
self.model.eval()
logits = self.model(x, **kwargs)
self.class_idx = logits.max(1)[-1] if class_idx is None else class_idx
acti, grad = None, None
if self.register_forward:
acti = tuple(self.activations[layer] for layer in self.target_layers)
if self.register_backward:
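            # compute the class score and backprop it so the registered hooks capture the layer gradients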
self.score = self.class_score(logits, cast(int, self.class_idx))
self.model.zero_grad()
self.score.sum().backward(retain_graph=retain_graph)
for layer in self.target_layers:
if layer not in self.gradients:
warnings.warn(
f"Backward hook for {layer} is not triggered; `requires_grad` of {layer} should be `True`."
)
grad = tuple(self.gradients[layer] for layer in self.target_layers if layer in self.gradients)
if train:
self.model.train()
return logits, acti, grad
def get_wrapped_net(self):
return self.model
class CAMBase:
"""
Base class for CAM methods.
"""
def __init__(
self,
nn_module: nn.Module,
target_layers: str,
upsampler: Callable = default_upsampler,
postprocessing: Callable = default_normalizer,
register_backward: bool = True,
) -> None:
self.nn_module: ModelWithHooks
# Convert to model with hooks if necessary
if not isinstance(nn_module, ModelWithHooks):
self.nn_module = ModelWithHooks(
nn_module, target_layers, register_forward=True, register_backward=register_backward
)
else:
self.nn_module = nn_module
self.upsampler = upsampler
self.postprocessing = postprocessing
def feature_map_size(self, input_size, device="cpu", layer_idx=-1, **kwargs):
"""
Computes the actual feature map size given `nn_module` and the target_layer name.
Args:
input_size: shape of the input tensor
device: the device used to initialise the input tensor
layer_idx: index of the target layer if there are multiple target layers. Defaults to -1.
kwargs: any extra arguments to be passed on to the module as part of its `__call__`.
Returns:
shape of the actual feature map.
"""
return self.compute_map(torch.zeros(*input_size, device=device), layer_idx=layer_idx, **kwargs).shape
def compute_map(self, x, class_idx=None, layer_idx=-1):
"""
Compute the actual feature map with input tensor `x`.
Args:
x: input to `nn_module`.
            class_idx: index of the class to be visualized. Defaults to `None` (computing `class_idx` from `argmax` of the logits).
layer_idx: index of the target layer if there are multiple target layers. Defaults to -1.
Returns:
            activation maps (raw outputs without upsampling/post-processing).
"""
raise NotImplementedError()
def _upsample_and_post_process(self, acti_map, x):
# upsampling and postprocessing
img_spatial = x.shape[2:]
acti_map = self.upsampler(img_spatial)(acti_map)
return self.postprocessing(acti_map)
def __call__(self):
raise NotImplementedError()
class CAM(CAMBase):
"""
Compute class activation map from the last fully-connected layers before the spatial pooling.
This implementation is based on:
Zhou et al., Learning Deep Features for Discriminative Localization. CVPR '16,
https://arxiv.org/abs/1512.04150
Examples
.. code-block:: python
import torch
# densenet 2d
from monai.networks.nets import DenseNet121
from monai.visualize import CAM
model_2d = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
cam = CAM(nn_module=model_2d, target_layers="class_layers.relu", fc_layers="class_layers.out")
result = cam(x=torch.rand((1, 1, 48, 64)))
# resnet 2d
from monai.networks.nets import seresnet50
from monai.visualize import CAM
model_2d = seresnet50(spatial_dims=2, in_channels=3, num_classes=4)
cam = CAM(nn_module=model_2d, target_layers="layer4", fc_layers="last_linear")
result = cam(x=torch.rand((2, 3, 48, 64)))
N.B.: To help select the target layer, it may be useful to list all layers:
.. code-block:: python
for name, _ in model.named_modules(): print(name)
See Also:
- :py:class:`monai.visualize.class_activation_maps.GradCAM`
"""
def __init__(
self,
nn_module: nn.Module,
target_layers: str,
fc_layers: str | Callable = "fc",
upsampler: Callable = default_upsampler,
postprocessing: Callable = default_normalizer,
) -> None:
"""
Args:
nn_module: the model to be visualized
target_layers: name of the model layer to generate the feature map.
            fc_layers: a string or a callable used to get the fully-connected weights to compute the
                activation map from the target_layers (without pooling), evaluated at every spatial location.
upsampler: An upsampling method to upsample the output image. Default is
N dimensional linear (bilinear, trilinear, etc.) depending on num spatial
dimensions of input.
postprocessing: a callable that applies on the upsampled output image.
Default is normalizing between min=1 and max=0 (i.e., largest input will become 0 and
smallest input will become 1).
"""
super().__init__(
nn_module=nn_module,
target_layers=target_layers,
upsampler=upsampler,
postprocessing=postprocessing,
register_backward=False,
)
self.fc_layers = fc_layers
def compute_map(self, x, class_idx=None, layer_idx=-1, **kwargs):
logits, acti, _ = self.nn_module(x, **kwargs)
acti = acti[layer_idx]
if class_idx is None:
class_idx = logits.max(1)[-1]
b, c, *spatial = acti.shape
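        # evaluate the fully-connected layer at every spatial location:
        # flatten the spatial dims, feed each location's channel vector through `fc_layers`,
        # then keep only the logit of the selected class at each location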
acti = torch.split(acti.reshape(b, c, -1), 1, dim=2) # make the spatial dims 1D
fc_layers = self.nn_module.get_layer(self.fc_layers)
output = torch.stack([fc_layers(a[..., 0]) for a in acti], dim=2)
        output = torch.stack([output[i, idx : idx + 1] for i, idx in enumerate(class_idx)], dim=0)
return output.reshape(b, 1, *spatial) # resume the spatial dims on the selected class
def __call__(self, x, class_idx=None, layer_idx=-1, **kwargs):
"""
Compute the activation map with upsampling and postprocessing.
Args:
x: input tensor, shape must be compatible with `nn_module`.
            class_idx: index of the class to be visualized. Defaults to `argmax(logits)`.
layer_idx: index of the target layer if there are multiple target layers. Defaults to -1.
kwargs: any extra arguments to be passed on to the module as part of its `__call__`.
Returns:
activation maps
"""
acti_map = self.compute_map(x, class_idx, layer_idx, **kwargs)
return self._upsample_and_post_process(acti_map, x)
class GradCAM(CAMBase):
"""
Computes Gradient-weighted Class Activation Mapping (Grad-CAM).
This implementation is based on:
Selvaraju et al., Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization,
https://arxiv.org/abs/1610.02391
Examples
.. code-block:: python
import torch
# densenet 2d
from monai.networks.nets import DenseNet121
from monai.visualize import GradCAM
model_2d = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
cam = GradCAM(nn_module=model_2d, target_layers="class_layers.relu")
result = cam(x=torch.rand((1, 1, 48, 64)))
# resnet 2d
from monai.networks.nets import seresnet50
from monai.visualize import GradCAM
model_2d = seresnet50(spatial_dims=2, in_channels=3, num_classes=4)
cam = GradCAM(nn_module=model_2d, target_layers="layer4")
result = cam(x=torch.rand((2, 3, 48, 64)))
N.B.: To help select the target layer, it may be useful to list all layers:
.. code-block:: python
for name, _ in model.named_modules(): print(name)
See Also:
- :py:class:`monai.visualize.class_activation_maps.CAM`
"""
def compute_map(self, x, class_idx=None, retain_graph=False, layer_idx=-1, **kwargs):
_, acti, grad = self.nn_module(x, class_idx=class_idx, retain_graph=retain_graph, **kwargs)
acti, grad = acti[layer_idx], grad[layer_idx]
b, c, *spatial = grad.shape
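        # Grad-CAM channel weights: global-average-pool the class-score gradients over the
        # spatial dims, then use them to weight the activations (ReLU keeps positive evidence)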
weights = grad.view(b, c, -1).mean(2).view(b, c, *[1] * len(spatial))
acti_map = (weights * acti).sum(1, keepdim=True)
return F.relu(acti_map)
def __call__(self, x, class_idx=None, layer_idx=-1, retain_graph=False, **kwargs):
"""
Compute the activation map with upsampling and postprocessing.
Args:
x: input tensor, shape must be compatible with `nn_module`.
            class_idx: index of the class to be visualized. Defaults to `argmax(logits)`.
layer_idx: index of the target layer if there are multiple target layers. Defaults to -1.
            retain_graph: whether to retain the graph during the torch module's backward call.
kwargs: any extra arguments to be passed on to the module as part of its `__call__`.
Returns:
activation maps
"""
acti_map = self.compute_map(x, class_idx=class_idx, retain_graph=retain_graph, layer_idx=layer_idx, **kwargs)
return self._upsample_and_post_process(acti_map, x)
class GradCAMpp(GradCAM):
"""
Computes Gradient-weighted Class Activation Mapping (Grad-CAM++).
This implementation is based on:
Chattopadhyay et al., Grad-CAM++: Improved Visual Explanations for Deep Convolutional Networks,
https://arxiv.org/abs/1710.11063
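
    Examples (a minimal sketch; construction mirrors :py:class:`monai.visualize.class_activation_maps.GradCAM`,
    which this class subclasses):

    .. code-block:: python

        import torch
        from monai.networks.nets import DenseNet121
        from monai.visualize import GradCAMpp

        model_2d = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
        cam = GradCAMpp(nn_module=model_2d, target_layers="class_layers.relu")
        result = cam(x=torch.rand((1, 1, 48, 64)))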
See Also:
- :py:class:`monai.visualize.class_activation_maps.GradCAM`
"""
def compute_map(self, x, class_idx=None, retain_graph=False, layer_idx=-1, **kwargs):
_, acti, grad = self.nn_module(x, class_idx=class_idx, retain_graph=retain_graph, **kwargs)
acti, grad = acti[layer_idx], grad[layer_idx]
b, c, *spatial = grad.shape
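        # per-location weights `alpha` from the Grad-CAM++ paper:
        # alpha = grad^2 / (2 * grad^2 + sum_over_spatial(acti * grad^3)),
        # with a guard against zero denominators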
alpha_nr = grad.pow(2)
alpha_dr = alpha_nr.mul(2) + acti.mul(grad.pow(3)).view(b, c, -1).sum(-1).view(b, c, *[1] * len(spatial))
alpha_dr = torch.where(alpha_dr != 0.0, alpha_dr, torch.ones_like(alpha_dr))
alpha = alpha_nr.div(alpha_dr + 1e-7)
relu_grad = F.relu(cast(torch.Tensor, self.nn_module.score).exp() * grad)
weights = (alpha * relu_grad).view(b, c, -1).sum(-1).view(b, c, *[1] * len(spatial))
acti_map = (weights * acti).sum(1, keepdim=True)
return F.relu(acti_map)