import numpy as np
import torch
from typing import Callable, List, Tuple
from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection
from pytorch_grad_cam.utils.image import scale_cam_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget

import config
import utils
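# `config` and `utils` are this project's own YOLOv3 helper modules (an
# assumption based on how they are used below): `config.SCALED_ANCHORS`
# holds the per-scale anchor boxes, and `utils` supplies the
# `cells_to_bboxes` and `non_max_suppression` helpers from the training code.
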
class BaseCAM:
    def __init__(self,
                 model: torch.nn.Module,
                 target_layers: List[torch.nn.Module],
                 use_cuda: bool = False,
                 reshape_transform: Callable = None,
                 compute_input_gradient: bool = False,
                 uses_gradients: bool = True) -> None:
        self.model = model.eval()
        self.target_layers = target_layers
        self.cuda = use_cuda
        if self.cuda:
            self.model = model.cuda()
        self.reshape_transform = reshape_transform
        self.compute_input_gradient = compute_input_gradient
        self.uses_gradients = uses_gradients
        # Registers hooks on the target layers so that activations and
        # gradients are captured on every forward/backward pass.
        self.activations_and_grads = ActivationsAndGradients(
            self.model, target_layers, reshape_transform)
| """ Get a vector of weights for every channel in the target layer. | |
| Methods that return weights channels, | |
| will typically need to only implement this function. """ | |
| def get_cam_image(self, | |
| input_tensor: torch.Tensor, | |
| target_layer: torch.nn.Module, | |
| targets: List[torch.nn.Module], | |
| activations: torch.Tensor, | |
| grads: torch.Tensor, | |
| eigen_smooth: bool = False) -> np.ndarray: | |
| return get_2d_projection(activations) | |
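    # Gradient-weighted variants (Grad-CAM and friends) override
    # get_cam_image to combine `activations` and `grads`; a hedged sketch
    # of such an override is given after the class definition below.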
    def forward(self,
                input_tensor: torch.Tensor,
                targets: List[torch.nn.Module],
                eigen_smooth: bool = False) -> np.ndarray:
        if self.cuda:
            input_tensor = input_tensor.cuda()

        if self.compute_input_gradient:
            # Track gradients on the input itself for methods that need them.
            input_tensor = input_tensor.requires_grad_(True)

        outputs = self.activations_and_grads(input_tensor)

        if targets is None:
            # No explicit targets were given: decode the YOLO predictions at
            # all three scales into boxes, run non-max suppression, and use
            # the predicted class of each surviving detection as a target.
            # NMS and target construction only look at the first image, so
            # this path assumes a batch of one.
            batch_size = input_tensor.size(0)
            bboxes = [[] for _ in range(batch_size)]
            for i in range(3):
                S = outputs[i].shape[2]
                anchor = config.SCALED_ANCHORS[i]
                boxes_scale_i = utils.cells_to_bboxes(
                    outputs[i], anchor, S=S, is_preds=True
                )
                for idx, box in enumerate(boxes_scale_i):
                    bboxes[idx] += box
            nms_boxes = utils.non_max_suppression(
                bboxes[0], iou_threshold=0.5, threshold=0.4,
                box_format="midpoint",
            )
            # Each NMS box is [class, score, x, y, w, h].
            target_categories = [int(box[0]) for box in nms_boxes]
            targets = [ClassifierOutputTarget(category)
                       for category in target_categories]

        if self.uses_gradients:
            self.model.zero_grad()
            loss = sum(target(output)
                       for target, output in zip(targets, outputs))
            loss.backward(retain_graph=True)
        # In most saliency attribution papers, the saliency is computed with
        # a single target layer, commonly the last convolutional one. Here we
        # support passing a list with multiple target layers: the saliency
        # image is computed for every target layer and the results are then
        # aggregated (with a mean aggregation by default). This gives more
        # flexibility if you want to use, say, all conv layers or all
        # batchnorm layers.
        cam_per_layer = self.compute_cam_per_layer(input_tensor,
                                                   targets,
                                                   eigen_smooth)
        return self.aggregate_multi_layers(cam_per_layer)
    def get_target_width_height(self,
                                input_tensor: torch.Tensor) -> Tuple[int, int]:
        width, height = input_tensor.size(-1), input_tensor.size(-2)
        return width, height
    def compute_cam_per_layer(
            self,
            input_tensor: torch.Tensor,
            targets: List[torch.nn.Module],
            eigen_smooth: bool) -> np.ndarray:
        activations_list = [a.cpu().data.numpy()
                            for a in self.activations_and_grads.activations]
        grads_list = [g.cpu().data.numpy()
                      for g in self.activations_and_grads.gradients]
        target_size = self.get_target_width_height(input_tensor)

        cam_per_target_layer = []
        # Loop over the saliency image from every target layer.
        for i in range(len(self.target_layers)):
            target_layer = self.target_layers[i]
            layer_activations = None
            layer_grads = None
            if i < len(activations_list):
                layer_activations = activations_list[i]
            if i < len(grads_list):
                layer_grads = grads_list[i]
            cam = self.get_cam_image(input_tensor,
                                     target_layer,
                                     targets,
                                     layer_activations,
                                     layer_grads,
                                     eigen_smooth)
            # Keep only positive contributions, then rescale each CAM to
            # the input resolution.
            cam = np.maximum(cam, 0)
            scaled = scale_cam_image(cam, target_size)
            cam_per_target_layer.append(scaled[:, None, :])
        return cam_per_target_layer
    def aggregate_multi_layers(
            self,
            cam_per_target_layer: np.ndarray) -> np.ndarray:
        cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1)
        cam_per_target_layer = np.maximum(cam_per_target_layer, 0)
        result = np.mean(cam_per_target_layer, axis=1)
        return scale_cam_image(result)
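

# ---------------------------------------------------------------------------
# Illustrative sketches (not part of the project's code above).
#
# A minimal Grad-CAM-style subclass, shown only to illustrate how
# get_cam_image is typically overridden: pool the gradients into one weight
# per activation channel and return the weighted sum over channels. This
# follows the standard Grad-CAM recipe and is a sketch, not this project's
# shipped implementation.
class GradCAMSketch(BaseCAM):
    def get_cam_image(self,
                      input_tensor: torch.Tensor,
                      target_layer: torch.nn.Module,
                      targets: List[torch.nn.Module],
                      activations: np.ndarray,
                      grads: np.ndarray,
                      eigen_smooth: bool = False) -> np.ndarray:
        # Global-average-pool the gradients to one weight per channel;
        # activations/grads arrive here as numpy arrays of shape [N, C, H, W].
        weights = np.mean(grads, axis=(2, 3))
        # Weighted sum over the channel axis -> [N, H, W].
        return (weights[:, :, None, None] * activations).sum(axis=1)


# A hedged usage sketch. The tiny stand-in network below exists only so the
# example runs end to end; with the real project you would pass the trained
# YOLOv3 model and one of its convolutional layers. Explicit targets are
# passed to sidestep the YOLO-specific auto-target path, which needs this
# project's config/utils modules and detection-shaped outputs.
if __name__ == "__main__":
    net = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, 3, padding=1),
        torch.nn.ReLU(),
        torch.nn.AdaptiveAvgPool2d(1),
        torch.nn.Flatten(),
        torch.nn.Linear(8, 5),
    )
    cam = GradCAMSketch(net, target_layers=[net[0]])
    x = torch.randn(1, 3, 64, 64)
    # Ask for the CAM of (hypothetical) class 3 of this toy classifier.
    heatmap = cam.forward(x, targets=[ClassifierOutputTarget(3)])
    print(heatmap.shape)  # -> (1, 64, 64)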