Dataset columns (from the viewer summary):

repo: string, length 1–99
file: string, length 13–215
code: string, length 12–59.2M
file_length: int64, 12–59.2M
avg_line_length: float64, 3.82–1.48M
max_line_length: int64, 12–2.51M
extension_type: string, 1 distinct class
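The statistics columns are derivable from the raw code field. A minimal sketch of that relationship, assuming the dump has been loaded into a pandas DataFrame with these column names (the parquet file name below is hypothetical):

import pandas as pd

df = pd.read_parquet('erd_code_files.parquet')  # hypothetical dump file

def line_stats(code: str):
    """Recompute file_length / avg_line_length / max_line_length."""
    lines = code.splitlines() or ['']
    lengths = [len(line) for line in lines]
    # file_length counts characters in the whole file
    return len(code), sum(lengths) / len(lengths), max(lengths)

# sanity-check one record against the stored statistics
row = df.iloc[0]
print(line_stats(row['code']), row['file_length'], row['max_line_length'])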
repo: ERD
file: ERD-main/mmdet/models/dense_heads/dab_detr_head.py
file_length: 4,553 · avg_line_length: 41.560748 · max_line_length: 79 · extension_type: py
code:

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch.nn as nn
from mmcv.cnn import Linear
from mmengine.model import bias_init_with_prob, constant_init
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList
from ..layers import MLP, inverse_sigmoid
from .conditional_detr_head import ConditionalDETRHead


@MODELS.register_module()
class DABDETRHead(ConditionalDETRHead):
    """Head of DAB-DETR. DAB-DETR: Dynamic Anchor Boxes are Better Queries
    for DETR.

    More details can be found in the `paper
    <https://arxiv.org/abs/2201.12329>`_ .
    """

    def _init_layers(self) -> None:
        """Initialize layers of the transformer head."""
        # cls branch
        self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)
        # reg branch
        self.fc_reg = MLP(self.embed_dims, self.embed_dims, 4, 3)

    def init_weights(self) -> None:
        """Initialize weights."""
        if self.loss_cls.use_sigmoid:
            bias_init = bias_init_with_prob(0.01)
            nn.init.constant_(self.fc_cls.bias, bias_init)
        constant_init(self.fc_reg.layers[-1], 0., bias=0.)

    def forward(self, hidden_states: Tensor,
                references: Tensor) -> Tuple[Tensor, Tensor]:
        """Forward function.

        Args:
            hidden_states (Tensor): Features from transformer decoder. If
                `return_intermediate_dec` is True output has shape
                (num_decoder_layers, bs, num_queries, dim), else has shape
                (1, bs, num_queries, dim) which only contains the last layer
                outputs.
            references (Tensor): References from transformer decoder. If
                `return_intermediate_dec` is True output has shape
                (num_decoder_layers, bs, num_queries, 2/4), else has shape
                (1, bs, num_queries, 2/4) which only contains the last layer
                reference.

        Returns:
            tuple[Tensor]: results of head containing the following tensor.

            - layers_cls_scores (Tensor): Outputs from the classification
              head, shape (num_decoder_layers, bs, num_queries,
              cls_out_channels). Note cls_out_channels should include
              background.
            - layers_bbox_preds (Tensor): Sigmoid outputs from the
              regression head with normalized coordinate format
              (cx, cy, w, h), has shape (num_decoder_layers, bs,
              num_queries, 4).
        """
        layers_cls_scores = self.fc_cls(hidden_states)
        references_before_sigmoid = inverse_sigmoid(references, eps=1e-3)
        tmp_reg_preds = self.fc_reg(hidden_states)
        tmp_reg_preds[..., :references_before_sigmoid.
                      size(-1)] += references_before_sigmoid
        layers_bbox_preds = tmp_reg_preds.sigmoid()
        return layers_cls_scores, layers_bbox_preds

    def predict(self,
                hidden_states: Tensor,
                references: Tensor,
                batch_data_samples: SampleList,
                rescale: bool = True) -> InstanceList:
        """Perform forward propagation of the detection head and predict
        detection results on the features of the upstream network. Over-write
        because img_metas are needed as inputs for bbox_head.

        Args:
            hidden_states (Tensor): Feature from the transformer decoder, has
                shape (num_decoder_layers, bs, num_queries, dim).
            references (Tensor): References from the transformer decoder, has
                shape (num_decoder_layers, bs, num_queries, 2/4).
            batch_data_samples (List[:obj:`DetDataSample`]): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to True.

        Returns:
            list[obj:`InstanceData`]: Detection results of each image
            after the post process.
        """
        batch_img_metas = [
            data_samples.metainfo for data_samples in batch_data_samples
        ]
        last_layer_hidden_state = hidden_states[-1].unsqueeze(0)
        last_layer_reference = references[-1].unsqueeze(0)
        outs = self(last_layer_hidden_state, last_layer_reference)
        predictions = self.predict_by_feat(
            *outs, batch_img_metas=batch_img_metas, rescale=rescale)
        return predictions
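The heart of forward above is the anchor-refinement step: reference boxes are pulled back to logit space, offset by the regression MLP's output, then squashed to [0, 1] again. A standalone sketch with random tensors (shapes taken from the docstring; the simplified inverse_sigmoid below is a stand-in for mmdet's, not its exact implementation):

import torch

def inverse_sigmoid(x, eps=1e-3):
    # simplified stand-in for mmdet's inverse_sigmoid
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

num_layers, bs, num_queries = 6, 2, 300
tmp_reg_preds = torch.randn(num_layers, bs, num_queries, 4)  # fc_reg output
references = torch.rand(num_layers, bs, num_queries, 4)      # normalized anchors

# offset the first size(-1) coordinates in logit space, then re-normalize
tmp_reg_preds[..., :references.size(-1)] += inverse_sigmoid(references)
layers_bbox_preds = tmp_reg_preds.sigmoid()  # (6, 2, 300, 4), all in [0, 1]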
repo: ERD
file: ERD-main/mmdet/models/dense_heads/ga_retina_head.py
file_length: 4,224 · avg_line_length: 33.917355 · max_line_length: 75 · extension_type: py
code:

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import MaskedConv2d
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead


@MODELS.register_module()
class GARetinaHead(GuidedAnchorHead):
    """Guided-Anchor-based RetinaNet head."""

    def __init__(self,
                 num_classes: int,
                 in_channels: int,
                 stacked_convs: int = 4,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: OptConfigType = None,
                 init_cfg: OptMultiConfig = None,
                 **kwargs) -> None:
        if init_cfg is None:
            init_cfg = dict(
                type='Normal',
                layer='Conv2d',
                std=0.01,
                override=[
                    dict(
                        type='Normal',
                        name='conv_loc',
                        std=0.01,
                        bias_prob=0.01),
                    dict(
                        type='Normal',
                        name='retina_cls',
                        std=0.01,
                        bias_prob=0.01)
                ])
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            init_cfg=init_cfg,
            **kwargs)

    def _init_layers(self) -> None:
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))

        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
        num_anchors = self.square_anchor_generator.num_base_priors[0]
        self.conv_shape = nn.Conv2d(self.feat_channels, num_anchors * 2, 1)
        self.feature_adaption_cls = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        self.feature_adaption_reg = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        self.retina_cls = MaskedConv2d(
            self.feat_channels,
            self.num_base_priors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = MaskedConv2d(
            self.feat_channels, self.num_base_priors * 4, 3, padding=1)

    def forward_single(self, x: Tensor) -> Tuple[Tensor]:
        """Forward feature map of a single scale level."""
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)

        loc_pred = self.conv_loc(cls_feat)
        shape_pred = self.conv_shape(reg_feat)

        cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
        reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)

        if not self.training:
            mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
        else:
            mask = None
        cls_score = self.retina_cls(cls_feat, mask)
        bbox_pred = self.retina_reg(reg_feat, mask)
        return cls_score, bbox_pred, shape_pred, loc_pred
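At inference time, conv_loc's sigmoid output gates which spatial positions the masked heads evaluate (mask is None during training, so every position is computed). A toy sketch of that gating with plain tensors, as a dense approximation of what mmcv's MaskedConv2d does sparsely (the threshold value is assumed):

import torch

loc_filter_thr = 0.01                            # assumed self.loc_filter_thr
loc_pred = torch.randn(1, 1, 32, 32)             # objectness logits, one image
mask = loc_pred.sigmoid()[0] >= loc_filter_thr   # (1, 32, 32) boolean gate

cls_score = torch.randn(1, 9 * 80, 32, 32)       # stand-in retina_cls output
# MaskedConv2d skips suppressed positions entirely; zeroing them out
# afterwards is a dense approximation of the same result
cls_score = cls_score * mask.unsqueeze(1)
print(f'{mask.float().mean().item():.1%} of locations kept')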
repo: ERD
file: ERD-main/mmdet/models/dense_heads/ld_head.py
file_length: 10,750 · avg_line_length: 40.670543 · max_line_length: 79 · extension_type: py
code:

# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple

import torch
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox_overlaps
from mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean
from ..utils import multi_apply, unpack_gt_instances
from .gfl_head import GFLHead


@MODELS.register_module()
class LDHead(GFLHead):
    """Localization distillation Head.

    It utilizes the learned bbox distributions to transfer the localization
    dark knowledge from teacher to student. Original paper: `Localization
    Distillation for Object Detection. <https://arxiv.org/abs/2102.12252>`_

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        loss_ld (:obj:`ConfigDict` or dict): Config of Localization
            Distillation Loss (LD), T is the temperature for distillation.
    """

    def __init__(self,
                 num_classes: int,
                 in_channels: int,
                 loss_ld: ConfigType = dict(
                     type='LocalizationDistillationLoss',
                     loss_weight=0.25,
                     T=10),
                 **kwargs) -> None:
        super().__init__(
            num_classes=num_classes, in_channels=in_channels, **kwargs)
        self.loss_ld = MODELS.build(loss_ld)

    def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,
                            bbox_pred: Tensor, labels: Tensor,
                            label_weights: Tensor, bbox_targets: Tensor,
                            stride: Tuple[int], soft_targets: Tensor,
                            avg_factor: int):
        """Calculate the loss of a single scale level based on the features
        extracted by the detection head.

        Args:
            anchors (Tensor): Box reference for each scale level with shape
                (N, num_total_anchors, 4).
            cls_score (Tensor): Cls and quality joint scores for each scale
                level, has shape (N, num_classes, H, W).
            bbox_pred (Tensor): Box distribution logits for each scale
                level with shape (N, 4*(n+1), H, W), n is max value of
                integral set.
            labels (Tensor): Labels of each anchor with shape
                (N, num_total_anchors).
            label_weights (Tensor): Label weights of each anchor with shape
                (N, num_total_anchors).
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (N, num_total_anchors, 4).
            stride (tuple): Stride in this scale level.
            soft_targets (Tensor): Soft BBox regression targets.
            avg_factor (int): Average factor that is used to average
                the loss. When using sampling method, avg_factor is usually
                the sum of positive and negative priors. When using
                `PseudoSampler`, `avg_factor` is usually equal to the number
                of positive priors.

        Returns:
            dict[tuple, Tensor]: Loss components and weight targets.
        """
        assert stride[0] == stride[1], 'h stride is not equal to w stride!'
        anchors = anchors.reshape(-1, 4)
        cls_score = cls_score.permute(0, 2, 3,
                                      1).reshape(-1, self.cls_out_channels)
        bbox_pred = bbox_pred.permute(0, 2, 3,
                                      1).reshape(-1, 4 * (self.reg_max + 1))
        soft_targets = soft_targets.permute(0, 2, 3,
                                            1).reshape(-1,
                                                       4 * (self.reg_max + 1))

        bbox_targets = bbox_targets.reshape(-1, 4)
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)

        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)
        score = label_weights.new_zeros(labels.shape)

        if len(pos_inds) > 0:
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_bbox_pred = bbox_pred[pos_inds]
            pos_anchors = anchors[pos_inds]
            pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]

            weight_targets = cls_score.detach().sigmoid()
            weight_targets = weight_targets.max(dim=1)[0][pos_inds]
            pos_bbox_pred_corners = self.integral(pos_bbox_pred)
            pos_decode_bbox_pred = self.bbox_coder.decode(
                pos_anchor_centers, pos_bbox_pred_corners)
            pos_decode_bbox_targets = pos_bbox_targets / stride[0]
            score[pos_inds] = bbox_overlaps(
                pos_decode_bbox_pred.detach(),
                pos_decode_bbox_targets,
                is_aligned=True)
            pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
            pos_soft_targets = soft_targets[pos_inds]
            soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1)

            target_corners = self.bbox_coder.encode(pos_anchor_centers,
                                                    pos_decode_bbox_targets,
                                                    self.reg_max).reshape(-1)

            # regression loss
            loss_bbox = self.loss_bbox(
                pos_decode_bbox_pred,
                pos_decode_bbox_targets,
                weight=weight_targets,
                avg_factor=1.0)

            # dfl loss
            loss_dfl = self.loss_dfl(
                pred_corners,
                target_corners,
                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
                avg_factor=4.0)

            # ld loss
            loss_ld = self.loss_ld(
                pred_corners,
                soft_corners,
                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
                avg_factor=4.0)
        else:
            loss_ld = bbox_pred.sum() * 0
            loss_bbox = bbox_pred.sum() * 0
            loss_dfl = bbox_pred.sum() * 0
            weight_targets = bbox_pred.new_tensor(0)

        # cls (qfl) loss
        loss_cls = self.loss_cls(
            cls_score, (labels, score),
            weight=label_weights,
            avg_factor=avg_factor)

        return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum()

    def loss(self, x: List[Tensor], out_teacher: Tuple[Tensor],
             batch_data_samples: SampleList) -> dict:
        """
        Args:
            x (list[Tensor]): Features from FPN.
            out_teacher (tuple[Tensor]): The output of teacher.
            batch_data_samples (list[:obj:`DetDataSample`]): The batch
                data samples. It usually includes information such
                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.

        Returns:
            tuple[dict, list]: The loss components and proposals of each
            image.

            - losses (dict[str, Tensor]): A dictionary of loss components.
            - proposal_list (list[Tensor]): Proposals of each image.
        """
        outputs = unpack_gt_instances(batch_data_samples)
        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
            = outputs

        outs = self(x)
        soft_targets = out_teacher[1]
        loss_inputs = outs + (batch_gt_instances, batch_img_metas,
                              soft_targets)
        losses = self.loss_by_feat(
            *loss_inputs,
            batch_gt_instances_ignore=batch_gt_instances_ignore)

        return losses

    def loss_by_feat(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            soft_targets: List[Tensor],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Cls and quality scores for each scale
                level, each has shape (N, num_classes, H, W).
            bbox_preds (list[Tensor]): Box distribution logits for each scale
                level with shape (N, 4*(n+1), H, W), n is max value of
                integral set.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            soft_targets (list[Tensor]): Soft BBox regression targets.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, batch_img_metas, device=device)

        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            batch_gt_instances,
            batch_img_metas,
            batch_gt_instances_ignore=batch_gt_instances_ignore)

        (anchor_list, labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, avg_factor) = cls_reg_targets

        avg_factor = reduce_mean(
            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()

        losses_cls, losses_bbox, losses_dfl, losses_ld, \
            avg_factor = multi_apply(
                self.loss_by_feat_single,
                anchor_list,
                cls_scores,
                bbox_preds,
                labels_list,
                label_weights_list,
                bbox_targets_list,
                self.prior_generator.strides,
                soft_targets,
                avg_factor=avg_factor)

        avg_factor = sum(avg_factor) + 1e-6
        avg_factor = reduce_mean(avg_factor).item()
        losses_bbox = [x / avg_factor for x in losses_bbox]
        losses_dfl = [x / avg_factor for x in losses_dfl]
        return dict(
            loss_cls=losses_cls,
            loss_bbox=losses_bbox,
            loss_dfl=losses_dfl,
            loss_ld=losses_ld)
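LocalizationDistillationLoss itself is only referenced by config above. Conceptually it compares the student's and teacher's per-edge distribution logits (pred_corners vs. soft_corners); a minimal sketch of that idea as a temperature-scaled KL divergence, the usual form of such a distillation loss (not necessarily mmdet's exact implementation):

import torch
import torch.nn.functional as F

def ld_loss_sketch(pred_corners, soft_corners, T=10.0):
    """KL between per-edge corner distributions at temperature T.

    Both inputs are (num_pos * 4, reg_max + 1) logits, matching the
    reshaped tensors in loss_by_feat_single above.
    """
    p_teacher = F.softmax(soft_corners / T, dim=-1)
    log_p_student = F.log_softmax(pred_corners / T, dim=-1)
    # T**2 keeps gradient magnitudes comparable across temperatures
    return F.kl_div(log_p_student, p_teacher, reduction='batchmean') * T * T

student = torch.randn(8 * 4, 17)  # e.g. reg_max = 16
teacher = torch.randn(8 * 4, 17)
print(ld_loss_sketch(student, teacher))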
repo: ERD
file: ERD-main/mmdet/models/dense_heads/conditional_detr_head.py
file_length: 7,186 · avg_line_length: 41.526627 · max_line_length: 79 · extension_type: py
code:

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch
import torch.nn as nn
from mmengine.model import bias_init_with_prob
from torch import Tensor

from mmdet.models.layers.transformer import inverse_sigmoid
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList
from .detr_head import DETRHead


@MODELS.register_module()
class ConditionalDETRHead(DETRHead):
    """Head of Conditional DETR. Conditional DETR: Conditional DETR for Fast
    Training Convergence.

    More details can be found in the `paper
    <https://arxiv.org/abs/2108.06152>`_ .
    """

    def init_weights(self):
        """Initialize weights of the transformer head."""
        super().init_weights()
        # The initialization below for transformer head is very
        # important as we use Focal_loss for loss_cls
        if self.loss_cls.use_sigmoid:
            bias_init = bias_init_with_prob(0.01)
            nn.init.constant_(self.fc_cls.bias, bias_init)

    def forward(self, hidden_states: Tensor,
                references: Tensor) -> Tuple[Tensor, Tensor]:
        """Forward function.

        Args:
            hidden_states (Tensor): Features from transformer decoder. If
                `return_intermediate_dec` is True output has shape
                (num_decoder_layers, bs, num_queries, dim), else has shape
                (1, bs, num_queries, dim) which only contains the last layer
                outputs.
            references (Tensor): References from transformer decoder, has
                shape (bs, num_queries, 2).

        Returns:
            tuple[Tensor]: results of head containing the following tensor.

            - layers_cls_scores (Tensor): Outputs from the classification
              head, shape (num_decoder_layers, bs, num_queries,
              cls_out_channels). Note cls_out_channels should include
              background.
            - layers_bbox_preds (Tensor): Sigmoid outputs from the
              regression head with normalized coordinate format
              (cx, cy, w, h), has shape (num_decoder_layers, bs,
              num_queries, 4).
        """
        references_unsigmoid = inverse_sigmoid(references)
        layers_bbox_preds = []
        for layer_id in range(hidden_states.shape[0]):
            tmp_reg_preds = self.fc_reg(
                self.activate(self.reg_ffn(hidden_states[layer_id])))
            tmp_reg_preds[..., :2] += references_unsigmoid
            outputs_coord = tmp_reg_preds.sigmoid()
            layers_bbox_preds.append(outputs_coord)
        layers_bbox_preds = torch.stack(layers_bbox_preds)

        layers_cls_scores = self.fc_cls(hidden_states)
        return layers_cls_scores, layers_bbox_preds

    def loss(self, hidden_states: Tensor, references: Tensor,
             batch_data_samples: SampleList) -> dict:
        """Perform forward propagation and loss calculation of the detection
        head on the features of the upstream network.

        Args:
            hidden_states (Tensor): Features from the transformer decoder,
                has shape (num_decoder_layers, bs, num_queries, dim).
            references (Tensor): References from the transformer decoder,
                has shape (num_decoder_layers, bs, num_queries, 2).
            batch_data_samples (List[:obj:`DetDataSample`]): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.

        Returns:
            dict: A dictionary of loss components.
        """
        batch_gt_instances = []
        batch_img_metas = []
        for data_sample in batch_data_samples:
            batch_img_metas.append(data_sample.metainfo)
            batch_gt_instances.append(data_sample.gt_instances)

        outs = self(hidden_states, references)
        loss_inputs = outs + (batch_gt_instances, batch_img_metas)
        losses = self.loss_by_feat(*loss_inputs)
        return losses

    def loss_and_predict(
            self, hidden_states: Tensor, references: Tensor,
            batch_data_samples: SampleList) -> Tuple[dict, InstanceList]:
        """Perform forward propagation of the head, then calculate loss and
        predictions from the features and data samples. Over-write because
        img_metas are needed as inputs for bbox_head.

        Args:
            hidden_states (Tensor): Features from the transformer decoder,
                has shape (num_decoder_layers, bs, num_queries, dim).
            references (Tensor): References from the transformer decoder,
                has shape (num_decoder_layers, bs, num_queries, 2).
            batch_data_samples (list[:obj:`DetDataSample`]): Each item
                contains the meta information of each image and
                corresponding annotations.

        Returns:
            tuple: The return value is a tuple contains:

            - losses: (dict[str, Tensor]): A dictionary of loss components.
            - predictions (list[:obj:`InstanceData`]): Detection results of
              each image after the post process.
        """
        batch_gt_instances = []
        batch_img_metas = []
        for data_sample in batch_data_samples:
            batch_img_metas.append(data_sample.metainfo)
            batch_gt_instances.append(data_sample.gt_instances)

        outs = self(hidden_states, references)
        loss_inputs = outs + (batch_gt_instances, batch_img_metas)
        losses = self.loss_by_feat(*loss_inputs)

        predictions = self.predict_by_feat(
            *outs, batch_img_metas=batch_img_metas)
        return losses, predictions

    def predict(self,
                hidden_states: Tensor,
                references: Tensor,
                batch_data_samples: SampleList,
                rescale: bool = True) -> InstanceList:
        """Perform forward propagation of the detection head and predict
        detection results on the features of the upstream network. Over-write
        because img_metas are needed as inputs for bbox_head.

        Args:
            hidden_states (Tensor): Features from the transformer decoder,
                has shape (num_decoder_layers, bs, num_queries, dim).
            references (Tensor): References from the transformer decoder,
                has shape (num_decoder_layers, bs, num_queries, 2).
            batch_data_samples (List[:obj:`DetDataSample`]): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to True.

        Returns:
            list[obj:`InstanceData`]: Detection results of each image
            after the post process.
        """
        batch_img_metas = [
            data_samples.metainfo for data_samples in batch_data_samples
        ]
        last_layer_hidden_state = hidden_states[-1].unsqueeze(0)
        outs = self(last_layer_hidden_state, references)
        predictions = self.predict_by_feat(
            *outs, batch_img_metas=batch_img_metas, rescale=rescale)
        return predictions
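forward applies the shared 2-d reference points to every decoder layer's output: only the centre coordinates are conditioned on the references. A self-contained sketch of that loop (dimensions assumed; the single Linear stands in for the head's reg_ffn + activation + fc_reg branch):

import torch
import torch.nn as nn

num_layers, bs, num_queries, dim = 6, 2, 300, 256
hidden_states = torch.randn(num_layers, bs, num_queries, dim)
references = torch.rand(bs, num_queries, 2).clamp(1e-3, 1 - 1e-3)

fc_reg = nn.Linear(dim, 4)  # stand-in for the regression branch
references_unsigmoid = torch.log(references / (1 - references))

layers_bbox_preds = []
for layer_id in range(num_layers):
    tmp_reg_preds = fc_reg(hidden_states[layer_id])
    tmp_reg_preds[..., :2] += references_unsigmoid  # condition cx, cy only
    layers_bbox_preds.append(tmp_reg_preds.sigmoid())
layers_bbox_preds = torch.stack(layers_bbox_preds)  # (6, 2, 300, 4)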
repo: ERD
file: ERD-main/mmdet/models/dense_heads/ssd_head.py
file_length: 15,517 · avg_line_length: 41.749311 · max_line_length: 79 · extension_type: py
code:

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Sequence, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from torch import Tensor

from mmdet.registry import MODELS, TASK_UTILS
from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList
from ..losses import smooth_l1_loss
from ..task_modules.samplers import PseudoSampler
from ..utils import multi_apply
from .anchor_head import AnchorHead


# TODO: add loss evaluator for SSD
@MODELS.register_module()
class SSDHead(AnchorHead):
    """Implementation of `SSD head <https://arxiv.org/abs/1512.02325>`_

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (Sequence[int]): Number of channels in the input feature
            map.
        stacked_convs (int): Number of conv layers in cls and reg tower.
            Defaults to 0.
        feat_channels (int): Number of hidden channels when stacked_convs
            > 0. Defaults to 256.
        use_depthwise (bool): Whether to use DepthwiseSeparableConv.
            Defaults to False.
        conv_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to
            construct and config conv layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to
            construct and config norm layer. Defaults to None.
        act_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to
            construct and config activation layer. Defaults to None.
        anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor
            generator.
        bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder.
        reg_decoded_bbox (bool): If true, the regression loss would be
            applied directly on decoded bounding boxes, converting both
            the predicted boxes and regression targets to absolute
            coordinates format. Defaults to False. It should be `True` when
            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
        train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of
            anchor head.
        test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of
            anchor head.
        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
            dict], Optional): Initialization config dict.
    """  # noqa: W605

    def __init__(
        self,
        num_classes: int = 80,
        in_channels: Sequence[int] = (512, 1024, 512, 256, 256, 256),
        stacked_convs: int = 0,
        feat_channels: int = 256,
        use_depthwise: bool = False,
        conv_cfg: Optional[ConfigType] = None,
        norm_cfg: Optional[ConfigType] = None,
        act_cfg: Optional[ConfigType] = None,
        anchor_generator: ConfigType = dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=300,
            strides=[8, 16, 32, 64, 100, 300],
            ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
            basesize_ratio_range=(0.1, 0.9)),
        bbox_coder: ConfigType = dict(
            type='DeltaXYWHBBoxCoder',
            clip_border=True,
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0],
        ),
        reg_decoded_bbox: bool = False,
        train_cfg: Optional[ConfigType] = None,
        test_cfg: Optional[ConfigType] = None,
        init_cfg: MultiConfig = dict(
            type='Xavier', layer='Conv2d', distribution='uniform', bias=0)
    ) -> None:
        super(AnchorHead, self).__init__(init_cfg=init_cfg)
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.stacked_convs = stacked_convs
        self.feat_channels = feat_channels
        self.use_depthwise = use_depthwise
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg

        self.cls_out_channels = num_classes + 1  # add background class
        self.prior_generator = TASK_UTILS.build(anchor_generator)

        # Usually the numbers of anchors for each level are the same
        # except SSD detectors. So it is an int in the most dense
        # heads but a list of int in SSDHead
        self.num_base_priors = self.prior_generator.num_base_priors

        self._init_layers()

        self.bbox_coder = TASK_UTILS.build(bbox_coder)
        self.reg_decoded_bbox = reg_decoded_bbox
        self.use_sigmoid_cls = False
        self.cls_focal_loss = False
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        if self.train_cfg:
            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
            if self.train_cfg.get('sampler', None) is not None:
                self.sampler = TASK_UTILS.build(
                    self.train_cfg['sampler'],
                    default_args=dict(context=self))
            else:
                self.sampler = PseudoSampler(context=self)

    def _init_layers(self) -> None:
        """Initialize layers of the head."""
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        # TODO: Use registry to choose ConvModule type
        conv = DepthwiseSeparableConvModule \
            if self.use_depthwise else ConvModule

        for channel, num_base_priors in zip(self.in_channels,
                                            self.num_base_priors):
            cls_layers = []
            reg_layers = []
            in_channel = channel
            # build stacked conv tower, not used in default ssd
            for i in range(self.stacked_convs):
                cls_layers.append(
                    conv(
                        in_channel,
                        self.feat_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                reg_layers.append(
                    conv(
                        in_channel,
                        self.feat_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                in_channel = self.feat_channels
            # SSD-Lite head
            if self.use_depthwise:
                cls_layers.append(
                    ConvModule(
                        in_channel,
                        in_channel,
                        3,
                        padding=1,
                        groups=in_channel,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                reg_layers.append(
                    ConvModule(
                        in_channel,
                        in_channel,
                        3,
                        padding=1,
                        groups=in_channel,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            cls_layers.append(
                nn.Conv2d(
                    in_channel,
                    num_base_priors * self.cls_out_channels,
                    kernel_size=1 if self.use_depthwise else 3,
                    padding=0 if self.use_depthwise else 1))
            reg_layers.append(
                nn.Conv2d(
                    in_channel,
                    num_base_priors * 4,
                    kernel_size=1 if self.use_depthwise else 3,
                    padding=0 if self.use_depthwise else 1))
            self.cls_convs.append(nn.Sequential(*cls_layers))
            self.reg_convs.append(nn.Sequential(*reg_layers))

    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:
        """Forward features from the upstream network.

        Args:
            x (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple[list[Tensor], list[Tensor]]: A tuple of cls_scores list and
            bbox_preds list.

            - cls_scores (list[Tensor]): Classification scores for all scale \
              levels, each is a 4D-tensor, the channels number is \
              num_anchors * num_classes.
            - bbox_preds (list[Tensor]): Box energies / deltas for all scale \
              levels, each is a 4D-tensor, the channels number is \
              num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for feat, reg_conv, cls_conv in zip(x, self.reg_convs,
                                            self.cls_convs):
            cls_scores.append(cls_conv(feat))
            bbox_preds.append(reg_conv(feat))
        return cls_scores, bbox_preds

    def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,
                            anchor: Tensor, labels: Tensor,
                            label_weights: Tensor, bbox_targets: Tensor,
                            bbox_weights: Tensor,
                            avg_factor: int) -> Tuple[Tensor, Tensor]:
        """Compute loss of a single image.

        Args:
            cls_score (Tensor): Box scores for each image, has shape
                (num_total_anchors, num_classes).
            bbox_pred (Tensor): Box energies / deltas for each image
                level with shape (num_total_anchors, 4).
            anchor (Tensor): Box reference for each scale level with shape
                (num_total_anchors, 4).
            labels (Tensor): Labels of each anchor with shape
                (num_total_anchors,).
            label_weights (Tensor): Label weights of each anchor with shape
                (num_total_anchors,).
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (num_total_anchors, 4).
            bbox_weights (Tensor): BBox regression loss weights of each
                anchor with shape (num_total_anchors, 4).
            avg_factor (int): Average factor that is used to average
                the loss. When using sampling method, avg_factor is usually
                the sum of positive and negative priors. When using
                `PseudoSampler`, `avg_factor` is usually equal to the number
                of positive priors.

        Returns:
            Tuple[Tensor, Tensor]: A tuple of cls loss and bbox loss of one
            feature map.
        """
        loss_cls_all = F.cross_entropy(
            cls_score, labels, reduction='none') * label_weights
        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(
            as_tuple=False).reshape(-1)
        neg_inds = (labels == self.num_classes).nonzero(
            as_tuple=False).view(-1)

        num_pos_samples = pos_inds.size(0)
        num_neg_samples = self.train_cfg['neg_pos_ratio'] * num_pos_samples
        if num_neg_samples > neg_inds.size(0):
            num_neg_samples = neg_inds.size(0)
        topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
        loss_cls_pos = loss_cls_all[pos_inds].sum()
        loss_cls_neg = topk_loss_cls_neg.sum()
        loss_cls = (loss_cls_pos + loss_cls_neg) / avg_factor

        if self.reg_decoded_bbox:
            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
            # is applied directly on the decoded bounding boxes, it
            # decodes the already encoded coordinates to absolute format.
            bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)

        loss_bbox = smooth_l1_loss(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            beta=self.train_cfg['smoothl1_beta'],
            avg_factor=avg_factor)
        return loss_cls[None], loss_bbox

    def loss_by_feat(
        self,
        cls_scores: List[Tensor],
        bbox_preds: List[Tensor],
        batch_gt_instances: InstanceList,
        batch_img_metas: List[dict],
        batch_gt_instances_ignore: OptInstanceList = None
    ) -> Dict[str, List[Tensor]]:
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each has shape (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, list[Tensor]]: A dictionary of loss components. The
            dict has components below:

            - loss_cls (list[Tensor]): A list containing each feature map \
              classification loss.
            - loss_bbox (list[Tensor]): A list containing each feature map \
              regression loss.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, batch_img_metas, device=device)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            batch_gt_instances,
            batch_img_metas,
            batch_gt_instances_ignore=batch_gt_instances_ignore,
            unmap_outputs=True)
        (labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, avg_factor) = cls_reg_targets

        num_images = len(batch_img_metas)
        all_cls_scores = torch.cat([
            s.permute(0, 2, 3, 1).reshape(
                num_images, -1, self.cls_out_channels) for s in cls_scores
        ], 1)
        all_labels = torch.cat(labels_list, -1).view(num_images, -1)
        all_label_weights = torch.cat(label_weights_list,
                                      -1).view(num_images, -1)
        all_bbox_preds = torch.cat([
            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
            for b in bbox_preds
        ], -2)
        all_bbox_targets = torch.cat(bbox_targets_list,
                                     -2).view(num_images, -1, 4)
        all_bbox_weights = torch.cat(bbox_weights_list,
                                     -2).view(num_images, -1, 4)

        # concat all level anchors to a single tensor
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))

        losses_cls, losses_bbox = multi_apply(
            self.loss_by_feat_single,
            all_cls_scores,
            all_bbox_preds,
            all_anchors,
            all_labels,
            all_label_weights,
            all_bbox_targets,
            all_bbox_weights,
            avg_factor=avg_factor)
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
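The classification branch above uses OHEM-style hard negative mining: all positives contribute, while negatives are ranked by loss and capped at neg_pos_ratio per positive. A standalone sketch of that selection, mirroring loss_by_feat_single (sizes invented for illustration):

import torch
import torch.nn.functional as F

num_classes, neg_pos_ratio = 80, 3                  # ratio comes from train_cfg
logits = torch.randn(500, num_classes + 1)          # per-anchor class scores
labels = torch.randint(0, num_classes + 1, (500,))  # num_classes == background

loss_all = F.cross_entropy(logits, labels, reduction='none')
pos_inds = ((labels >= 0) & (labels < num_classes)).nonzero().reshape(-1)
neg_inds = (labels == num_classes).nonzero().reshape(-1)

# keep at most neg_pos_ratio negatives per positive, hardest first
num_neg = min(neg_pos_ratio * pos_inds.numel(), neg_inds.numel())
topk_neg_loss, _ = loss_all[neg_inds].topk(num_neg)
loss_cls = (loss_all[pos_inds].sum() + topk_neg_loss.sum()) / max(
    pos_inds.numel(), 1)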
repo: ERD
file: ERD-main/mmdet/models/dense_heads/fcos_head.py
file_length: 19,856 · avg_line_length: 42.546053 · max_line_length: 113 · extension_type: py
code:

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Tuple

import torch
import torch.nn as nn
from mmcv.cnn import Scale
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.utils import (ConfigType, InstanceList, MultiConfig,
                         OptInstanceList, RangeType, reduce_mean)
from ..utils import multi_apply
from .anchor_free_head import AnchorFreeHead

INF = 1e8


@MODELS.register_module()
class FCOSHead(AnchorFreeHead):
    """Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.

    The FCOS head does not use anchor boxes. Instead bounding boxes are
    predicted at each pixel and a centerness measure is used to suppress
    low-quality predictions. Here norm_on_bbox, centerness_on_reg,
    dcn_on_last_conv are training tricks used in the official repo, which
    will bring remarkable mAP gains of up to 4.9. Please see
    https://github.com/tianzhi0549/FCOS for more detail.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        strides (Sequence[int] or Sequence[Tuple[int, int]]): Strides of
            points in multiple feature levels. Defaults to
            (4, 8, 16, 32, 64).
        regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple
            level points.
        center_sampling (bool): If true, use center sampling.
            Defaults to False.
        center_sample_radius (float): Radius of center sampling.
            Defaults to 1.5.
        norm_on_bbox (bool): If true, normalize the regression targets with
            FPN strides. Defaults to False.
        centerness_on_reg (bool): If true, position centerness on the
            regress branch. Please refer to
            https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
            Defaults to False.
        conv_bias (bool or str): If specified as `auto`, it will be decided
            by the norm_cfg. Bias of conv will be set as True if `norm_cfg`
            is None, otherwise False. Defaults to "auto".
        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
        loss_centerness (:obj:`ConfigDict` or dict): Config of centerness
            loss.
        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
            config norm layer. Defaults to
            ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``.
        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
            dict]): Initialization config dict.

    Example:
        >>> self = FCOSHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, centerness = self.forward(feats)
        >>> assert len(cls_score) == len(self.scales)
    """  # noqa: E501

    def __init__(self,
                 num_classes: int,
                 in_channels: int,
                 regress_ranges: RangeType = ((-1, 64), (64, 128),
                                              (128, 256), (256, 512),
                                              (512, INF)),
                 center_sampling: bool = False,
                 center_sample_radius: float = 1.5,
                 norm_on_bbox: bool = False,
                 centerness_on_reg: bool = False,
                 loss_cls: ConfigType = dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox: ConfigType = dict(type='IoULoss',
                                              loss_weight=1.0),
                 loss_centerness: ConfigType = dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 norm_cfg: ConfigType = dict(
                     type='GN', num_groups=32, requires_grad=True),
                 init_cfg: MultiConfig = dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='conv_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs) -> None:
        self.regress_ranges = regress_ranges
        self.center_sampling = center_sampling
        self.center_sample_radius = center_sample_radius
        self.norm_on_bbox = norm_on_bbox
        self.centerness_on_reg = centerness_on_reg
        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            norm_cfg=norm_cfg,
            init_cfg=init_cfg,
            **kwargs)
        self.loss_centerness = MODELS.build(loss_centerness)

    def _init_layers(self) -> None:
        """Initialize layers of the head."""
        super()._init_layers()
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

    def forward(
            self, x: Tuple[Tensor]
    ) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:
        """Forward features from the upstream network.

        Args:
            x (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: A tuple of each level outputs.

            - cls_scores (list[Tensor]): Box scores for each scale level, \
              each is a 4D-tensor, the channel number is \
              num_points * num_classes.
            - bbox_preds (list[Tensor]): Box energies / deltas for each \
              scale level, each is a 4D-tensor, the channel number is \
              num_points * 4.
            - centernesses (list[Tensor]): centerness for each scale level, \
              each is a 4D-tensor, the channel number is num_points * 1.
        """
        return multi_apply(self.forward_single, x, self.scales, self.strides)

    def forward_single(self, x: Tensor, scale: Scale,
                       stride: int) -> Tuple[Tensor, Tensor, Tensor]:
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.
            stride (int): The corresponding stride for feature maps, only
                used to normalize the bbox prediction when self.norm_on_bbox
                is True.

        Returns:
            tuple: scores for each class, bbox predictions and centerness
            predictions of input feature maps.
        """
        cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)
        if self.centerness_on_reg:
            centerness = self.conv_centerness(reg_feat)
        else:
            centerness = self.conv_centerness(cls_feat)
        # scale the bbox_pred of different level
        # float to avoid overflow when enabling FP16
        bbox_pred = scale(bbox_pred).float()
        if self.norm_on_bbox:
            # bbox_pred needed for gradient computation has been modified
            # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace
            # F.relu(bbox_pred) with bbox_pred.clamp(min=0)
            bbox_pred = bbox_pred.clamp(min=0)
            if not self.training:
                bbox_pred *= stride
        else:
            bbox_pred = bbox_pred.exp()
        return cls_score, bbox_pred, centerness

    def loss_by_feat(
        self,
        cls_scores: List[Tensor],
        bbox_preds: List[Tensor],
        centernesses: List[Tensor],
        batch_gt_instances: InstanceList,
        batch_img_metas: List[dict],
        batch_gt_instances_ignore: OptInstanceList = None
    ) -> Dict[str, Tensor]:
        """Calculate the loss based on the features extracted by the
        detection head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            centernesses (list[Tensor]): centerness for each scale level,
                each is a 4D-tensor, the channel number is num_points * 1.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert len(cls_scores) == len(bbox_preds) == len(centernesses)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        all_level_points = self.prior_generator.grid_priors(
            featmap_sizes,
            dtype=bbox_preds[0].dtype,
            device=bbox_preds[0].device)
        labels, bbox_targets = self.get_targets(all_level_points,
                                                batch_gt_instances)

        num_imgs = cls_scores[0].size(0)
        # flatten cls_scores, bbox_preds and centerness
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_centerness = [
            centerness.permute(0, 2, 3, 1).reshape(-1)
            for centerness in centernesses
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_centerness = torch.cat(flatten_centerness)
        flatten_labels = torch.cat(labels)
        flatten_bbox_targets = torch.cat(bbox_targets)
        # repeat points to align with bbox_preds
        flatten_points = torch.cat(
            [points.repeat(num_imgs, 1) for points in all_level_points])

        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((flatten_labels >= 0)
                    & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)
        num_pos = torch.tensor(
            len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)
        num_pos = max(reduce_mean(num_pos), 1.0)
        loss_cls = self.loss_cls(
            flatten_cls_scores, flatten_labels, avg_factor=num_pos)

        pos_bbox_preds = flatten_bbox_preds[pos_inds]
        pos_centerness = flatten_centerness[pos_inds]
        pos_bbox_targets = flatten_bbox_targets[pos_inds]
        pos_centerness_targets = self.centerness_target(pos_bbox_targets)
        # centerness weighted iou loss
        centerness_denorm = max(
            reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)

        if len(pos_inds) > 0:
            pos_points = flatten_points[pos_inds]
            pos_decoded_bbox_preds = self.bbox_coder.decode(
                pos_points, pos_bbox_preds)
            pos_decoded_target_preds = self.bbox_coder.decode(
                pos_points, pos_bbox_targets)
            loss_bbox = self.loss_bbox(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds,
                weight=pos_centerness_targets,
                avg_factor=centerness_denorm)
            loss_centerness = self.loss_centerness(
                pos_centerness, pos_centerness_targets, avg_factor=num_pos)
        else:
            loss_bbox = pos_bbox_preds.sum()
            loss_centerness = pos_centerness.sum()

        return dict(
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            loss_centerness=loss_centerness)

    def get_targets(
            self, points: List[Tensor], batch_gt_instances: InstanceList
    ) -> Tuple[List[Tensor], List[Tensor]]:
        """Compute regression, classification and centerness targets for
        points in multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.

        Returns:
            tuple: Targets of each level.

            - concat_lvl_labels (list[Tensor]): Labels of each level.
            - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \
              level.
        """
        assert len(points) == len(self.regress_ranges)
        num_levels = len(points)
        # expand regress ranges to align with points
        expanded_regress_ranges = [
            points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
                points[i]) for i in range(num_levels)
        ]
        # concat all levels points and regress ranges
        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
        concat_points = torch.cat(points, dim=0)

        # the number of points per img, per lvl
        num_points = [center.size(0) for center in points]

        # get labels and bbox_targets of each image
        labels_list, bbox_targets_list = multi_apply(
            self._get_targets_single,
            batch_gt_instances,
            points=concat_points,
            regress_ranges=concat_regress_ranges,
            num_points_per_lvl=num_points)

        # split to per img, per level
        labels_list = [labels.split(num_points, 0) for labels in labels_list]
        bbox_targets_list = [
            bbox_targets.split(num_points, 0)
            for bbox_targets in bbox_targets_list
        ]

        # concat per level image
        concat_lvl_labels = []
        concat_lvl_bbox_targets = []
        for i in range(num_levels):
            concat_lvl_labels.append(
                torch.cat([labels[i] for labels in labels_list]))
            bbox_targets = torch.cat(
                [bbox_targets[i] for bbox_targets in bbox_targets_list])
            if self.norm_on_bbox:
                bbox_targets = bbox_targets / self.strides[i]
            concat_lvl_bbox_targets.append(bbox_targets)
        return concat_lvl_labels, concat_lvl_bbox_targets

    def _get_targets_single(
            self, gt_instances: InstanceData, points: Tensor,
            regress_ranges: Tensor,
            num_points_per_lvl: List[int]) -> Tuple[Tensor, Tensor]:
        """Compute regression and classification targets for a single
        image."""
        num_points = points.size(0)
        num_gts = len(gt_instances)
        gt_bboxes = gt_instances.bboxes
        gt_labels = gt_instances.labels

        if num_gts == 0:
            return gt_labels.new_full((num_points,), self.num_classes), \
                   gt_bboxes.new_zeros((num_points, 4))

        areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
            gt_bboxes[:, 3] - gt_bboxes[:, 1])
        # TODO: figure out why these two are different
        # areas = areas[None].expand(num_points, num_gts)
        areas = areas[None].repeat(num_points, 1)
        regress_ranges = regress_ranges[:, None, :].expand(
            num_points, num_gts, 2)
        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
        xs, ys = points[:, 0], points[:, 1]
        xs = xs[:, None].expand(num_points, num_gts)
        ys = ys[:, None].expand(num_points, num_gts)

        left = xs - gt_bboxes[..., 0]
        right = gt_bboxes[..., 2] - xs
        top = ys - gt_bboxes[..., 1]
        bottom = gt_bboxes[..., 3] - ys
        bbox_targets = torch.stack((left, top, right, bottom), -1)

        if self.center_sampling:
            # condition1: inside a `center bbox`
            radius = self.center_sample_radius
            center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
            center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
            center_gts = torch.zeros_like(gt_bboxes)
            stride = center_xs.new_zeros(center_xs.shape)

            # project the points on current lvl back to the `original` sizes
            lvl_begin = 0
            for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
                lvl_end = lvl_begin + num_points_lvl
                stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
                lvl_begin = lvl_end

            x_mins = center_xs - stride
            y_mins = center_ys - stride
            x_maxs = center_xs + stride
            y_maxs = center_ys + stride
            center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
                                             x_mins, gt_bboxes[..., 0])
            center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
                                             y_mins, gt_bboxes[..., 1])
            center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
                                             gt_bboxes[..., 2], x_maxs)
            center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
                                             gt_bboxes[..., 3], y_maxs)
            cb_dist_left = xs - center_gts[..., 0]
            cb_dist_right = center_gts[..., 2] - xs
            cb_dist_top = ys - center_gts[..., 1]
            cb_dist_bottom = center_gts[..., 3] - ys
            center_bbox = torch.stack(
                (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom),
                -1)
            inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
        else:
            # condition1: inside a gt bbox
            inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0

        # condition2: limit the regression range for each location
        max_regress_distance = bbox_targets.max(-1)[0]
        inside_regress_range = (
            (max_regress_distance >= regress_ranges[..., 0])
            & (max_regress_distance <= regress_ranges[..., 1]))

        # if there are still more than one objects for a location,
        # we choose the one with minimal area
        areas[inside_gt_bbox_mask == 0] = INF
        areas[inside_regress_range == 0] = INF
        min_area, min_area_inds = areas.min(dim=1)

        labels = gt_labels[min_area_inds]
        labels[min_area == INF] = self.num_classes  # set as BG
        bbox_targets = bbox_targets[range(num_points), min_area_inds]

        return labels, bbox_targets

    def centerness_target(self, pos_bbox_targets: Tensor) -> Tensor:
        """Compute centerness targets.

        Args:
            pos_bbox_targets (Tensor): BBox targets of positive bboxes in
                shape (num_pos, 4)

        Returns:
            Tensor: Centerness target.
        """
        # only calculate pos centerness targets, otherwise there may be nan
        left_right = pos_bbox_targets[:, [0, 2]]
        top_bottom = pos_bbox_targets[:, [1, 3]]
        if len(left_right) == 0:
            centerness_targets = left_right[..., 0]
        else:
            centerness_targets = (
                left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
                    top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
        return torch.sqrt(centerness_targets)
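centerness_target is compact enough to verify by hand: a location scores the geometric mean of its left/right and top/bottom balance, so a point dead-centre in its box gets 1.0 and off-centre points decay towards 0. A standalone check of the same formula:

import torch

def centerness_target(pos_bbox_targets: torch.Tensor) -> torch.Tensor:
    """Centerness from (left, top, right, bottom) distances, as in the head."""
    left_right = pos_bbox_targets[:, [0, 2]]
    top_bottom = pos_bbox_targets[:, [1, 3]]
    centerness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
        top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
    return torch.sqrt(centerness)

# a centred point scores 1.0; an off-centre point decays towards 0
print(centerness_target(torch.tensor([[10., 10., 10., 10.],
                                      [2., 8., 18., 12.]])))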
repo: ERD
file: ERD-main/mmdet/models/dense_heads/lad_head.py
file_length: 9,938 · avg_line_length: 42.784141 · max_line_length: 79 · extension_type: py
code:

# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional

import torch
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox_overlaps
from mmdet.utils import InstanceList, OptInstanceList
from ..utils import levels_to_images, multi_apply, unpack_gt_instances
from .paa_head import PAAHead


@MODELS.register_module()
class LADHead(PAAHead):
    """Label Assignment Head from the paper: `Improving Object Detection by
    Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_"""

    def get_label_assignment(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            iou_preds: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> tuple:
        """Get label assignment (from teacher).

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each has shape (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            iou_preds (list[Tensor]): iou_preds for each scale level with
                shape (N, num_anchors * 1, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            tuple: Returns a tuple containing label assignment variables.

            - labels (Tensor): Labels of all anchors, each with
              shape (num_anchors,).
            - labels_weight (Tensor): Label weights of all anchors, each
              with shape (num_anchors,).
            - bboxes_target (Tensor): BBox targets of all anchors, each
              with shape (num_anchors, 4).
            - bboxes_weight (Tensor): BBox weights of all anchors, each
              with shape (num_anchors, 4).
            - pos_inds_flatten (Tensor): Indices of all positive samples
              among all anchors.
            - pos_anchors (Tensor): Positive anchors.
            - num_pos (int): Number of positive anchors.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, batch_img_metas, device=device)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            batch_gt_instances,
            batch_img_metas,
            batch_gt_instances_ignore=batch_gt_instances_ignore,
        )
        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
         pos_gt_index) = cls_reg_targets
        cls_scores = levels_to_images(cls_scores)
        cls_scores = [
            item.reshape(-1, self.cls_out_channels) for item in cls_scores
        ]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
        pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,
                                       cls_scores, bbox_preds, labels,
                                       labels_weight, bboxes_target,
                                       bboxes_weight, pos_inds)

        with torch.no_grad():
            reassign_labels, reassign_label_weight, \
                reassign_bbox_weights, num_pos = multi_apply(
                    self.paa_reassign, pos_losses_list, labels, labels_weight,
                    bboxes_weight, pos_inds, pos_gt_index, anchor_list)
            num_pos = sum(num_pos)
        # convert all tensor list to a flatten tensor
        labels = torch.cat(reassign_labels, 0).view(-1)
        flatten_anchors = torch.cat(
            [torch.cat(item, 0) for item in anchor_list])
        labels_weight = torch.cat(reassign_label_weight, 0).view(-1)
        bboxes_target = torch.cat(bboxes_target,
                                  0).view(-1, bboxes_target[0].size(-1))

        pos_inds_flatten = ((labels >= 0)
                            &
                            (labels < self.num_classes)).nonzero().reshape(-1)

        if num_pos:
            pos_anchors = flatten_anchors[pos_inds_flatten]
        else:
            pos_anchors = None

        label_assignment_results = (labels, labels_weight, bboxes_target,
                                    bboxes_weight, pos_inds_flatten,
                                    pos_anchors, num_pos)
        return label_assignment_results

    def loss(self, x: List[Tensor], label_assignment_results: tuple,
             batch_data_samples: SampleList) -> dict:
        """Forward train with the available label assignment (student receives
        from teacher).

        Args:
            x (list[Tensor]): Features from FPN.
            label_assignment_results (tuple): As the outputs defined in the
                function `self.get_label_assignment`.
            batch_data_samples (list[:obj:`DetDataSample`]): The batch
                data samples. It usually includes information such
                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.

        Returns:
            losses: (dict[str, Tensor]): A dictionary of loss components.
        """
        outputs = unpack_gt_instances(batch_data_samples)
        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
            = outputs

        outs = self(x)
        loss_inputs = outs + (batch_gt_instances, batch_img_metas)
        losses = self.loss_by_feat(
            *loss_inputs,
            batch_gt_instances_ignore=batch_gt_instances_ignore,
            label_assignment_results=label_assignment_results)
        return losses

    def loss_by_feat(self,
                     cls_scores: List[Tensor],
                     bbox_preds: List[Tensor],
                     iou_preds: List[Tensor],
                     batch_gt_instances: InstanceList,
                     batch_img_metas: List[dict],
                     batch_gt_instances_ignore: OptInstanceList = None,
                     label_assignment_results: Optional[tuple] = None
                     ) -> dict:
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each has shape (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            iou_preds (list[Tensor]): iou_preds for each scale level with
                shape (N, num_anchors * 1, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.
            label_assignment_results (tuple, optional): As the outputs
                defined in the function `self.get_label_assignment`.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        (labels, labels_weight, bboxes_target, bboxes_weight,
         pos_inds_flatten, pos_anchors, num_pos) = label_assignment_results

        cls_scores = levels_to_images(cls_scores)
        cls_scores = [
            item.reshape(-1, self.cls_out_channels) for item in cls_scores
        ]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
        iou_preds = levels_to_images(iou_preds)
        iou_preds = [item.reshape(-1, 1) for item in iou_preds]

        # convert all tensor list to a flatten tensor
        cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
        bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
        iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))

        losses_cls = self.loss_cls(
            cls_scores,
            labels,
            labels_weight,
            avg_factor=max(num_pos, len(batch_img_metas)))  # avoid num_pos=0
        if num_pos:
            pos_bbox_pred = self.bbox_coder.decode(
                pos_anchors, bbox_preds[pos_inds_flatten])
            pos_bbox_target = bboxes_target[pos_inds_flatten]
            iou_target = bbox_overlaps(
                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
            losses_iou = self.loss_centerness(
                iou_preds[pos_inds_flatten],
                iou_target.unsqueeze(-1),
                avg_factor=num_pos)
            losses_bbox = self.loss_bbox(
                pos_bbox_pred, pos_bbox_target, avg_factor=num_pos)
        else:
            losses_iou = iou_preds.sum() * 0
            losses_bbox = bbox_preds.sum() * 0

        return dict(
            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
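The IoU branch above is supervised with the aligned overlap between decoded positive predictions and their assigned targets, with the prediction detached so only the IoU head receives that gradient. A small sketch of the target computation (assumes mmdet is installed; it reuses the same bbox_overlaps helper the head imports):

import torch
from mmdet.structures.bbox import bbox_overlaps

# decoded positive predictions vs. assigned targets, (num_pos, 4) in xyxy
pos_bbox_pred = torch.tensor([[0., 0., 10., 10.]])
pos_bbox_target = torch.tensor([[1., 1., 11., 11.]])

# detach() mirrors the head: the box regressor is not trained through
# the IoU-prediction loss
iou_target = bbox_overlaps(
    pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
print(iou_target)  # 81 / 119 ≈ 0.68 for this pair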
repo: ERD
file: ERD-main/mmdet/models/dense_heads/yolo_head.py
code:

# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import copy
import warnings
from typing import List, Optional, Sequence, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, is_norm
from mmengine.model import bias_init_with_prob, constant_init, normal_init
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.registry import MODELS, TASK_UTILS
from mmdet.utils import (ConfigType, InstanceList, OptConfigType,
                         OptInstanceList)
from ..task_modules.samplers import PseudoSampler
from ..utils import filter_scores_and_topk, images_to_levels, multi_apply
from .base_dense_head import BaseDenseHead


@MODELS.register_module()
class YOLOV3Head(BaseDenseHead):
    """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.

    Args:
        num_classes (int): The number of object classes (w/o background).
        in_channels (Sequence[int]): Number of input channels per scale.
        out_channels (Sequence[int]): The number of output channels per scale
            before the final 1x1 layer. Default: (1024, 512, 256).
        anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor
            generator.
        bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder.
        featmap_strides (Sequence[int]): The stride of each scale.
            Should be in descending order. Defaults to (32, 16, 8).
        one_hot_smoother (float): Set a non-zero value to enable
            label-smoothing. Defaults to 0.
        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            convolution layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
            config norm layer. Defaults to dict(type='BN',
            requires_grad=True).
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation
            layer. Defaults to dict(type='LeakyReLU', negative_slope=0.1).
        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
        loss_conf (:obj:`ConfigDict` or dict): Config of confidence loss.
        loss_xy (:obj:`ConfigDict` or dict): Config of xy coordinate loss.
        loss_wh (:obj:`ConfigDict` or dict): Config of wh coordinate loss.
        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
            YOLOV3 head. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
            YOLOV3 head. Defaults to None.
    """

    def __init__(self,
                 num_classes: int,
                 in_channels: Sequence[int],
                 out_channels: Sequence[int] = (1024, 512, 256),
                 anchor_generator: ConfigType = dict(
                     type='YOLOAnchorGenerator',
                     base_sizes=[[(116, 90), (156, 198), (373, 326)],
                                 [(30, 61), (62, 45), (59, 119)],
                                 [(10, 13), (16, 30), (33, 23)]],
                     strides=[32, 16, 8]),
                 bbox_coder: ConfigType = dict(type='YOLOBBoxCoder'),
                 featmap_strides: Sequence[int] = (32, 16, 8),
                 one_hot_smoother: float = 0.,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
                 act_cfg: ConfigType = dict(
                     type='LeakyReLU', negative_slope=0.1),
                 loss_cls: ConfigType = dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 loss_conf: ConfigType = dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 loss_xy: ConfigType = dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 loss_wh: ConfigType = dict(type='MSELoss', loss_weight=1.0),
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None) -> None:
        super().__init__(init_cfg=None)
        # Check params
        assert (len(in_channels) == len(out_channels) ==
                len(featmap_strides))

        self.num_classes = num_classes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.featmap_strides = featmap_strides
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        if self.train_cfg:
            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
            if train_cfg.get('sampler', None) is not None:
                self.sampler = TASK_UTILS.build(
                    self.train_cfg['sampler'], context=self)
            else:
                self.sampler = PseudoSampler()

        self.one_hot_smoother = one_hot_smoother

        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg

        self.bbox_coder = TASK_UTILS.build(bbox_coder)

        self.prior_generator = TASK_UTILS.build(anchor_generator)

        self.loss_cls = MODELS.build(loss_cls)
        self.loss_conf = MODELS.build(loss_conf)
        self.loss_xy = MODELS.build(loss_xy)
        self.loss_wh = MODELS.build(loss_wh)

        self.num_base_priors = self.prior_generator.num_base_priors[0]
        assert len(
            self.prior_generator.num_base_priors) == len(featmap_strides)
        self._init_layers()

    @property
    def num_levels(self) -> int:
        """int: number of feature map levels"""
        return len(self.featmap_strides)

    @property
    def num_attrib(self) -> int:
        """int: number of attributes in pred_map, bboxes (4) +
        objectness (1) + num_classes"""
        return 5 + self.num_classes

    def _init_layers(self) -> None:
        """Initialize conv layers in YOLOv3 head."""
        self.convs_bridge = nn.ModuleList()
        self.convs_pred = nn.ModuleList()
        for i in range(self.num_levels):
            conv_bridge = ConvModule(
                self.in_channels[i],
                self.out_channels[i],
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            conv_pred = nn.Conv2d(self.out_channels[i],
                                  self.num_base_priors * self.num_attrib, 1)

            self.convs_bridge.append(conv_bridge)
            self.convs_pred.append(conv_pred)

    def init_weights(self) -> None:
        """Initialize weights."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, mean=0, std=0.01)
            if is_norm(m):
                constant_init(m, 1)

        # Use prior in model initialization to improve stability
        for conv_pred, stride in zip(self.convs_pred, self.featmap_strides):
            bias = conv_pred.bias.reshape(self.num_base_priors, -1)
            # init objectness with prior of 8 objects per feature map
            # refer to https://github.com/ultralytics/yolov3
            nn.init.constant_(bias.data[:, 4],
                              bias_init_with_prob(8 / (608 / stride)**2))
            nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01))

    def forward(self, x: Tuple[Tensor, ...]) -> tuple:
        """Forward features from the upstream network.

        Args:
            x (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple[Tensor]: A tuple of multi-level prediction maps, each is a
            4D-tensor of shape (batch_size, 5+num_classes, height, width).
        """
        assert len(x) == self.num_levels
        pred_maps = []
        for i in range(self.num_levels):
            feat = x[i]
            feat = self.convs_bridge[i](feat)
            pred_map = self.convs_pred[i](feat)
            pred_maps.append(pred_map)

        return tuple(pred_maps),

    def predict_by_feat(self,
                        pred_maps: Sequence[Tensor],
                        batch_img_metas: Optional[List[dict]],
                        cfg: OptConfigType = None,
                        rescale: bool = False,
                        with_nms: bool = True) -> InstanceList:
        """Transform a batch of output features extracted from the head into
        bbox results. It has been accelerated since PR #5991.

        Args:
            pred_maps (Sequence[Tensor]): Raw predictions for a batch of
                images.
            batch_img_metas (list[dict], Optional): Batch image meta info.
                Defaults to None.
            cfg (:obj:`ConfigDict` or dict, optional): Test / postprocessing
                configuration, if None, test_cfg would be used.
                Defaults to None.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
            with_nms (bool): If True, do nms before return boxes.
                Defaults to True.

        Returns:
            list[:obj:`InstanceData`]: Object detection results of each image
            after the post process. Each item usually contains following
            keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4), the last
              dimension 4 arrange as (x1, y1, x2, y2).
        """
        assert len(pred_maps) == self.num_levels
        cfg = self.test_cfg if cfg is None else cfg
        cfg = copy.deepcopy(cfg)

        num_imgs = len(batch_img_metas)
        featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps]

        mlvl_anchors = self.prior_generator.grid_priors(
            featmap_sizes, device=pred_maps[0].device)
        flatten_preds = []
        flatten_strides = []
        for pred, stride in zip(pred_maps, self.featmap_strides):
            pred = pred.permute(0, 2, 3,
                                1).reshape(num_imgs, -1, self.num_attrib)
            pred[..., :2].sigmoid_()
            flatten_preds.append(pred)
            flatten_strides.append(
                pred.new_tensor(stride).expand(pred.size(1)))

        flatten_preds = torch.cat(flatten_preds, dim=1)
        flatten_bbox_preds = flatten_preds[..., :4]
        flatten_objectness = flatten_preds[..., 4].sigmoid()
        flatten_cls_scores = flatten_preds[..., 5:].sigmoid()
        flatten_anchors = torch.cat(mlvl_anchors)
        flatten_strides = torch.cat(flatten_strides)
        flatten_bboxes = self.bbox_coder.decode(flatten_anchors,
                                                flatten_bbox_preds,
                                                flatten_strides.unsqueeze(-1))
        results_list = []
        for (bboxes, scores, objectness,
             img_meta) in zip(flatten_bboxes, flatten_cls_scores,
                              flatten_objectness, batch_img_metas):
            # Filtering out all predictions with conf < conf_thr
            conf_thr = cfg.get('conf_thr', -1)
            if conf_thr > 0:
                conf_inds = objectness >= conf_thr
                bboxes = bboxes[conf_inds, :]
                scores = scores[conf_inds, :]
                objectness = objectness[conf_inds]

            score_thr = cfg.get('score_thr', 0)
            nms_pre = cfg.get('nms_pre', -1)
            scores, labels, keep_idxs, _ = filter_scores_and_topk(
                scores, score_thr, nms_pre)

            results = InstanceData(
                scores=scores,
                labels=labels,
                bboxes=bboxes[keep_idxs],
                score_factors=objectness[keep_idxs],
            )
            results = self._bbox_post_process(
                results=results,
                cfg=cfg,
                rescale=rescale,
                with_nms=with_nms,
                img_meta=img_meta)
            results_list.append(results)
        return results_list

    def loss_by_feat(
            self,
            pred_maps: Sequence[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Calculate the loss based on the features extracted by the detection head. Args: pred_maps (list[Tensor]): Prediction map for each scale level, shape (N, num_anchors * num_attrib, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ num_imgs = len(batch_img_metas) device = pred_maps[0][0].device featmap_sizes = [ pred_maps[i].shape[-2:] for i in range(self.num_levels) ] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [mlvl_anchors for _ in range(num_imgs)] responsible_flag_list = [] for img_id in range(num_imgs): responsible_flag_list.append( self.responsible_flags(featmap_sizes, batch_gt_instances[img_id].bboxes, device)) target_maps_list, neg_maps_list = self.get_targets( anchor_list, responsible_flag_list, batch_gt_instances) losses_cls, losses_conf, losses_xy, losses_wh = multi_apply( self.loss_by_feat_single, pred_maps, target_maps_list, neg_maps_list) return dict( loss_cls=losses_cls, loss_conf=losses_conf, loss_xy=losses_xy, loss_wh=losses_wh) def loss_by_feat_single(self, pred_map: Tensor, target_map: Tensor, neg_map: Tensor) -> tuple: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: pred_map (Tensor): Raw predictions for a single level. target_map (Tensor): The Ground-Truth target for a single level. neg_map (Tensor): The negative masks for a single level. Returns: tuple: loss_cls (Tensor): Classification loss. loss_conf (Tensor): Confidence loss. loss_xy (Tensor): Regression loss of x, y coordinate. loss_wh (Tensor): Regression loss of w, h coordinate. """ num_imgs = len(pred_map) pred_map = pred_map.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_attrib) neg_mask = neg_map.float() pos_mask = target_map[..., 4] pos_and_neg_mask = neg_mask + pos_mask pos_mask = pos_mask.unsqueeze(dim=-1) if torch.max(pos_and_neg_mask) > 1.: warnings.warn('There is overlap between pos and neg sample.') pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.) pred_xy = pred_map[..., :2] pred_wh = pred_map[..., 2:4] pred_conf = pred_map[..., 4] pred_label = pred_map[..., 5:] target_xy = target_map[..., :2] target_wh = target_map[..., 2:4] target_conf = target_map[..., 4] target_label = target_map[..., 5:] loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask) loss_conf = self.loss_conf( pred_conf, target_conf, weight=pos_and_neg_mask) loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask) loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask) return loss_cls, loss_conf, loss_xy, loss_wh def get_targets(self, anchor_list: List[List[Tensor]], responsible_flag_list: List[List[Tensor]], batch_gt_instances: List[InstanceData]) -> tuple: """Compute target maps for anchors in multiple images. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_total_anchors, 4). responsible_flag_list (list[list[Tensor]]): Multi level responsible flags of each image. 
Each element is a tensor of shape (num_total_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: Usually returns a tuple containing learning targets. - target_map_list (list[Tensor]): Target map of each level. - neg_map_list (list[Tensor]): Negative map of each level. """ num_imgs = len(anchor_list) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] results = multi_apply(self._get_targets_single, anchor_list, responsible_flag_list, batch_gt_instances) all_target_maps, all_neg_maps = results assert num_imgs == len(all_target_maps) == len(all_neg_maps) target_maps_list = images_to_levels(all_target_maps, num_level_anchors) neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) return target_maps_list, neg_maps_list def _get_targets_single(self, anchors: List[Tensor], responsible_flags: List[Tensor], gt_instances: InstanceData) -> tuple: """Generate matching bounding box prior and converted GT. Args: anchors (List[Tensor]): Multi-level anchors of the image. responsible_flags (List[Tensor]): Multi-level responsible flags of anchors gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. Returns: tuple: target_map (Tensor): Predication target map of each scale level, shape (num_total_anchors, 5+num_classes) neg_map (Tensor): Negative map of each scale level, shape (num_total_anchors,) """ gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels anchor_strides = [] for i in range(len(anchors)): anchor_strides.append( torch.tensor(self.featmap_strides[i], device=gt_bboxes.device).repeat(len(anchors[i]))) concat_anchors = torch.cat(anchors) concat_responsible_flags = torch.cat(responsible_flags) anchor_strides = torch.cat(anchor_strides) assert len(anchor_strides) == len(concat_anchors) == \ len(concat_responsible_flags) pred_instances = InstanceData( priors=concat_anchors, responsible_flags=concat_responsible_flags) assign_result = self.assigner.assign(pred_instances, gt_instances) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) target_map = concat_anchors.new_zeros( concat_anchors.size(0), self.num_attrib) target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( sampling_result.pos_priors, sampling_result.pos_gt_bboxes, anchor_strides[sampling_result.pos_inds]) target_map[sampling_result.pos_inds, 4] = 1 gt_labels_one_hot = F.one_hot( gt_labels, num_classes=self.num_classes).float() if self.one_hot_smoother != 0: # label smooth gt_labels_one_hot = gt_labels_one_hot * ( 1 - self.one_hot_smoother ) + self.one_hot_smoother / self.num_classes target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ sampling_result.pos_assigned_gt_inds] neg_map = concat_anchors.new_zeros( concat_anchors.size(0), dtype=torch.uint8) neg_map[sampling_result.neg_inds] = 1 return target_map, neg_map def responsible_flags(self, featmap_sizes: List[tuple], gt_bboxes: Tensor, device: str) -> List[Tensor]: """Generate responsible anchor flags of grid cells in multiple scales. Args: featmap_sizes (List[tuple]): List of feature map sizes in multiple feature levels. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). device (str): Device where the anchors will be put on. 
Return: List[Tensor]: responsible flags of anchors in multiple level """ assert self.num_levels == len(featmap_sizes) multi_level_responsible_flags = [] for i in range(self.num_levels): anchor_stride = self.prior_generator.strides[i] feat_h, feat_w = featmap_sizes[i] gt_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) gt_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) gt_grid_x = torch.floor(gt_cx / anchor_stride[0]).long() gt_grid_y = torch.floor(gt_cy / anchor_stride[1]).long() # row major indexing gt_bboxes_grid_idx = gt_grid_y * feat_w + gt_grid_x responsible_grid = torch.zeros( feat_h * feat_w, dtype=torch.uint8, device=device) responsible_grid[gt_bboxes_grid_idx] = 1 responsible_grid = responsible_grid[:, None].expand( responsible_grid.size(0), self.prior_generator.num_base_priors[i]).contiguous().view(-1) multi_level_responsible_flags.append(responsible_grid) return multi_level_responsible_flags
22,709
42.011364
79
py
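`YOLOV3Head.responsible_flags` above marks, per feature level, the grid cell containing each ground-truth centre as responsible and repeats that flag for every base prior in the cell. A standalone sketch for one level, with assumed (not config-derived) sizes and strides:

import torch

feat_h, feat_w = 4, 4          # feature map size at this level (assumed)
stride = 32                    # downsample factor of this level (assumed)
num_base_priors = 3            # anchors per grid cell

gt_bboxes = torch.tensor([[40., 40., 90., 100.]])  # one GT box (x1, y1, x2, y2)

# Grid cell of the box centre, row-major indexing as in the head above.
gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5
gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5
grid_x = torch.floor(gt_cx / stride).long()
grid_y = torch.floor(gt_cy / stride).long()
cell_idx = grid_y * feat_w + grid_x

responsible = torch.zeros(feat_h * feat_w, dtype=torch.uint8)
responsible[cell_idx] = 1
# Expand per-cell flags to per-anchor flags: (H*W,) -> (H*W*num_base_priors,)
responsible = responsible[:, None].expand(-1, num_base_priors).reshape(-1)
print(responsible.sum())  # 3: all anchors of the responsible cell are flagged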
ERD
ERD-main/mmdet/models/dense_heads/centripetal_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmengine.model import normal_init from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, OptInstanceList, OptMultiConfig) from ..utils import multi_apply from .corner_head import CornerHead @MODELS.register_module() class CentripetalHead(CornerHead): """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection. CentripetalHead inherits from :class:`CornerHead`. It removes the embedding branch and adds guiding shift and centripetal shift branches. More details can be found in the `paper <https://arxiv.org/abs/2003.09119>`_ . Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. num_feat_levels (int): Levels of feature from the previous module. 2 for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104 outputs the final feature and intermediate supervision feature and HourglassNet-52 only outputs the final feature. Defaults to 2. corner_emb_channels (int): Channel of embedding vector. Defaults to 1. train_cfg (:obj:`ConfigDict` or dict, optional): Training config. Useless in CornerHead, but we keep this variable for SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CornerHead. loss_heatmap (:obj:`ConfigDict` or dict): Config of corner heatmap loss. Defaults to GaussianFocalLoss. loss_embedding (:obj:`ConfigDict` or dict): Config of corner embedding loss. Defaults to AssociativeEmbeddingLoss. loss_offset (:obj:`ConfigDict` or dict): Config of corner offset loss. Defaults to SmoothL1Loss. loss_guiding_shift (:obj:`ConfigDict` or dict): Config of guiding shift loss. Defaults to SmoothL1Loss. loss_centripetal_shift (:obj:`ConfigDict` or dict): Config of centripetal shift loss. Defaults to SmoothL1Loss. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. """ def __init__(self, *args, centripetal_shift_channels: int = 2, guiding_shift_channels: int = 2, feat_adaption_conv_kernel: int = 3, loss_guiding_shift: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=0.05), loss_centripetal_shift: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=1), init_cfg: OptMultiConfig = None, **kwargs) -> None: assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' assert centripetal_shift_channels == 2, ( 'CentripetalHead only support centripetal_shift_channels == 2') self.centripetal_shift_channels = centripetal_shift_channels assert guiding_shift_channels == 2, ( 'CentripetalHead only support guiding_shift_channels == 2') self.guiding_shift_channels = guiding_shift_channels self.feat_adaption_conv_kernel = feat_adaption_conv_kernel super().__init__(*args, init_cfg=init_cfg, **kwargs) self.loss_guiding_shift = MODELS.build(loss_guiding_shift) self.loss_centripetal_shift = MODELS.build(loss_centripetal_shift) def _init_centripetal_layers(self) -> None: """Initialize centripetal layers. Including feature adaption deform convs (feat_adaption), deform offset prediction convs (dcn_off), guiding shift (guiding_shift) and centripetal shift ( centripetal_shift). Each branch has two parts: prefix `tl_` for top-left and `br_` for bottom-right. 
""" self.tl_feat_adaption = nn.ModuleList() self.br_feat_adaption = nn.ModuleList() self.tl_dcn_offset = nn.ModuleList() self.br_dcn_offset = nn.ModuleList() self.tl_guiding_shift = nn.ModuleList() self.br_guiding_shift = nn.ModuleList() self.tl_centripetal_shift = nn.ModuleList() self.br_centripetal_shift = nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_feat_adaption.append( DeformConv2d(self.in_channels, self.in_channels, self.feat_adaption_conv_kernel, 1, 1)) self.br_feat_adaption.append( DeformConv2d(self.in_channels, self.in_channels, self.feat_adaption_conv_kernel, 1, 1)) self.tl_guiding_shift.append( self._make_layers( out_channels=self.guiding_shift_channels, in_channels=self.in_channels)) self.br_guiding_shift.append( self._make_layers( out_channels=self.guiding_shift_channels, in_channels=self.in_channels)) self.tl_dcn_offset.append( ConvModule( self.guiding_shift_channels, self.feat_adaption_conv_kernel**2 * self.guiding_shift_channels, 1, bias=False, act_cfg=None)) self.br_dcn_offset.append( ConvModule( self.guiding_shift_channels, self.feat_adaption_conv_kernel**2 * self.guiding_shift_channels, 1, bias=False, act_cfg=None)) self.tl_centripetal_shift.append( self._make_layers( out_channels=self.centripetal_shift_channels, in_channels=self.in_channels)) self.br_centripetal_shift.append( self._make_layers( out_channels=self.centripetal_shift_channels, in_channels=self.in_channels)) def _init_layers(self) -> None: """Initialize layers for CentripetalHead. Including two parts: CornerHead layers and CentripetalHead layers """ super()._init_layers() # using _init_layers in CornerHead self._init_centripetal_layers() def init_weights(self) -> None: super().init_weights() for i in range(self.num_feat_levels): normal_init(self.tl_feat_adaption[i], std=0.01) normal_init(self.br_feat_adaption[i], std=0.01) normal_init(self.tl_dcn_offset[i].conv, std=0.1) normal_init(self.br_dcn_offset[i].conv, std=0.1) _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]] _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]] _ = [ x.conv.reset_parameters() for x in self.tl_centripetal_shift[i] ] _ = [ x.conv.reset_parameters() for x in self.br_centripetal_shift[i] ] def forward_single(self, x: Tensor, lvl_ind: int) -> List[Tensor]: """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. Returns: tuple[Tensor]: A tuple of CentripetalHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_guiding_shift (Tensor): Predicted top-left guiding shift heatmap. - br_guiding_shift (Tensor): Predicted bottom-right guiding shift heatmap. - tl_centripetal_shift (Tensor): Predicted top-left centripetal shift heatmap. - br_centripetal_shift (Tensor): Predicted bottom-right centripetal shift heatmap. 
""" tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super( ).forward_single( x, lvl_ind, return_pool=True) tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool) br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool) tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach()) br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach()) tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool, tl_dcn_offset) br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool, br_dcn_offset) tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind]( tl_feat_adaption) br_centripetal_shift = self.br_centripetal_shift[lvl_ind]( br_feat_adaption) result_list = [ tl_heat, br_heat, tl_off, br_off, tl_guiding_shift, br_guiding_shift, tl_centripetal_shift, br_centripetal_shift ] return result_list def loss_by_feat( self, tl_heats: List[Tensor], br_heats: List[Tensor], tl_offs: List[Tensor], br_offs: List[Tensor], tl_guiding_shifts: List[Tensor], br_guiding_shifts: List[Tensor], tl_centripetal_shifts: List[Tensor], br_centripetal_shifts: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each level with shape (N, guiding_shift_channels, H, W). br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for each level with shape (N, guiding_shift_channels, H, W). tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shifts (list[Tensor]): Bottom-right centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. - guiding_loss (list[Tensor]): Guiding shift losses of all feature levels. - centripetal_loss (list[Tensor]): Centripetal shift losses of all feature levels. 
""" gt_bboxes = [ gt_instances.bboxes for gt_instances in batch_gt_instances ] gt_labels = [ gt_instances.labels for gt_instances in batch_gt_instances ] targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, batch_img_metas[0]['batch_input_shape'], with_corner_emb=self.with_corner_emb, with_guiding_shift=True, with_centripetal_shift=True) mlvl_targets = [targets for _ in range(self.num_feat_levels)] [det_losses, off_losses, guiding_losses, centripetal_losses ] = multi_apply(self.loss_by_feat_single, tl_heats, br_heats, tl_offs, br_offs, tl_guiding_shifts, br_guiding_shifts, tl_centripetal_shifts, br_centripetal_shifts, mlvl_targets) loss_dict = dict( det_loss=det_losses, off_loss=off_losses, guiding_loss=guiding_losses, centripetal_loss=centripetal_losses) return loss_dict def loss_by_feat_single(self, tl_hmp: Tensor, br_hmp: Tensor, tl_off: Tensor, br_off: Tensor, tl_guiding_shift: Tensor, br_guiding_shift: Tensor, tl_centripetal_shift: Tensor, br_centripetal_shift: Tensor, targets: dict) -> Tuple[Tensor, ...]: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). tl_guiding_shift (Tensor): Top-left guiding shift for current level with shape (N, guiding_shift_channels, H, W). br_guiding_shift (Tensor): Bottom-right guiding shift for current level with shape (N, guiding_shift_channels, H, W). tl_centripetal_shift (Tensor): Top-left centripetal shift for current level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shift (Tensor): Bottom-right centripetal shift for current level with shape (N, centripetal_shift_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - off_loss (Tensor): Corner offset loss. - guiding_loss (Tensor): Guiding shift loss. - centripetal_loss (Tensor): Centripetal shift loss. """ targets['corner_embedding'] = None det_loss, _, _, off_loss = super().loss_by_feat_single( tl_hmp, br_hmp, None, None, tl_off, br_off, targets) gt_tl_guiding_shift = targets['topleft_guiding_shift'] gt_br_guiding_shift = targets['bottomright_guiding_shift'] gt_tl_centripetal_shift = targets['topleft_centripetal_shift'] gt_br_centripetal_shift = targets['bottomright_centripetal_shift'] gt_tl_heatmap = targets['topleft_heatmap'] gt_br_heatmap = targets['bottomright_heatmap'] # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. 
tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_heatmap) br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_heatmap) # Guiding shift loss tl_guiding_loss = self.loss_guiding_shift( tl_guiding_shift, gt_tl_guiding_shift, tl_mask, avg_factor=tl_mask.sum()) br_guiding_loss = self.loss_guiding_shift( br_guiding_shift, gt_br_guiding_shift, br_mask, avg_factor=br_mask.sum()) guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0 # Centripetal shift loss tl_centripetal_loss = self.loss_centripetal_shift( tl_centripetal_shift, gt_tl_centripetal_shift, tl_mask, avg_factor=tl_mask.sum()) br_centripetal_loss = self.loss_centripetal_shift( br_centripetal_shift, gt_br_centripetal_shift, br_mask, avg_factor=br_mask.sum()) centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0 return det_loss, off_loss, guiding_loss, centripetal_loss def predict_by_feat(self, tl_heats: List[Tensor], br_heats: List[Tensor], tl_offs: List[Tensor], br_offs: List[Tensor], tl_guiding_shifts: List[Tensor], br_guiding_shifts: List[Tensor], tl_centripetal_shifts: List[Tensor], br_centripetal_shifts: List[Tensor], batch_img_metas: Optional[List[dict]] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each level with shape (N, guiding_shift_channels, H, W). Useless in this function, we keep this arg because it's the raw output from CentripetalHead. br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for each level with shape (N, guiding_shift_channels, H, W). Useless in this function, we keep this arg because it's the raw output from CentripetalHead. tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shifts (list[Tensor]): Bottom-right centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). batch_img_metas (list[dict], optional): Batch image meta info. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len( batch_img_metas) result_list = [] for img_id in range(len(batch_img_metas)): result_list.append( self._predict_by_feat_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], batch_img_metas[img_id], tl_emb=None, br_emb=None, tl_centripetal_shift=tl_centripetal_shifts[-1][ img_id:img_id + 1, :], br_centripetal_shift=br_centripetal_shifts[-1][ img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list
21,678
46.128261
79
py
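CentripetalHead averages its guiding and centripetal shift losses only over real corner positions, via the class-agnostic mask built in `loss_by_feat_single` above. A toy sketch of that mask, with assumed tensor sizes and a plain L1 penalty standing in for the configured SmoothL1 loss:

import torch

batch, num_classes, h, w = 1, 3, 4, 4
gt_tl_heatmap = torch.zeros(batch, num_classes, h, w)
gt_tl_heatmap[0, 1, 2, 3] = 1.0   # one real top-left corner of class 1

# (N, C, H, W) -> (N, 1, H, W): 1 where any class has a ground-truth corner.
tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(gt_tl_heatmap)

# The mask both weights the loss elementwise and sets its normalizer
# (avg_factor = mask.sum() in the head), so only real corners contribute.
pred_shift = torch.randn(batch, 2, h, w)
gt_shift = torch.randn(batch, 2, h, w)
loss = (torch.abs(pred_shift - gt_shift) * tl_mask).sum() / tl_mask.sum().clamp(min=1)
print(tl_mask.sum())  # 1 active position in this toy example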
ERD
ERD-main/mmdet/models/dense_heads/paa_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import numpy as np import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList) from ..layers import multiclass_nms from ..utils import levels_to_images, multi_apply from . import ATSSHead EPS = 1e-12 try: import sklearn.mixture as skm except ImportError: skm = None @MODELS.register_module() class PAAHead(ATSSHead): """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU Prediction for Object Detection. Code is modified from the `official github repo <https://github.com/kkhoot/PAA/blob/master/paa_core /modeling/rpn/paa/loss.py>`_. More details can be found in the `paper <https://arxiv.org/abs/2007.08103>`_ . Args: topk (int): Select topk samples with smallest loss in each level. score_voting (bool): Whether to use score voting in post-process. covariance_type : String describing the type of covariance parameters to be used in :class:`sklearn.mixture.GaussianMixture`. It must be one of: - 'full': each component has its own general covariance matrix - 'tied': all components share the same general covariance matrix - 'diag': each component has its own diagonal covariance matrix - 'spherical': each component has its own single variance Default: 'diag'. From 'full' to 'spherical', the gmm fitting process is faster yet the performance could be influenced. For most cases, 'diag' should be a good choice. """ def __init__(self, *args, topk: int = 9, score_voting: bool = True, covariance_type: str = 'diag', **kwargs): # topk used in paa reassign process self.topk = topk self.with_score_voting = score_voting self.covariance_type = covariance_type super().__init__(*args, **kwargs) def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], iou_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) iou_preds (list[Tensor]): iou_preds for each scale level with shape (N, num_anchors * 1, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss gmm_assignment. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, ) (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, pos_gt_index) = cls_reg_targets cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] iou_preds = levels_to_images(iou_preds) iou_preds = [item.reshape(-1, 1) for item in iou_preds] pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, cls_scores, bbox_preds, labels, labels_weight, bboxes_target, bboxes_weight, pos_inds) with torch.no_grad(): reassign_labels, reassign_label_weight, \ reassign_bbox_weights, num_pos = multi_apply( self.paa_reassign, pos_losses_list, labels, labels_weight, bboxes_weight, pos_inds, pos_gt_index, anchor_list) num_pos = sum(num_pos) # convert all tensor list to a flatten tensor cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) labels = torch.cat(reassign_labels, 0).view(-1) flatten_anchors = torch.cat( [torch.cat(item, 0) for item in anchor_list]) labels_weight = torch.cat(reassign_label_weight, 0).view(-1) bboxes_target = torch.cat(bboxes_target, 0).view(-1, bboxes_target[0].size(-1)) pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape(-1) losses_cls = self.loss_cls( cls_scores, labels, labels_weight, avg_factor=max(num_pos, len(batch_img_metas))) # avoid num_pos=0 if num_pos: pos_bbox_pred = self.bbox_coder.decode( flatten_anchors[pos_inds_flatten], bbox_preds[pos_inds_flatten]) pos_bbox_target = bboxes_target[pos_inds_flatten] iou_target = bbox_overlaps( pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) losses_iou = self.loss_centerness( iou_preds[pos_inds_flatten], iou_target.unsqueeze(-1), avg_factor=num_pos) losses_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, iou_target.clamp(min=EPS), avg_factor=iou_target.sum()) else: losses_iou = iou_preds.sum() * 0 losses_bbox = bbox_preds.sum() * 0 return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) def get_pos_loss(self, anchors: List[Tensor], cls_score: Tensor, bbox_pred: Tensor, label: Tensor, label_weight: Tensor, bbox_target: dict, bbox_weight: Tensor, pos_inds: Tensor) -> Tensor: """Calculate loss of all potential positive samples obtained from first match process. Args: anchors (list[Tensor]): Anchors of each scale. cls_score (Tensor): Box scores of single image with shape (num_anchors, num_classes) bbox_pred (Tensor): Box energies / deltas of single image with shape (num_anchors, 4) label (Tensor): classification target of each anchor with shape (num_anchors,) label_weight (Tensor): Classification loss weight of each anchor with shape (num_anchors). bbox_target (dict): Regression target of each anchor with shape (num_anchors, 4). bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). pos_inds (Tensor): Index of all positive samples got from first assign process. 
Returns: Tensor: Losses of all positive samples in single image. """ if not len(pos_inds): return cls_score.new([]), anchors_all_level = torch.cat(anchors, 0) pos_scores = cls_score[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_label = label[pos_inds] pos_label_weight = label_weight[pos_inds] pos_bbox_target = bbox_target[pos_inds] pos_bbox_weight = bbox_weight[pos_inds] pos_anchors = anchors_all_level[pos_inds] pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred) # to keep loss dimension loss_cls = self.loss_cls( pos_scores, pos_label, pos_label_weight, avg_factor=1.0, reduction_override='none') loss_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, pos_bbox_weight, avg_factor=1.0, # keep same loss weight before reassign reduction_override='none') loss_cls = loss_cls.sum(-1) pos_loss = loss_bbox + loss_cls return pos_loss, def paa_reassign(self, pos_losses: Tensor, label: Tensor, label_weight: Tensor, bbox_weight: Tensor, pos_inds: Tensor, pos_gt_inds: Tensor, anchors: List[Tensor]) -> tuple: """Fit loss to GMM distribution and separate positive, ignore, negative samples again with GMM model. Args: pos_losses (Tensor): Losses of all positive samples in single image. label (Tensor): classification target of each anchor with shape (num_anchors,) label_weight (Tensor): Classification loss weight of each anchor with shape (num_anchors). bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). pos_inds (Tensor): Index of all positive samples got from first assign process. pos_gt_inds (Tensor): Gt_index of all positive samples got from first assign process. anchors (list[Tensor]): Anchors of each scale. Returns: tuple: Usually returns a tuple containing learning targets. - label (Tensor): classification target of each anchor after paa assign, with shape (num_anchors,) - label_weight (Tensor): Classification loss weight of each anchor after paa assign, with shape (num_anchors). - bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). - num_pos (int): The number of positive samples after paa assign. 
""" if not len(pos_inds): return label, label_weight, bbox_weight, 0 label = label.clone() label_weight = label_weight.clone() bbox_weight = bbox_weight.clone() num_gt = pos_gt_inds.max() + 1 num_level = len(anchors) num_anchors_each_level = [item.size(0) for item in anchors] num_anchors_each_level.insert(0, 0) inds_level_interval = np.cumsum(num_anchors_each_level) pos_level_mask = [] for i in range(num_level): mask = (pos_inds >= inds_level_interval[i]) & ( pos_inds < inds_level_interval[i + 1]) pos_level_mask.append(mask) pos_inds_after_paa = [label.new_tensor([])] ignore_inds_after_paa = [label.new_tensor([])] for gt_ind in range(num_gt): pos_inds_gmm = [] pos_loss_gmm = [] gt_mask = pos_gt_inds == gt_ind for level in range(num_level): level_mask = pos_level_mask[level] level_gt_mask = level_mask & gt_mask value, topk_inds = pos_losses[level_gt_mask].topk( min(level_gt_mask.sum(), self.topk), largest=False) pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds]) pos_loss_gmm.append(value) pos_inds_gmm = torch.cat(pos_inds_gmm) pos_loss_gmm = torch.cat(pos_loss_gmm) # fix gmm need at least two sample if len(pos_inds_gmm) < 2: continue device = pos_inds_gmm.device pos_loss_gmm, sort_inds = pos_loss_gmm.sort() pos_inds_gmm = pos_inds_gmm[sort_inds] pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy() min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max() means_init = np.array([min_loss, max_loss]).reshape(2, 1) weights_init = np.array([0.5, 0.5]) precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full if self.covariance_type == 'spherical': precisions_init = precisions_init.reshape(2) elif self.covariance_type == 'diag': precisions_init = precisions_init.reshape(2, 1) elif self.covariance_type == 'tied': precisions_init = np.array([[1.0]]) if skm is None: raise ImportError('Please run "pip install sklearn" ' 'to install sklearn first.') gmm = skm.GaussianMixture( 2, weights_init=weights_init, means_init=means_init, precisions_init=precisions_init, covariance_type=self.covariance_type) gmm.fit(pos_loss_gmm) gmm_assignment = gmm.predict(pos_loss_gmm) scores = gmm.score_samples(pos_loss_gmm) gmm_assignment = torch.from_numpy(gmm_assignment).to(device) scores = torch.from_numpy(scores).to(device) pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme( gmm_assignment, scores, pos_inds_gmm) pos_inds_after_paa.append(pos_inds_temp) ignore_inds_after_paa.append(ignore_inds_temp) pos_inds_after_paa = torch.cat(pos_inds_after_paa) ignore_inds_after_paa = torch.cat(ignore_inds_after_paa) reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1) reassign_ids = pos_inds[reassign_mask] label[reassign_ids] = self.num_classes label_weight[ignore_inds_after_paa] = 0 bbox_weight[reassign_ids] = 0 num_pos = len(pos_inds_after_paa) return label, label_weight, bbox_weight, num_pos def gmm_separation_scheme(self, gmm_assignment: Tensor, scores: Tensor, pos_inds_gmm: Tensor) -> Tuple[Tensor, Tensor]: """A general separation scheme for gmm model. It separates a GMM distribution of candidate samples into three parts, 0 1 and uncertain areas, and you can implement other separation schemes by rewriting this function. Args: gmm_assignment (Tensor): The prediction of GMM which is of shape (num_samples,). The 0/1 value indicates the distribution that each sample comes from. scores (Tensor): The probability of sample coming from the fit GMM distribution. The tensor is of shape (num_samples,). pos_inds_gmm (Tensor): All the indexes of samples which are used to fit GMM model. 
The tensor is of shape (num_samples,) Returns: tuple[Tensor, Tensor]: The indices of positive and ignored samples. - pos_inds_temp (Tensor): Indices of positive samples. - ignore_inds_temp (Tensor): Indices of ignore samples. """ # The implementation is (c) in Fig.3 in origin paper instead of (b). # You can refer to issues such as # https://github.com/kkhoot/PAA/issues/8 and # https://github.com/kkhoot/PAA/issues/9. fgs = gmm_assignment == 0 pos_inds_temp = fgs.new_tensor([], dtype=torch.long) ignore_inds_temp = fgs.new_tensor([], dtype=torch.long) if fgs.nonzero().numel(): _, pos_thr_ind = scores[fgs].topk(1) pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1] ignore_inds_temp = pos_inds_gmm.new_tensor([]) return pos_inds_temp, ignore_inds_temp def get_targets(self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Get targets for PAA head. This method is almost the same as `AnchorHead.get_targets()`. We direct return the results from _get_targets_single instead map it to levels by images_to_levels function. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: Usually returns a tuple containing learning targets. - labels (list[Tensor]): Labels of all anchors, each with shape (num_anchors,). - label_weights (list[Tensor]): Label weights of all anchor. each with shape (num_anchors,). - bbox_targets (list[Tensor]): BBox targets of all anchors. each with shape (num_anchors, 4). - bbox_weights (list[Tensor]): BBox weights of all anchors. each with shape (num_anchors, 4). - pos_inds (list[Tensor]): Contains all index of positive sample in all anchor. - gt_inds (list[Tensor]): Contains all gt_index of positive sample in all anchor. 
""" num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs concat_anchor_list = [] concat_valid_flag_list = [] for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) concat_anchor_list.append(torch.cat(anchor_list[i])) concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs results = multi_apply( self._get_targets_single, concat_anchor_list, concat_valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds, valid_neg_inds, sampling_result) = results # Due to valid flag of anchors, we have to calculate the real pos_inds # in origin anchor set. pos_inds = [] for i, single_labels in enumerate(labels): pos_mask = (0 <= single_labels) & ( single_labels < self.num_classes) pos_inds.append(pos_mask.nonzero().view(-1)) gt_inds = [item.pos_assigned_gt_inds for item in sampling_result] return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, gt_inds) def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in a single image. This method is same as `AnchorHead._get_targets_single()`. """ assert unmap_outputs, 'We must map outputs back to the original' \ 'set of anchors in PAAhead' return super(ATSSHead, self)._get_targets_single( flat_anchors, valid_flags, gt_instances, img_meta, gt_instances_ignore, unmap_outputs=True) def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: OptConfigType = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. This method is same as `BaseDenseHead.get_results()`. """ assert with_nms, 'PAA only supports "with_nms=True" now and it ' \ 'means PAAHead does not support ' \ 'test-time augmentation' return super().predict_by_feat( cls_scores=cls_scores, bbox_preds=bbox_preds, score_factors=score_factors, batch_img_metas=batch_img_metas, cfg=cfg, rescale=rescale, with_nms=with_nms) def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: OptConfigType = None, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factors from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (:obj:`ConfigDict` or dict, optional): Test / postprocessing configuration, if None, test_cfg would be used. 
rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_score_factors = [] for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() if 0 < nms_pre < scores.shape[0]: max_scores, _ = (scores * score_factor[:, None]).sqrt().max(dim=1) _, topk_inds = max_scores.topk(nms_pre) priors = priors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] score_factor = score_factor[topk_inds] bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_score_factors.append(score_factor) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.score_factors = torch.cat(mlvl_score_factors) return self._bbox_post_process(results, cfg, rescale, with_nms, img_meta) def _bbox_post_process(self, results: InstanceData, cfg: ConfigType, rescale: bool = False, with_nms: bool = True, img_meta: Optional[dict] = None): """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually with_nms is False is used for aug test. Args: results (:obj:`InstaceData`): Detection instance results, each item has shape (num_bboxes, ). cfg (:obj:`ConfigDict` or dict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. img_meta (dict, optional): Image meta info. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" if rescale: results.bboxes /= results.bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = results.scores.new_zeros(results.scores.shape[0], 1) mlvl_scores = torch.cat([results.scores, padding], dim=1) mlvl_nms_scores = (mlvl_scores * results.score_factors[:, None]).sqrt() det_bboxes, det_labels = multiclass_nms( results.bboxes, mlvl_nms_scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=None) if self.with_score_voting and len(det_bboxes) > 0: det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels, results.bboxes, mlvl_nms_scores, cfg.score_thr) nms_results = InstanceData() nms_results.bboxes = det_bboxes[:, :-1] nms_results.scores = det_bboxes[:, -1] nms_results.labels = det_labels return nms_results def score_voting(self, det_bboxes: Tensor, det_labels: Tensor, mlvl_bboxes: Tensor, mlvl_nms_scores: Tensor, score_thr: float) -> Tuple[Tensor, Tensor]: """Implementation of score voting method works on each remaining boxes after NMS procedure. Args: det_bboxes (Tensor): Remaining boxes after NMS procedure, with shape (k, 5), each dimension means (x1, y1, x2, y2, score). det_labels (Tensor): The label of remaining boxes, with shape (k, 1),Labels are 0-based. mlvl_bboxes (Tensor): All boxes before the NMS procedure, with shape (num_anchors,4). mlvl_nms_scores (Tensor): The scores of all boxes which is used in the NMS procedure, with shape (num_anchors, num_class) score_thr (float): The score threshold of bboxes. Returns: tuple: Usually returns a tuple containing voting results. - det_bboxes_voted (Tensor): Remaining boxes after score voting procedure, with shape (k, 5), each dimension means (x1, y1, x2, y2, score). - det_labels_voted (Tensor): Label of remaining bboxes after voting, with shape (num_anchors,). """ candidate_mask = mlvl_nms_scores > score_thr candidate_mask_nonzeros = candidate_mask.nonzero(as_tuple=False) candidate_inds = candidate_mask_nonzeros[:, 0] candidate_labels = candidate_mask_nonzeros[:, 1] candidate_bboxes = mlvl_bboxes[candidate_inds] candidate_scores = mlvl_nms_scores[candidate_mask] det_bboxes_voted = [] det_labels_voted = [] for cls in range(self.cls_out_channels): candidate_cls_mask = candidate_labels == cls if not candidate_cls_mask.any(): continue candidate_cls_scores = candidate_scores[candidate_cls_mask] candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask] det_cls_mask = det_labels == cls det_cls_bboxes = det_bboxes[det_cls_mask].view( -1, det_bboxes.size(-1)) det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4], candidate_cls_bboxes) for det_ind in range(len(det_cls_bboxes)): single_det_ious = det_candidate_ious[det_ind] pos_ious_mask = single_det_ious > 0.01 pos_ious = single_det_ious[pos_ious_mask] pos_bboxes = candidate_cls_bboxes[pos_ious_mask] pos_scores = candidate_cls_scores[pos_ious_mask] pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) * pos_scores)[:, None] voted_box = torch.sum( pis * pos_bboxes, dim=0) / torch.sum( pis, dim=0) voted_score = det_cls_bboxes[det_ind][-1:][None, :] det_bboxes_voted.append( torch.cat((voted_box[None, :], voted_score), dim=1)) det_labels_voted.append(cls) det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0) det_labels_voted = det_labels.new_tensor(det_labels_voted) return det_bboxes_voted, det_labels_voted
33,336
44.604651
79
py
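The core of `paa_reassign` above is fitting per-anchor losses for one GT with a two-component 1-D Gaussian mixture and keeping the low-loss component as positives. A minimal sketch with synthetic loss values (requires scikit-learn, as the head itself does):

import numpy as np
from sklearn.mixture import GaussianMixture

# Sorted candidate losses: a low-loss cluster and a high-loss cluster.
pos_loss = np.array([0.2, 0.25, 0.3, 1.4, 1.5, 1.6]).reshape(-1, 1)

means_init = np.array([pos_loss.min(), pos_loss.max()]).reshape(2, 1)
gmm = GaussianMixture(
    n_components=2,
    weights_init=np.array([0.5, 0.5]),
    means_init=means_init,
    precisions_init=np.ones((2, 1)),   # shape (2, 1) for 'diag' covariance
    covariance_type='diag')            # the head's default covariance_type
gmm.fit(pos_loss)

# Component 0 was initialized at the minimum loss, so label 0 marks the
# low-loss (kept-as-positive) side after fitting.
assignment = gmm.predict(pos_loss)
print(assignment)  # e.g. [0 0 0 1 1 1]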
ERD
ERD-main/mmdet/models/dense_heads/retina_sepbn_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import bias_init_with_prob, normal_init from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import OptConfigType, OptMultiConfig from .anchor_head import AnchorHead @MODELS.register_module() class RetinaSepBNHead(AnchorHead): """"RetinaHead with separate BN. In RetinaHead, conv/norm layers are shared across different FPN levels, while in RetinaSepBNHead, conv layers are shared across different FPN levels, but BN layers are separated. """ def __init__(self, num_classes: int, num_ins: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None, **kwargs) -> None: assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.num_ins = num_ins super().__init__( num_classes=num_classes, in_channels=in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.num_ins): cls_convs = nn.ModuleList() reg_convs = nn.ModuleList() for j in range(self.stacked_convs): chn = self.in_channels if j == 0 else self.feat_channels cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.cls_convs.append(cls_convs) self.reg_convs.append(reg_convs) for i in range(self.stacked_convs): for j in range(1, self.num_ins): self.cls_convs[j][i].conv = self.cls_convs[0][i].conv self.reg_convs[j][i].conv = self.reg_convs[0][i].conv self.retina_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def init_weights(self) -> None: """Initialize weights of the head.""" super().init_weights() for m in self.cls_convs[0]: normal_init(m.conv, std=0.01) for m in self.reg_convs[0]: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.retina_cls, std=0.01, bias=bias_cls) normal_init(self.retina_reg, std=0.01) def forward(self, feats: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ cls_scores = [] bbox_preds = [] for i, x in enumerate(feats): cls_feat = feats[i] reg_feat = feats[i] for cls_conv in self.cls_convs[i]: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs[i]: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_pred = self.retina_reg(reg_feat) cls_scores.append(cls_score) bbox_preds.append(bbox_pred) return cls_scores, bbox_preds
4,841
36.828125
79
py
ERD
ERD-main/mmdet/models/dense_heads/anchor_free_head.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import abstractmethod from typing import Any, List, Sequence, Tuple, Union import torch.nn as nn from mmcv.cnn import ConvModule from numpy import ndarray from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList) from ..task_modules.prior_generators import MlvlPointGenerator from ..utils import multi_apply from .base_dense_head import BaseDenseHead StrideType = Union[Sequence[int], Sequence[Tuple[int, int]]] @MODELS.register_module() class AnchorFreeHead(BaseDenseHead): """Anchor-free head (FCOS, Fovea, RepPoints, etc.). Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. stacked_convs (int): Number of stacking convs of the head. strides (Sequence[int] or Sequence[Tuple[int, int]]): Downsample factor of each feature map. dcn_on_last_conv (bool): If true, use dcn in the last layer of towers. Defaults to False. conv_bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. Defaults 'DistancePointBBoxCoder'. conv_cfg (:obj:`ConfigDict` or dict, Optional): Config dict for convolution layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, Optional): Config dict for normalization layer. Defaults to None. train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of anchor-free head. test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of anchor-free head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. """ # noqa: W605 _version = 1 def __init__( self, num_classes: int, in_channels: int, feat_channels: int = 256, stacked_convs: int = 4, strides: StrideType = (4, 8, 16, 32, 64), dcn_on_last_conv: bool = False, conv_bias: Union[bool, str] = 'auto', loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox: ConfigType = dict(type='IoULoss', loss_weight=1.0), bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)) ) -> None: super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.dcn_on_last_conv = dcn_on_last_conv assert conv_bias == 'auto' or isinstance(conv_bias, bool) self.conv_bias = conv_bias self.loss_cls = MODELS.build(loss_cls) self.loss_bbox = MODELS.build(loss_bbox) self.bbox_coder = TASK_UTILS.build(bbox_coder) self.prior_generator = MlvlPointGenerator(strides) # In order to keep a more general interface and be consistent with # anchor_head. 
We can think of point like one anchor self.num_base_priors = self.prior_generator.num_base_priors[0] self.train_cfg = train_cfg self.test_cfg = test_cfg self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self._init_cls_convs() self._init_reg_convs() self._init_predictor() def _init_cls_convs(self) -> None: """Initialize classification conv layers of the head.""" self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_reg_convs(self) -> None: """Initialize bbox regression conv layers of the head.""" self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_predictor(self) -> None: """Initialize predictor layers of the head.""" self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) def _load_from_state_dict(self, state_dict: dict, prefix: str, local_metadata: dict, strict: bool, missing_keys: Union[List[str], str], unexpected_keys: Union[List[str], str], error_msgs: Union[List[str], str]) -> None: """Hack some keys of the model state dict so that can load checkpoints of previous version.""" version = local_metadata.get('version', None) if version is None: # the key is different in early versions # for example, 'fcos_cls' become 'conv_cls' now bbox_head_keys = [ k for k in state_dict.keys() if k.startswith(prefix) ] ori_predictor_keys = [] new_predictor_keys = [] # e.g. 'fcos_cls' or 'fcos_reg' for key in bbox_head_keys: ori_predictor_keys.append(key) key = key.split('.') if len(key) < 2: conv_name = None elif key[1].endswith('cls'): conv_name = 'conv_cls' elif key[1].endswith('reg'): conv_name = 'conv_reg' elif key[1].endswith('centerness'): conv_name = 'conv_centerness' else: conv_name = None if conv_name is not None: key[1] = conv_name new_predictor_keys.append('.'.join(key)) else: ori_predictor_keys.pop(-1) for i in range(len(new_predictor_keys)): state_dict[new_predictor_keys[i]] = state_dict.pop( ori_predictor_keys[i]) super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually contain classification scores and bbox predictions. - cls_scores (list[Tensor]): Box scores for each scale level, \ each is a 4D-tensor, the channel number is \ num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale \ level, each is a 4D-tensor, the channel number is num_points * 4. 
""" return multi_apply(self.forward_single, x)[:2] def forward_single(self, x: Tensor) -> Tuple[Tensor, ...]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. Returns: tuple: Scores for each class, bbox predictions, features after classification and regression conv layers, some models needs these features like FCOS. """ cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) return cls_score, bbox_pred, cls_feat, reg_feat @abstractmethod def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. """ raise NotImplementedError @abstractmethod def get_targets(self, points: List[Tensor], batch_gt_instances: InstanceList) -> Any: """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. """ raise NotImplementedError # TODO refactor aug_test def aug_test(self, aug_batch_feats: List[Tensor], aug_batch_img_metas: List[List[Tensor]], rescale: bool = False) -> List[ndarray]: """Test function with test time augmentation. Args: aug_batch_feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. aug_batch_img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[ndarray]: bbox results of each class """ return self.aug_test_bboxes( aug_batch_feats, aug_batch_img_metas, rescale=rescale)
13,049
40.037736
79
py
ERD
ERD-main/mmdet/models/dense_heads/boxinst_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch import torch.nn.functional as F from mmengine import MessageHub from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList from ..utils.misc import unfold_wo_center from .condinst_head import CondInstBboxHead, CondInstMaskHead @MODELS.register_module() class BoxInstBboxHead(CondInstBboxHead): """BoxInst box head used in https://arxiv.org/abs/2012.02310.""" def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) @MODELS.register_module() class BoxInstMaskHead(CondInstMaskHead): """BoxInst mask head used in https://arxiv.org/abs/2012.02310. This head outputs the mask for BoxInst. Args: pairwise_size (int): The size of neighborhood for each pixel. Defaults to 3. pairwise_dilation (int): The dilation of neighborhood for each pixel. Defaults to 2. warmup_iters (int): Warmup iterations for pair-wise loss. Defaults to 10000. """ def __init__(self, *args, pairwise_size: int = 3, pairwise_dilation: int = 2, warmup_iters: int = 10000, **kwargs) -> None: self.pairwise_size = pairwise_size self.pairwise_dilation = pairwise_dilation self.warmup_iters = warmup_iters super().__init__(*args, **kwargs) def get_pairwise_affinity(self, mask_logits: Tensor) -> Tensor: """Compute the pairwise affinity for each pixel.""" log_fg_prob = F.logsigmoid(mask_logits).unsqueeze(1) log_bg_prob = F.logsigmoid(-mask_logits).unsqueeze(1) log_fg_prob_unfold = unfold_wo_center( log_fg_prob, kernel_size=self.pairwise_size, dilation=self.pairwise_dilation) log_bg_prob_unfold = unfold_wo_center( log_bg_prob, kernel_size=self.pairwise_size, dilation=self.pairwise_dilation) # the probability of making the same prediction: # p_i * p_j + (1 - p_i) * (1 - p_j) # we compute the probability in log space # to avoid numerical instability log_same_fg_prob = log_fg_prob[:, :, None] + log_fg_prob_unfold log_same_bg_prob = log_bg_prob[:, :, None] + log_bg_prob_unfold # TODO: Figure out the difference between this and a direct sum max_ = torch.max(log_same_fg_prob, log_same_bg_prob) log_same_prob = torch.log( torch.exp(log_same_fg_prob - max_) + torch.exp(log_same_bg_prob - max_)) + max_ return -log_same_prob[:, 0] def loss_by_feat(self, mask_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], positive_infos: InstanceList, **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mask_preds (list[Tensor]): List of predicted masks, each has shape (num_classes, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. positive_infos (List[:obj:``InstanceData``]): Information of positive samples of each image that are assigned in detection head. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert positive_infos is not None, \ 'positive_infos should not be None in `BoxInstMaskHead`' losses = dict() loss_mask_project = 0. loss_mask_pairwise = 0. num_imgs = len(mask_preds) total_pos = 0. avg_factor = 0.
for idx in range(num_imgs): (mask_pred, pos_mask_targets, pos_pairwise_masks, num_pos) = \ self._get_targets_single( mask_preds[idx], batch_gt_instances[idx], positive_infos[idx]) # mask loss total_pos += num_pos if num_pos == 0 or pos_mask_targets is None: loss_project = mask_pred.new_zeros(1).mean() loss_pairwise = mask_pred.new_zeros(1).mean() avg_factor += 0. else: # compute the project term loss_project_x = self.loss_mask( mask_pred.max(dim=1, keepdim=True)[0], pos_mask_targets.max(dim=1, keepdim=True)[0], reduction_override='none').sum() loss_project_y = self.loss_mask( mask_pred.max(dim=2, keepdim=True)[0], pos_mask_targets.max(dim=2, keepdim=True)[0], reduction_override='none').sum() loss_project = loss_project_x + loss_project_y # compute the pairwise term pairwise_affinity = self.get_pairwise_affinity(mask_pred) avg_factor += pos_pairwise_masks.sum().clamp(min=1.0) loss_pairwise = (pairwise_affinity * pos_pairwise_masks).sum() loss_mask_project += loss_project loss_mask_pairwise += loss_pairwise if total_pos == 0: total_pos += 1 # avoid nan if avg_factor == 0: avg_factor += 1 # avoid nan loss_mask_project = loss_mask_project / total_pos loss_mask_pairwise = loss_mask_pairwise / avg_factor message_hub = MessageHub.get_current_instance() iter = message_hub.get_info('iter') warmup_factor = min(iter / float(self.warmup_iters), 1.0) loss_mask_pairwise *= warmup_factor losses.update( loss_mask_project=loss_mask_project, loss_mask_pairwise=loss_mask_pairwise) return losses def _get_targets_single(self, mask_preds: Tensor, gt_instances: InstanceData, positive_info: InstanceData): """Compute targets for predictions of a single image. Args: mask_preds (Tensor): Predicted prototypes with shape (num_classes, H, W). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should include ``bboxes``, ``labels``, and ``masks`` attributes. positive_info (:obj:`InstanceData`): Information of positive samples that are assigned in detection head. It usually contains following keys. - pos_assigned_gt_inds (Tensor): Assigned GT indices of positive proposals, has shape (num_pos, ) - pos_inds (Tensor): Positive index of image, has shape (num_pos, ). - param_pred (Tensor): Positive param predictions with shape (num_pos, num_params). Returns: tuple: Usually returns a tuple containing learning targets. - mask_preds (Tensor): Positive predicted mask with shape (num_pos, mask_h, mask_w). - pos_mask_targets (Tensor): Positive mask targets with shape (num_pos, mask_h, mask_w). - pos_pairwise_masks (Tensor): Positive pairwise masks with shape: (num_pos, num_neighborhood, mask_h, mask_w). - num_pos (int): Number of positive samples. """ gt_bboxes = gt_instances.bboxes device = gt_bboxes.device # Note that gt_masks are generated from the full box # by BoxInstDataPreprocessor gt_masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device).float() # Note that pairwise_masks are generated from image color similarity # by BoxInstDataPreprocessor pairwise_masks = gt_instances.pairwise_masks pairwise_masks = pairwise_masks.to(device=device) # process with mask targets pos_assigned_gt_inds = positive_info.get('pos_assigned_gt_inds') scores = positive_info.get('scores') centernesses = positive_info.get('centernesses') num_pos = pos_assigned_gt_inds.size(0) if gt_masks.size(0) == 0 or num_pos == 0: return mask_preds, None, None, 0 # Since we're producing (near) full image masks, # it'd take too much VRAM to backprop on every single mask. # Thus we select only a subset.
if (self.max_masks_to_train != -1) and \ (num_pos > self.max_masks_to_train): perm = torch.randperm(num_pos) select = perm[:self.max_masks_to_train] mask_preds = mask_preds[select] pos_assigned_gt_inds = pos_assigned_gt_inds[select] num_pos = self.max_masks_to_train elif self.topk_masks_per_img != -1: unique_gt_inds = pos_assigned_gt_inds.unique() num_inst_per_gt = max( int(self.topk_masks_per_img / len(unique_gt_inds)), 1) keep_mask_preds = [] keep_pos_assigned_gt_inds = [] for gt_ind in unique_gt_inds: per_inst_pos_inds = (pos_assigned_gt_inds == gt_ind) mask_preds_per_inst = mask_preds[per_inst_pos_inds] gt_inds_per_inst = pos_assigned_gt_inds[per_inst_pos_inds] if sum(per_inst_pos_inds) > num_inst_per_gt: per_inst_scores = scores[per_inst_pos_inds].sigmoid().max( dim=1)[0] per_inst_centerness = centernesses[ per_inst_pos_inds].sigmoid().reshape(-1, ) select = (per_inst_scores * per_inst_centerness).topk( k=num_inst_per_gt, dim=0)[1] mask_preds_per_inst = mask_preds_per_inst[select] gt_inds_per_inst = gt_inds_per_inst[select] keep_mask_preds.append(mask_preds_per_inst) keep_pos_assigned_gt_inds.append(gt_inds_per_inst) mask_preds = torch.cat(keep_mask_preds) pos_assigned_gt_inds = torch.cat(keep_pos_assigned_gt_inds) num_pos = pos_assigned_gt_inds.size(0) # Follow the original implementation start = int(self.mask_out_stride // 2) gt_masks = gt_masks[:, start::self.mask_out_stride, start::self.mask_out_stride] gt_masks = gt_masks.gt(0.5).float() pos_mask_targets = gt_masks[pos_assigned_gt_inds] pos_pairwise_masks = pairwise_masks[pos_assigned_gt_inds] pos_pairwise_masks = pos_pairwise_masks * pos_mask_targets.unsqueeze(1) return (mask_preds, pos_mask_targets, pos_pairwise_masks, num_pos)
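The max-shifted log-sum-exp in `get_pairwise_affinity` evaluates log(p_i * p_j + (1 - p_i) * (1 - p_j)) directly from logits, avoiding underflow when both probabilities are extreme. A small numeric check of that identity (the scalar logits here are illustrative):

import torch
import torch.nn.functional as F

def log_same_prob(logit_i, logit_j):
    log_fg = F.logsigmoid(logit_i) + F.logsigmoid(logit_j)    # log(p_i * p_j)
    log_bg = F.logsigmoid(-logit_i) + F.logsigmoid(-logit_j)  # log((1-p_i)(1-p_j))
    max_ = torch.max(log_fg, log_bg)
    return torch.log(torch.exp(log_fg - max_) + torch.exp(log_bg - max_)) + max_

a, b = torch.tensor(3.0), torch.tensor(-2.0)
p, q = a.sigmoid(), b.sigmoid()
direct = torch.log(p * q + (1 - p) * (1 - q))  # naive computation, for comparison
assert torch.allclose(log_same_prob(a, b), direct, atol=1e-6)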
10,963
42.335968
79
py
ERD
ERD-main/mmdet/models/dense_heads/maskformer_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d from mmengine.model import caffe2_xavier_init from mmengine.structures import InstanceData, PixelData from torch import Tensor from mmdet.models.layers.pixel_decoder import PixelDecoder from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptMultiConfig, reduce_mean) from ..layers import DetrTransformerDecoder, SinePositionalEncoding from ..utils import multi_apply, preprocess_panoptic_gt from .anchor_free_head import AnchorFreeHead @MODELS.register_module() class MaskFormerHead(AnchorFreeHead): """Implements the MaskFormer head. See `Per-Pixel Classification is Not All You Need for Semantic Segmentation <https://arxiv.org/pdf/2107.06278>`_ for details. Args: in_channels (list[int]): Number of channels in the input feature map. feat_channels (int): Number of channels for feature. out_channels (int): Number of channels for output. num_things_classes (int): Number of things. num_stuff_classes (int): Number of stuff. num_queries (int): Number of query in Transformer. pixel_decoder (:obj:`ConfigDict` or dict): Config for pixel decoder. enforce_decoder_input_project (bool): Whether to add a layer to change the embed_dim of transformer encoder in pixel decoder to the embed_dim of transformer decoder. Defaults to False. transformer_decoder (:obj:`ConfigDict` or dict): Config for transformer decoder. positional_encoding (:obj:`ConfigDict` or dict): Config for transformer decoder position encoding. loss_cls (:obj:`ConfigDict` or dict): Config of the classification loss. Defaults to `CrossEntropyLoss`. loss_mask (:obj:`ConfigDict` or dict): Config of the mask loss. Defaults to `FocalLoss`. loss_dice (:obj:`ConfigDict` or dict): Config of the dice loss. Defaults to `DiceLoss`. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of MaskFormer head. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of MaskFormer head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. Defaults to None. 
""" def __init__(self, in_channels: List[int], feat_channels: int, out_channels: int, num_things_classes: int = 80, num_stuff_classes: int = 53, num_queries: int = 100, pixel_decoder: ConfigType = ..., enforce_decoder_input_project: bool = False, transformer_decoder: ConfigType = ..., positional_encoding: ConfigType = dict( num_feats=128, normalize=True), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, class_weight=[1.0] * 133 + [0.1]), loss_mask: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=20.0), loss_dice: ConfigType = dict( type='DiceLoss', use_sigmoid=True, activate=True, naive_dice=True, loss_weight=1.0), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None, **kwargs) -> None: super(AnchorFreeHead, self).__init__(init_cfg=init_cfg) self.num_things_classes = num_things_classes self.num_stuff_classes = num_stuff_classes self.num_classes = self.num_things_classes + self.num_stuff_classes self.num_queries = num_queries pixel_decoder.update( in_channels=in_channels, feat_channels=feat_channels, out_channels=out_channels) self.pixel_decoder = MODELS.build(pixel_decoder) self.transformer_decoder = DetrTransformerDecoder( **transformer_decoder) self.decoder_embed_dims = self.transformer_decoder.embed_dims if type(self.pixel_decoder) == PixelDecoder and ( self.decoder_embed_dims != in_channels[-1] or enforce_decoder_input_project): self.decoder_input_proj = Conv2d( in_channels[-1], self.decoder_embed_dims, kernel_size=1) else: self.decoder_input_proj = nn.Identity() self.decoder_pe = SinePositionalEncoding(**positional_encoding) self.query_embed = nn.Embedding(self.num_queries, out_channels) self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) self.mask_embed = nn.Sequential( nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, out_channels)) self.test_cfg = test_cfg self.train_cfg = train_cfg if train_cfg: self.assigner = TASK_UTILS.build(train_cfg['assigner']) self.sampler = TASK_UTILS.build( train_cfg['sampler'], default_args=dict(context=self)) self.class_weight = loss_cls.class_weight self.loss_cls = MODELS.build(loss_cls) self.loss_mask = MODELS.build(loss_mask) self.loss_dice = MODELS.build(loss_dice) def init_weights(self) -> None: if isinstance(self.decoder_input_proj, Conv2d): caffe2_xavier_init(self.decoder_input_proj, bias=0) self.pixel_decoder.init_weights() for p in self.transformer_decoder.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def preprocess_gt( self, batch_gt_instances: InstanceList, batch_gt_semantic_segs: List[Optional[PixelData]]) -> InstanceList: """Preprocess the ground truth for all images. Args: batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``labels``, each is ground truth labels of each bbox, with shape (num_gts, ) and ``masks``, each is ground truth masks of each instances of a image, shape (num_gts, h, w). gt_semantic_seg (list[Optional[PixelData]]): Ground truth of semantic segmentation, each with the shape (1, h, w). [0, num_thing_class - 1] means things, [num_thing_class, num_class-1] means stuff, 255 means VOID. It's None when training instance segmentation. 
Returns: list[obj:`InstanceData`]: each contains the following keys - labels (Tensor): Ground truth class indices\ for a image, with shape (n, ), n is the sum of\ number of stuff type and number of instance in a image. - masks (Tensor): Ground truth mask for a\ image, with shape (n, h, w). """ num_things_list = [self.num_things_classes] * len(batch_gt_instances) num_stuff_list = [self.num_stuff_classes] * len(batch_gt_instances) gt_labels_list = [ gt_instances['labels'] for gt_instances in batch_gt_instances ] gt_masks_list = [ gt_instances['masks'] for gt_instances in batch_gt_instances ] gt_semantic_segs = [ None if gt_semantic_seg is None else gt_semantic_seg.sem_seg for gt_semantic_seg in batch_gt_semantic_segs ] targets = multi_apply(preprocess_panoptic_gt, gt_labels_list, gt_masks_list, gt_semantic_segs, num_things_list, num_stuff_list) labels, masks = targets batch_gt_instances = [ InstanceData(labels=label, masks=mask) for label, mask in zip(labels, masks) ] return batch_gt_instances def get_targets( self, cls_scores_list: List[Tensor], mask_preds_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], return_sampling_results: bool = False ) -> Tuple[List[Union[Tensor, int]]]: """Compute classification and mask targets for all images for a decoder layer. Args: cls_scores_list (list[Tensor]): Mask score logits from a single decoder layer for all images. Each with shape (num_queries, cls_out_channels). mask_preds_list (list[Tensor]): Mask logits from a single decoder layer for all images. Each with shape (num_queries, h, w). batch_gt_instances (list[obj:`InstanceData`]): each contains ``labels`` and ``masks``. batch_img_metas (list[dict]): List of image meta information. return_sampling_results (bool): Whether to return the sampling results. Defaults to False. Returns: tuple: a tuple containing the following targets. - labels_list (list[Tensor]): Labels of all images.\ Each with shape (num_queries, ). - label_weights_list (list[Tensor]): Label weights\ of all images. Each with shape (num_queries, ). - mask_targets_list (list[Tensor]): Mask targets of\ all images. Each with shape (num_queries, h, w). - mask_weights_list (list[Tensor]): Mask weights of\ all images. Each with shape (num_queries, ). - avg_factor (int): Average factor that is used to average\ the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `MaskPseudoSampler`, `avg_factor` is usually equal to the number of positive priors. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. having HxW dimension). The results will be concatenated after the end. """ results = multi_apply(self._get_targets_single, cls_scores_list, mask_preds_list, batch_gt_instances, batch_img_metas) (labels_list, label_weights_list, mask_targets_list, mask_weights_list, pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] rest_results = list(results[7:]) avg_factor = sum( [results.avg_factor for results in sampling_results_list]) res = (labels_list, label_weights_list, mask_targets_list, mask_weights_list, avg_factor) if return_sampling_results: res = res + (sampling_results_list) return res + tuple(rest_results) def _get_targets_single(self, cls_score: Tensor, mask_pred: Tensor, gt_instances: InstanceData, img_meta: dict) -> Tuple[Tensor]: """Compute classification and mask targets for one image. 
Args: cls_score (Tensor): Mask score logits from a single decoder layer for one image. Shape (num_queries, cls_out_channels). mask_pred (Tensor): Mask logits for a single decoder layer for one image. Shape (num_queries, h, w). gt_instances (:obj:`InstanceData`): It contains ``labels`` and ``masks``. img_meta (dict): Image informtation. Returns: tuple: a tuple containing the following for one image. - labels (Tensor): Labels of each image. shape (num_queries, ). - label_weights (Tensor): Label weights of each image. shape (num_queries, ). - mask_targets (Tensor): Mask targets of each image. shape (num_queries, h, w). - mask_weights (Tensor): Mask weights of each image. shape (num_queries, ). - pos_inds (Tensor): Sampled positive indices for each image. - neg_inds (Tensor): Sampled negative indices for each image. - sampling_result (:obj:`SamplingResult`): Sampling results. """ gt_masks = gt_instances.masks gt_labels = gt_instances.labels target_shape = mask_pred.shape[-2:] if gt_masks.shape[0] > 0: gt_masks_downsampled = F.interpolate( gt_masks.unsqueeze(1).float(), target_shape, mode='nearest').squeeze(1).long() else: gt_masks_downsampled = gt_masks pred_instances = InstanceData(scores=cls_score, masks=mask_pred) downsampled_gt_instances = InstanceData( labels=gt_labels, masks=gt_masks_downsampled) # assign and sample assign_result = self.assigner.assign( pred_instances=pred_instances, gt_instances=downsampled_gt_instances, img_meta=img_meta) sampling_result = self.sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # label target labels = gt_labels.new_full((self.num_queries, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] label_weights = gt_labels.new_ones(self.num_queries) # mask target mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] mask_weights = mask_pred.new_zeros((self.num_queries, )) mask_weights[pos_inds] = 1.0 return (labels, label_weights, mask_targets, mask_weights, pos_inds, neg_inds, sampling_result) def loss_by_feat(self, all_cls_scores: Tensor, all_mask_preds: Tensor, batch_gt_instances: List[InstanceData], batch_img_metas: List[dict]) -> Dict[str, Tensor]: """Loss function. Args: all_cls_scores (Tensor): Classification scores for all decoder layers with shape (num_decoder, batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. all_mask_preds (Tensor): Mask scores for all decoder layers with shape (num_decoder, batch_size, num_queries, h, w). batch_gt_instances (list[obj:`InstanceData`]): each contains ``labels`` and ``masks``. batch_img_metas (list[dict]): List of image meta information. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" num_dec_layers = len(all_cls_scores) batch_gt_instances_list = [ batch_gt_instances for _ in range(num_dec_layers) ] img_metas_list = [batch_img_metas for _ in range(num_dec_layers)] losses_cls, losses_mask, losses_dice = multi_apply( self._loss_by_feat_single, all_cls_scores, all_mask_preds, batch_gt_instances_list, img_metas_list) loss_dict = dict() # loss from the last decoder layer loss_dict['loss_cls'] = losses_cls[-1] loss_dict['loss_mask'] = losses_mask[-1] loss_dict['loss_dice'] = losses_dice[-1] # loss from other decoder layers num_dec_layer = 0 for loss_cls_i, loss_mask_i, loss_dice_i in zip( losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]): loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i num_dec_layer += 1 return loss_dict def _loss_by_feat_single(self, cls_scores: Tensor, mask_preds: Tensor, batch_gt_instances: List[InstanceData], batch_img_metas: List[dict]) -> Tuple[Tensor]: """Loss function for outputs from a single decoder layer. Args: cls_scores (Tensor): Mask score logits from a single decoder layer for all images. Shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. mask_preds (Tensor): Mask logits for a pixel decoder for all images. Shape (batch_size, num_queries, h, w). batch_gt_instances (list[obj:`InstanceData`]): each contains ``labels`` and ``masks``. batch_img_metas (list[dict]): List of image meta information. Returns: tuple[Tensor]: Loss components for outputs from a single decoder\ layer. """ num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] mask_preds_list = [mask_preds[i] for i in range(num_imgs)] (labels_list, label_weights_list, mask_targets_list, mask_weights_list, avg_factor) = self.get_targets(cls_scores_list, mask_preds_list, batch_gt_instances, batch_img_metas) # shape (batch_size, num_queries) labels = torch.stack(labels_list, dim=0) # shape (batch_size, num_queries) label_weights = torch.stack(label_weights_list, dim=0) # shape (num_total_gts, h, w) mask_targets = torch.cat(mask_targets_list, dim=0) # shape (batch_size, num_queries) mask_weights = torch.stack(mask_weights_list, dim=0) # classfication loss # shape (batch_size * num_queries, ) cls_scores = cls_scores.flatten(0, 1) labels = labels.flatten(0, 1) label_weights = label_weights.flatten(0, 1) class_weight = cls_scores.new_tensor(self.class_weight) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=class_weight[labels].sum()) num_total_masks = reduce_mean(cls_scores.new_tensor([avg_factor])) num_total_masks = max(num_total_masks, 1) # extract positive ones # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) mask_preds = mask_preds[mask_weights > 0] target_shape = mask_targets.shape[-2:] if mask_targets.shape[0] == 0: # zero match loss_dice = mask_preds.sum() loss_mask = mask_preds.sum() return loss_cls, loss_mask, loss_dice # upsample to shape of target # shape (num_total_gts, h, w) mask_preds = F.interpolate( mask_preds.unsqueeze(1), target_shape, mode='bilinear', align_corners=False).squeeze(1) # dice loss loss_dice = self.loss_dice( mask_preds, mask_targets, avg_factor=num_total_masks) # mask loss # FocalLoss support input of shape (n, num_class) h, w = mask_preds.shape[-2:] # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1) mask_preds = mask_preds.reshape(-1, 1) # shape (num_total_gts, h, w) -> (num_total_gts * h * w) mask_targets = 
mask_targets.reshape(-1) # target is (1 - mask_targets) !!! loss_mask = self.loss_mask( mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w) return loss_cls, loss_mask, loss_dice def forward(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> Tuple[Tensor]: """Forward function. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple[Tensor]: a tuple contains two elements. - all_cls_scores (Tensor): Classification scores for each\ scale level. Each is a 4D-tensor with shape\ (num_decoder, batch_size, num_queries, cls_out_channels).\ Note `cls_out_channels` should includes background. - all_mask_preds (Tensor): Mask scores for each decoder\ layer. Each with shape (num_decoder, batch_size,\ num_queries, h, w). """ batch_img_metas = [ data_sample.metainfo for data_sample in batch_data_samples ] batch_size = len(batch_img_metas) input_img_h, input_img_w = batch_img_metas[0]['batch_input_shape'] padding_mask = x[-1].new_ones((batch_size, input_img_h, input_img_w), dtype=torch.float32) for i in range(batch_size): img_h, img_w = batch_img_metas[i]['img_shape'] padding_mask[i, :img_h, :img_w] = 0 padding_mask = F.interpolate( padding_mask.unsqueeze(1), size=x[-1].shape[-2:], mode='nearest').to(torch.bool).squeeze(1) # when backbone is swin, memory is output of last stage of swin. # when backbone is r50, memory is output of tranformer encoder. mask_features, memory = self.pixel_decoder(x, batch_img_metas) pos_embed = self.decoder_pe(padding_mask) memory = self.decoder_input_proj(memory) # shape (batch_size, c, h, w) -> (batch_size, h*w, c) memory = memory.flatten(2).permute(0, 2, 1) pos_embed = pos_embed.flatten(2).permute(0, 2, 1) # shape (batch_size, h * w) padding_mask = padding_mask.flatten(1) # shape = (num_queries, embed_dims) query_embed = self.query_embed.weight # shape = (batch_size, num_queries, embed_dims) query_embed = query_embed.unsqueeze(0).repeat(batch_size, 1, 1) target = torch.zeros_like(query_embed) # shape (num_decoder, num_queries, batch_size, embed_dims) out_dec = self.transformer_decoder( query=target, key=memory, value=memory, query_pos=query_embed, key_pos=pos_embed, key_padding_mask=padding_mask) # cls_scores all_cls_scores = self.cls_embed(out_dec) # mask_preds mask_embed = self.mask_embed(out_dec) all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, mask_features) return all_cls_scores, all_mask_preds def loss( self, x: Tuple[Tensor], batch_data_samples: SampleList, ) -> Dict[str, Tensor]: """Perform forward propagation and loss calculation of the panoptic head on the features of the upstream network. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. 
Returns: dict[str, Tensor]: a dictionary of loss components """ batch_img_metas = [] batch_gt_instances = [] batch_gt_semantic_segs = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) if 'gt_sem_seg' in data_sample: batch_gt_semantic_segs.append(data_sample.gt_sem_seg) else: batch_gt_semantic_segs.append(None) # forward all_cls_scores, all_mask_preds = self(x, batch_data_samples) # preprocess ground truth batch_gt_instances = self.preprocess_gt(batch_gt_instances, batch_gt_semantic_segs) # loss losses = self.loss_by_feat(all_cls_scores, all_mask_preds, batch_gt_instances, batch_img_metas) return losses def predict(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> Tuple[Tensor]: """Test without augmentaton. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple[Tensor]: A tuple contains two tensors. - mask_cls_results (Tensor): Mask classification logits,\ shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. - mask_pred_results (Tensor): Mask logits, shape \ (batch_size, num_queries, h, w). """ batch_img_metas = [ data_sample.metainfo for data_sample in batch_data_samples ] all_cls_scores, all_mask_preds = self(x, batch_data_samples) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = batch_img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results
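The final step of `forward` above turns each decoder query into a full-resolution mask by a dot product between its mask embedding and the per-pixel features from the pixel decoder. A shape-level sketch of that einsum (all sizes are illustrative):

import torch

L, B, Q, C, H, W = 6, 2, 100, 256, 32, 32
mask_embed = torch.randn(L, B, Q, C)        # per-layer, per-query embeddings
mask_features = torch.randn(B, C, H, W)     # pixel-decoder output
all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, mask_features)
assert all_mask_preds.shape == (L, B, Q, H, W)  # one logit map per query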
26,604
43.194352
79
py
ERD
ERD-main/mmdet/models/dense_heads/pisa_ssd_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Union import torch from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList, OptInstanceList from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p from ..utils import multi_apply from .ssd_head import SSDHead # TODO: add loss evaluator for SSD @MODELS.register_module() class PISASSDHead(SSDHead): """Implementation of `PISA SSD head <https://arxiv.org/abs/1904.04821>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (Sequence[int]): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Defaults to 0. feat_channels (int): Number of hidden channels when stacked_convs > 0. Defaults to 256. use_depthwise (bool): Whether to use DepthwiseSeparableConv. Defaults to False. conv_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config conv layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config norm layer. Defaults to None. act_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config activation layer. Defaults to None. anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor generator. bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Defaults to False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of anchor head. test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of anchor head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], Optional): Initialization config dict. """ # noqa: W605 def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Union[List[Tensor], Tensor]]: """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Union[List[Tensor], Tensor]]: A dictionary of loss components. the dict has components below: - loss_cls (list[Tensor]): A list containing each feature map \ classification loss. - loss_bbox (list[Tensor]): A list containing each feature map \ regression loss. - loss_carl (Tensor): The loss of CARL. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, unmap_outputs=False, return_sampling_results=True) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results_list) = cls_reg_targets num_images = len(batch_img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) isr_cfg = self.train_cfg.get('isr', None) all_targets = (all_labels.view(-1), all_label_weights.view(-1), all_bbox_targets.view(-1, 4), all_bbox_weights.view(-1, 4)) # apply ISR-P if isr_cfg is not None: all_targets = isr_p( all_cls_scores.view(-1, all_cls_scores.size(-1)), all_bbox_preds.view(-1, 4), all_targets, torch.cat(all_anchors), sampling_results_list, loss_cls=CrossEntropyLoss(), bbox_coder=self.bbox_coder, **self.train_cfg['isr'], num_class=self.num_classes) (new_labels, new_label_weights, new_bbox_targets, new_bbox_weights) = all_targets all_labels = new_labels.view(all_labels.shape) all_label_weights = new_label_weights.view(all_label_weights.shape) all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape) all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape) # add CARL loss carl_loss_cfg = self.train_cfg.get('carl', None) if carl_loss_cfg is not None: loss_carl = carl_loss( all_cls_scores.view(-1, all_cls_scores.size(-1)), all_targets[0], all_bbox_preds.view(-1, 4), all_targets[2], SmoothL1Loss(beta=1.), **self.train_cfg['carl'], avg_factor=avg_factor, num_class=self.num_classes) # check NaN and Inf assert torch.isfinite(all_cls_scores).all().item(), \ 'classification scores become infinite or NaN!' assert torch.isfinite(all_bbox_preds).all().item(), \ 'bbox predications become infinite or NaN!' losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, avg_factor=avg_factor) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) if carl_loss_cfg is not None: loss_dict.update(loss_carl) return loss_dict
7,998
42.710383
79
py
ERD
ERD-main/mmdet/models/dense_heads/base_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from typing import List, Tuple, Union from mmengine.model import BaseModule from torch import Tensor from mmdet.structures import SampleList from mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig from ..utils import unpack_gt_instances class BaseMaskHead(BaseModule, metaclass=ABCMeta): """Base class for mask heads used in One-Stage Instance Segmentation.""" def __init__(self, init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg=init_cfg) @abstractmethod def loss_by_feat(self, *args, **kwargs): """Calculate the loss based on the features extracted by the mask head.""" pass @abstractmethod def predict_by_feat(self, *args, **kwargs): """Transform a batch of output features extracted from the head into mask results.""" pass def loss(self, x: Union[List[Tensor], Tuple[Tensor]], batch_data_samples: SampleList, positive_infos: OptInstanceList = None, **kwargs) -> dict: """Perform forward propagation and loss calculation of the mask head on the features of the upstream network. Args: x (list[Tensor] | tuple[Tensor]): Features from FPN. Each has a shape (B, C, H, W). batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. positive_infos (list[:obj:`InstanceData`], optional): Information of positive samples. Used when the label assignment is done outside the MaskHead, e.g., BboxHead in YOLACT or CondInst, etc. When the label assignment is done in MaskHead, it would be None, like SOLO or SOLOv2. All values in it should have shape (num_positive_samples, *). Returns: dict: A dictionary of loss components. """ if positive_infos is None: outs = self(x) else: outs = self(x, positive_infos) assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \ 'even if only one item is returned' outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \ = outputs for gt_instances, img_metas in zip(batch_gt_instances, batch_img_metas): img_shape = img_metas['batch_input_shape'] gt_masks = gt_instances.masks.pad(img_shape) gt_instances.masks = gt_masks losses = self.loss_by_feat( *outs, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas, positive_infos=positive_infos, batch_gt_instances_ignore=batch_gt_instances_ignore, **kwargs) return losses def predict(self, x: Tuple[Tensor], batch_data_samples: SampleList, rescale: bool = False, results_list: OptInstanceList = None, **kwargs) -> InstanceList: """Test function without test-time augmentation. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to False. results_list (list[obj:`InstanceData`], optional): Detection results of each image after the post process. Only exist if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc. Returns: list[obj:`InstanceData`]: Instance segmentation results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance,) - labels (Tensor): Has a shape (num_instances,). - masks (Tensor): Processed mask results, has a shape (num_instances, h, w). 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] if results_list is None: outs = self(x) else: outs = self(x, results_list) results_list = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, rescale=rescale, results_list=results_list, **kwargs) return results_list
4,989
37.682171
79
py
ERD
ERD-main/mmdet/models/dense_heads/solo_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import mmcv import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.utils.misc import floordiv from mmdet.registry import MODELS from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType from ..layers import mask_matrix_nms from ..utils import center_of_mass, generate_coordinate, multi_apply from .base_mask_head import BaseMaskHead @MODELS.register_module() class SOLOHead(BaseMaskHead): """SOLO mask head used in `SOLO: Segmenting Objects by Locations. <https://arxiv.org/abs/1912.04488>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. Defaults to 256. stacked_convs (int): Number of stacking convs of the head. Defaults to 4. strides (tuple): Downsample factor of each feature map. scale_ranges (tuple[tuple[int, int]]): Area range of multiple level masks, in the format [(min1, max1), (min2, max2), ...]. A range of (16, 64) means the area range between (16, 64). pos_scale (float): Constant scale factor to control the center region. num_grids (list[int]): Divided image into a uniform grids, each feature map has a different grid value. The number of output channels is grid ** 2. Defaults to [40, 36, 24, 16, 12]. cls_down_index (int): The index of downsample operation in classification branch. Defaults to 0. loss_mask (dict): Config of mask loss. loss_cls (dict): Config of classification loss. norm_cfg (dict): Dictionary to construct and config norm layer. Defaults to norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). train_cfg (dict): Training config of head. test_cfg (dict): Testing config of head. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__( self, num_classes: int, in_channels: int, feat_channels: int = 256, stacked_convs: int = 4, strides: tuple = (4, 8, 16, 32, 64), scale_ranges: tuple = ((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), pos_scale: float = 0.2, num_grids: list = [40, 36, 24, 16, 12], cls_down_index: int = 0, loss_mask: ConfigType = dict( type='DiceLoss', use_sigmoid=True, loss_weight=3.0), loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ] ) -> None: super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.cls_out_channels = self.num_classes self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.num_grids = num_grids # number of FPN feats self.num_levels = len(strides) assert self.num_levels == len(scale_ranges) == len(num_grids) self.scale_ranges = scale_ranges self.pos_scale = pos_scale self.cls_down_index = cls_down_index self.loss_cls = MODELS.build(loss_cls) self.loss_mask = MODELS.build(loss_mask) self.norm_cfg = norm_cfg self.init_cfg = init_cfg self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self.mask_convs = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels + 2 if i == 0 else self.feat_channels self.mask_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.conv_mask_list = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list.append( nn.Conv2d(self.feat_channels, num_grid**2, 1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def resize_feats(self, x: Tuple[Tensor]) -> List[Tensor]: """Downsample the first feat and upsample last feat in feats. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: list[Tensor]: Features after resizing, each is a 4D-tensor. """ out = [] for i in range(len(x)): if i == 0: out.append( F.interpolate(x[0], scale_factor=0.5, mode='bilinear')) elif i == len(x) - 1: out.append( F.interpolate( x[i], size=x[i - 1].shape[-2:], mode='bilinear')) else: out.append(x[i]) return out def forward(self, x: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and mask prediction. - mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. Each element in the list has shape (batch_size, num_grids**2 ,h ,w). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). 
""" assert len(x) == self.num_levels feats = self.resize_feats(x) mlvl_mask_preds = [] mlvl_cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat = torch.cat([mask_feat, coord_feat], 1) for mask_layer in (self.mask_convs): mask_feat = mask_layer(mask_feat) mask_feat = F.interpolate( mask_feat, scale_factor=2, mode='bilinear') mask_preds = self.conv_mask_list[i](mask_feat) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_preds = F.interpolate( mask_preds.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mlvl_mask_preds.append(mask_preds) mlvl_cls_preds.append(cls_pred) return mlvl_mask_preds, mlvl_cls_preds def loss_by_feat(self, mlvl_mask_preds: List[Tensor], mlvl_cls_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. Each element in the list has shape (batch_size, num_grids**2 ,h ,w). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" num_levels = self.num_levels num_imgs = len(batch_img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds] # `BoolTensor` in `pos_masks` represent # whether the corresponding point is # positive pos_mask_targets, labels, pos_masks = multi_apply( self._get_targets_single, batch_gt_instances, featmap_sizes=featmap_sizes) # change from the outside list meaning multi images # to the outside list meaning multi levels mlvl_pos_mask_targets = [[] for _ in range(num_levels)] mlvl_pos_mask_preds = [[] for _ in range(num_levels)] mlvl_pos_masks = [[] for _ in range(num_levels)] mlvl_labels = [[] for _ in range(num_levels)] for img_id in range(num_imgs): assert num_levels == len(pos_mask_targets[img_id]) for lvl in range(num_levels): mlvl_pos_mask_targets[lvl].append( pos_mask_targets[img_id][lvl]) mlvl_pos_mask_preds[lvl].append( mlvl_mask_preds[lvl][img_id, pos_masks[img_id][lvl], ...]) mlvl_pos_masks[lvl].append(pos_masks[img_id][lvl].flatten()) mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) # cat multiple image temp_mlvl_cls_preds = [] for lvl in range(num_levels): mlvl_pos_mask_targets[lvl] = torch.cat( mlvl_pos_mask_targets[lvl], dim=0) mlvl_pos_mask_preds[lvl] = torch.cat( mlvl_pos_mask_preds[lvl], dim=0) mlvl_pos_masks[lvl] = torch.cat(mlvl_pos_masks[lvl], dim=0) mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) num_pos = sum(item.sum() for item in mlvl_pos_masks) # dice loss loss_mask = [] for pred, target in zip(mlvl_pos_mask_preds, mlvl_pos_mask_targets): if pred.size()[0] == 0: loss_mask.append(pred.sum().unsqueeze(0)) continue loss_mask.append( self.loss_mask(pred, target, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = torch.cat(loss_mask).mean() flatten_labels = torch.cat(mlvl_labels) flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def _get_targets_single(self, gt_instances: InstanceData, featmap_sizes: Optional[list] = None) -> tuple: """Compute targets for predictions of single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes``, ``labels``, and ``masks`` attributes. featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Defaults to None. Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_pos_masks (list[Tensor]): Each element is a `BoolTensor` to represent whether the corresponding point in single level is positive, has shape (num_grid **2). 
""" gt_labels = gt_instances.labels device = gt_labels.device gt_bboxes = gt_instances.bboxes gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) gt_masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device) mlvl_pos_mask_targets = [] mlvl_labels = [] mlvl_pos_masks = [] for (lower_bound, upper_bound), stride, featmap_size, num_grid \ in zip(self.scale_ranges, self.strides, featmap_sizes, self.num_grids): mask_target = torch.zeros( [num_grid**2, featmap_size[0], featmap_size[1]], dtype=torch.uint8, device=device) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes labels = torch.zeros([num_grid, num_grid], dtype=torch.int64, device=device) + self.num_classes pos_mask = torch.zeros([num_grid**2], dtype=torch.bool, device=device) gt_inds = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(gt_inds) == 0: mlvl_pos_mask_targets.append( mask_target.new_zeros(0, featmap_size[0], featmap_size[1])) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) continue hit_gt_bboxes = gt_bboxes[gt_inds] hit_gt_labels = gt_labels[gt_inds] hit_gt_masks = gt_masks[gt_inds, ...] pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - hit_gt_bboxes[:, 0]) * self.pos_scale pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - hit_gt_bboxes[:, 1]) * self.pos_scale # Make sure hit_gt_masks has a value valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 output_stride = stride / 2 for gt_mask, gt_label, pos_h_range, pos_w_range, \ valid_mask_flag in \ zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, pos_w_ranges, valid_mask_flags): if not valid_mask_flag: continue upsampled_size = (featmap_sizes[0][0] * 4, featmap_sizes[0][1] * 4) center_h, center_w = center_of_mass(gt_mask) coord_w = int( floordiv((center_w / upsampled_size[1]), (1. / num_grid), rounding_mode='trunc')) coord_h = int( floordiv((center_h / upsampled_size[0]), (1. / num_grid), rounding_mode='trunc')) # left, top, right, down top_box = max( 0, int( floordiv( (center_h - pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) down_box = min( num_grid - 1, int( floordiv( (center_h + pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) left_box = max( 0, int( floordiv( (center_w - pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) right_box = min( num_grid - 1, int( floordiv( (center_w + pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) top = max(top_box, coord_h - 1) down = min(down_box, coord_h + 1) left = max(coord_w - 1, left_box) right = min(right_box, coord_w + 1) labels[top:(down + 1), left:(right + 1)] = gt_label # ins gt_mask = np.uint8(gt_mask.cpu().numpy()) # Follow the original implementation, F.interpolate is # different from cv2 and opencv gt_mask = mmcv.imrescale(gt_mask, scale=1. / output_stride) gt_mask = torch.from_numpy(gt_mask).to(device=device) for i in range(top, down + 1): for j in range(left, right + 1): index = int(i * num_grid + j) mask_target[index, :gt_mask.shape[0], :gt_mask. shape[1]] = gt_mask pos_mask[index] = True mlvl_pos_mask_targets.append(mask_target[pos_mask]) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) return mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks def predict_by_feat(self, mlvl_mask_preds: List[Tensor], mlvl_cls_scores: List[Tensor], batch_img_metas: List[dict], **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. 
        Args:
            mlvl_mask_preds (list[Tensor]): Multi-level mask predictions.
                Each element in the list has shape
                (batch_size, num_grids**2, h, w).
            mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element
                in the list has shape
                (batch_size, num_classes, num_grids, num_grids).
            batch_img_metas (list[dict]): Meta information of all images.

        Returns:
            list[:obj:`InstanceData`]: Processed results of multiple
            images. Each :obj:`InstanceData` usually contains the
            following keys.

                - scores (Tensor): Classification scores, has shape
                  (num_instance,).
                - labels (Tensor): Has shape (num_instances,).
                - masks (Tensor): Processed mask results, has shape
                  (num_instances, h, w).
        """
        mlvl_cls_scores = [
            item.permute(0, 2, 3, 1) for item in mlvl_cls_scores
        ]
        assert len(mlvl_mask_preds) == len(mlvl_cls_scores)
        num_levels = len(mlvl_cls_scores)

        results_list = []
        for img_id in range(len(batch_img_metas)):
            cls_pred_list = [
                mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels)
                for lvl in range(num_levels)
            ]
            mask_pred_list = [
                mlvl_mask_preds[lvl][img_id] for lvl in range(num_levels)
            ]

            cls_pred_list = torch.cat(cls_pred_list, dim=0)
            mask_pred_list = torch.cat(mask_pred_list, dim=0)
            img_meta = batch_img_metas[img_id]

            results = self._predict_by_feat_single(
                cls_pred_list, mask_pred_list, img_meta=img_meta)
            results_list.append(results)
        return results_list

    def _predict_by_feat_single(self,
                                cls_scores: Tensor,
                                mask_preds: Tensor,
                                img_meta: dict,
                                cfg: OptConfigType = None) -> InstanceData:
        """Transform a single image's features extracted from the head into
        mask results.

        Args:
            cls_scores (Tensor): Classification score of all points in a
                single image, has shape (num_points, num_classes).
            mask_preds (Tensor): Mask predictions of all points in a
                single image, has shape (num_points, feat_h, feat_w).
            img_meta (dict): Meta information of the corresponding image.
            cfg (dict, optional): Config used in the test phase.
                Defaults to None.

        Returns:
            :obj:`InstanceData`: Processed results of a single image.
            It usually contains the following keys.

                - scores (Tensor): Classification scores, has shape
                  (num_instance,).
                - labels (Tensor): Has shape (num_instances,).
                - masks (Tensor): Processed mask results, has shape
                  (num_instances, h, w).
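        An illustrative sketch of the "maskness" rescoring applied to each
        kept prediction (values are made up):

        Examples:
            >>> import torch
            >>> mask_pred = torch.tensor([[[0.9, 0.2], [0.8, 0.1]]])
            >>> mask = mask_pred > 0.5
            >>> (mask_pred * mask).sum((1, 2)) / mask.sum((1, 2))
            tensor([0.8500])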
""" def empty_results(cls_scores, ori_shape): """Generate a empty results.""" results = InstanceData() results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *ori_shape) results.labels = cls_scores.new_ones(0) results.bboxes = cls_scores.new_zeros(0, 4) return results cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(mask_preds) featmap_size = mask_preds.size()[-2:] h, w = img_meta['img_shape'][:2] upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] if len(cls_scores) == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) inds = score_mask.nonzero() cls_labels = inds[:, 1] # Filter the mask mask with an area is smaller than # stride of corresponding feature level lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) strides = cls_scores.new_ones(lvl_interval[-1]) strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= self.strides[lvl] strides = strides[inds[:, 0]] mask_preds = mask_preds[inds[:, 0]] masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) # mask_matrix_nms may return an empty Tensor if len(keep_inds) == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear')[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=img_meta['ori_shape'][:2], mode='bilinear').squeeze(0) masks = mask_preds > cfg.mask_thr results = InstanceData() results.masks = masks results.labels = labels results.scores = scores # create an empty bbox in InstanceData to avoid bugs when # calculating metrics. results.bboxes = results.scores.new_zeros(len(scores), 4) return results @MODELS.register_module() class DecoupledSOLOHead(SOLOHead): """Decoupled SOLO mask head used in `SOLO: Segmenting Objects by Locations. <https://arxiv.org/abs/1912.04488>`_ Args: init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, *args, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_x')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_y')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs) -> None: super().__init__(*args, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: self.mask_convs_x = nn.ModuleList() self.mask_convs_y = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels + 1 if i == 0 else self.feat_channels self.mask_convs_x.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.mask_convs_y.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.conv_mask_list_x = nn.ModuleList() self.conv_mask_list_y = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list_x.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_mask_list_y.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, x: Tuple[Tensor]) -> Tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and mask prediction. - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). - mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). 
""" assert len(x) == self.num_levels feats = self.resize_feats(x) mask_preds_x = [] mask_preds_y = [] cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat_x = torch.cat([mask_feat, coord_feat[:, 0:1, ...]], 1) mask_feat_y = torch.cat([mask_feat, coord_feat[:, 1:2, ...]], 1) for mask_layer_x, mask_layer_y in \ zip(self.mask_convs_x, self.mask_convs_y): mask_feat_x = mask_layer_x(mask_feat_x) mask_feat_y = mask_layer_y(mask_feat_y) mask_feat_x = F.interpolate( mask_feat_x, scale_factor=2, mode='bilinear') mask_feat_y = F.interpolate( mask_feat_y, scale_factor=2, mode='bilinear') mask_pred_x = self.conv_mask_list_x[i](mask_feat_x) mask_pred_y = self.conv_mask_list_y[i](mask_feat_y) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred_x = F.interpolate( mask_pred_x.sigmoid(), size=upsampled_size, mode='bilinear') mask_pred_y = F.interpolate( mask_pred_y.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mask_preds_x.append(mask_pred_x) mask_preds_y.append(mask_pred_y) cls_preds.append(cls_pred) return mask_preds_x, mask_preds_y, cls_preds def loss_by_feat(self, mlvl_mask_preds_x: List[Tensor], mlvl_mask_preds_y: List[Tensor], mlvl_cls_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" num_levels = self.num_levels num_imgs = len(batch_img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds_x] pos_mask_targets, labels, xy_pos_indexes = multi_apply( self._get_targets_single, batch_gt_instances, featmap_sizes=featmap_sizes) # change from the outside list meaning multi images # to the outside list meaning multi levels mlvl_pos_mask_targets = [[] for _ in range(num_levels)] mlvl_pos_mask_preds_x = [[] for _ in range(num_levels)] mlvl_pos_mask_preds_y = [[] for _ in range(num_levels)] mlvl_labels = [[] for _ in range(num_levels)] for img_id in range(num_imgs): for lvl in range(num_levels): mlvl_pos_mask_targets[lvl].append( pos_mask_targets[img_id][lvl]) mlvl_pos_mask_preds_x[lvl].append( mlvl_mask_preds_x[lvl][img_id, xy_pos_indexes[img_id][lvl][:, 1]]) mlvl_pos_mask_preds_y[lvl].append( mlvl_mask_preds_y[lvl][img_id, xy_pos_indexes[img_id][lvl][:, 0]]) mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) # cat multiple image temp_mlvl_cls_preds = [] for lvl in range(num_levels): mlvl_pos_mask_targets[lvl] = torch.cat( mlvl_pos_mask_targets[lvl], dim=0) mlvl_pos_mask_preds_x[lvl] = torch.cat( mlvl_pos_mask_preds_x[lvl], dim=0) mlvl_pos_mask_preds_y[lvl] = torch.cat( mlvl_pos_mask_preds_y[lvl], dim=0) mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) num_pos = 0. # dice loss loss_mask = [] for pred_x, pred_y, target in \ zip(mlvl_pos_mask_preds_x, mlvl_pos_mask_preds_y, mlvl_pos_mask_targets): num_masks = pred_x.size(0) if num_masks == 0: # make sure can get grad loss_mask.append((pred_x.sum() + pred_y.sum()).unsqueeze(0)) continue num_pos += num_masks pred_mask = pred_y.sigmoid() * pred_x.sigmoid() loss_mask.append( self.loss_mask(pred_mask, target, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = torch.cat(loss_mask).mean() # cate flatten_labels = torch.cat(mlvl_labels) flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def _get_targets_single(self, gt_instances: InstanceData, featmap_sizes: Optional[list] = None) -> tuple: """Compute targets for predictions of single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes``, ``labels``, and ``masks`` attributes. featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Defaults to None. Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_xy_pos_indexes (list[Tensor]): Each element in the list contains the index of positive samples in corresponding level, has shape (num_pos, 2), last dimension 2 present (index_x, index_y). 
""" mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks = \ super()._get_targets_single(gt_instances, featmap_sizes=featmap_sizes) mlvl_xy_pos_indexes = [(item - self.num_classes).nonzero() for item in mlvl_labels] return mlvl_pos_mask_targets, mlvl_labels, mlvl_xy_pos_indexes def predict_by_feat(self, mlvl_mask_preds_x: List[Tensor], mlvl_mask_preds_y: List[Tensor], mlvl_cls_scores: List[Tensor], batch_img_metas: List[dict], **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. Args: mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes ,num_grids ,num_grids). batch_img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ mlvl_cls_scores = [ item.permute(0, 2, 3, 1) for item in mlvl_cls_scores ] assert len(mlvl_mask_preds_x) == len(mlvl_cls_scores) num_levels = len(mlvl_cls_scores) results_list = [] for img_id in range(len(batch_img_metas)): cls_pred_list = [ mlvl_cls_scores[i][img_id].view( -1, self.cls_out_channels).detach() for i in range(num_levels) ] mask_pred_list_x = [ mlvl_mask_preds_x[i][img_id] for i in range(num_levels) ] mask_pred_list_y = [ mlvl_mask_preds_y[i][img_id] for i in range(num_levels) ] cls_pred_list = torch.cat(cls_pred_list, dim=0) mask_pred_list_x = torch.cat(mask_pred_list_x, dim=0) mask_pred_list_y = torch.cat(mask_pred_list_y, dim=0) img_meta = batch_img_metas[img_id] results = self._predict_by_feat_single( cls_pred_list, mask_pred_list_x, mask_pred_list_y, img_meta=img_meta) results_list.append(results) return results_list def _predict_by_feat_single(self, cls_scores: Tensor, mask_preds_x: Tensor, mask_preds_y: Tensor, img_meta: dict, cfg: OptConfigType = None) -> InstanceData: """Transform a single image's features extracted from the head into mask results. Args: cls_scores (Tensor): Classification score of all points in single image, has shape (num_points, num_classes). mask_preds_x (Tensor): Mask prediction of x branch of all points in single image, has shape (sum_num_grids, feat_h, feat_w). mask_preds_y (Tensor): Mask prediction of y branch of all points in single image, has shape (sum_num_grids, feat_h, feat_w). img_meta (dict): Meta information of corresponding image. cfg (dict): Config used in test phase. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
""" def empty_results(cls_scores, ori_shape): """Generate a empty results.""" results = InstanceData() results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *ori_shape) results.labels = cls_scores.new_ones(0) results.bboxes = cls_scores.new_zeros(0, 4) return results cfg = self.test_cfg if cfg is None else cfg featmap_size = mask_preds_x.size()[-2:] h, w = img_meta['img_shape'][:2] upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] inds = score_mask.nonzero() lvl_interval = inds.new_tensor(self.num_grids).pow(2).cumsum(0) num_all_points = lvl_interval[-1] lvl_start_index = inds.new_ones(num_all_points) num_grids = inds.new_ones(num_all_points) seg_size = inds.new_tensor(self.num_grids).cumsum(0) mask_lvl_start_index = inds.new_ones(num_all_points) strides = inds.new_ones(num_all_points) lvl_start_index[:lvl_interval[0]] *= 0 mask_lvl_start_index[:lvl_interval[0]] *= 0 num_grids[:lvl_interval[0]] *= self.num_grids[0] strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ lvl_interval[lvl - 1] mask_lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ seg_size[lvl - 1] num_grids[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ self.num_grids[lvl] strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ self.strides[lvl] lvl_start_index = lvl_start_index[inds[:, 0]] mask_lvl_start_index = mask_lvl_start_index[inds[:, 0]] num_grids = num_grids[inds[:, 0]] strides = strides[inds[:, 0]] y_lvl_offset = (inds[:, 0] - lvl_start_index) // num_grids x_lvl_offset = (inds[:, 0] - lvl_start_index) % num_grids y_inds = mask_lvl_start_index + y_lvl_offset x_inds = mask_lvl_start_index + x_lvl_offset cls_labels = inds[:, 1] mask_preds = mask_preds_x[x_inds, ...] * mask_preds_y[y_inds, ...] masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) # mask_matrix_nms may return an empty Tensor if len(keep_inds) == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear')[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=img_meta['ori_shape'][:2], mode='bilinear').squeeze(0) masks = mask_preds > cfg.mask_thr results = InstanceData() results.masks = masks results.labels = labels results.scores = scores # create an empty bbox in InstanceData to avoid bugs when # calculating metrics. results.bboxes = results.scores.new_zeros(len(scores), 4) return results @MODELS.register_module() class DecoupledSOLOLightHead(DecoupledSOLOHead): """Decoupled Light SOLO mask head used in `SOLO: Segmenting Objects by Locations <https://arxiv.org/abs/1912.04488>`_ Args: with_dcn (bool): Whether use dcn in mask_convs and cls_convs, Defaults to False. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, *args, dcn_cfg: OptConfigType = None, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_x')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_y')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs) -> None: assert dcn_cfg is None or isinstance(dcn_cfg, dict) self.dcn_cfg = dcn_cfg super().__init__(*args, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: self.mask_convs = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): if self.dcn_cfg is not None \ and i == self.stacked_convs - 1: conv_cfg = self.dcn_cfg else: conv_cfg = None chn = self.in_channels + 2 if i == 0 else self.feat_channels self.mask_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) self.conv_mask_list_x = nn.ModuleList() self.conv_mask_list_y = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list_x.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_mask_list_y.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, x: Tuple[Tensor]) -> Tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and mask prediction. - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). - mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). 
""" assert len(x) == self.num_levels feats = self.resize_feats(x) mask_preds_x = [] mask_preds_y = [] cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat = torch.cat([mask_feat, coord_feat], 1) for mask_layer in self.mask_convs: mask_feat = mask_layer(mask_feat) mask_feat = F.interpolate( mask_feat, scale_factor=2, mode='bilinear') mask_pred_x = self.conv_mask_list_x[i](mask_feat) mask_pred_y = self.conv_mask_list_y[i](mask_feat) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred_x = F.interpolate( mask_pred_x.sigmoid(), size=upsampled_size, mode='bilinear') mask_pred_y = F.interpolate( mask_pred_y.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mask_preds_x.append(mask_pred_x) mask_preds_y.append(mask_pred_y) cls_preds.append(cls_pred) return mask_preds_x, mask_preds_y, cls_preds
52,112
40.228639
79
py
ERD
ERD-main/mmdet/models/dense_heads/embedding_rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch import torch.nn as nn from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_cxcywh_to_xyxy from mmdet.structures.det_data_sample import SampleList from mmdet.utils import InstanceList, OptConfigType @MODELS.register_module() class EmbeddingRPNHead(BaseModule): """RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ . Unlike traditional RPNHead, this module does not need FPN input, but just decode `init_proposal_bboxes` and expand the first dimension of `init_proposal_bboxes` and `init_proposal_features` to the batch_size. Args: num_proposals (int): Number of init_proposals. Defaults to 100. proposal_feature_channel (int): Channel number of init_proposal_feature. Defaults to 256. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. Defaults to None. """ def __init__(self, num_proposals: int = 100, proposal_feature_channel: int = 256, init_cfg: OptConfigType = None, **kwargs) -> None: # `**kwargs` is necessary to avoid some potential error. assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super().__init__(init_cfg=init_cfg) self.num_proposals = num_proposals self.proposal_feature_channel = proposal_feature_channel self._init_layers() def _init_layers(self) -> None: """Initialize a sparse set of proposal boxes and proposal features.""" self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4) self.init_proposal_features = nn.Embedding( self.num_proposals, self.proposal_feature_channel) def init_weights(self) -> None: """Initialize the init_proposal_bboxes as normalized. [c_x, c_y, w, h], and we initialize it to the size of the entire image. """ super().init_weights() nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5) nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1) def _decode_init_proposals(self, x: List[Tensor], batch_data_samples: SampleList) -> InstanceList: """Decode init_proposal_bboxes according to the size of images and expand dimension of init_proposal_features to batch_size. Args: x (list[Tensor]): List of FPN features. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: List[:obj:`InstanceData`:] Detection results of each image. Each item usually contains following keys. - proposals: Decoded proposal bboxes, has shape (num_proposals, 4). - features: init_proposal_features, expanded proposal features, has shape (num_proposals, proposal_feature_channel). - imgs_whwh: Tensor with shape (num_proposals, 4), the dimension means [img_width, img_height, img_width, img_height]. 
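        A minimal sketch of the decoding itself (the learned boxes are
        initialized to cover the whole image, see ``init_weights``; the
        800x600 image size is made up):

        Examples:
            >>> import torch
            >>> from mmdet.structures.bbox import bbox_cxcywh_to_xyxy
            >>> proposals = torch.tensor([[0.5, 0.5, 1.0, 1.0]])
            >>> whwh = torch.tensor([[800., 600., 800., 600.]])
            >>> bbox_cxcywh_to_xyxy(proposals) * whwh
            tensor([[  0.,   0., 800., 600.]])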
""" batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) proposals = self.init_proposal_bboxes.weight.clone() proposals = bbox_cxcywh_to_xyxy(proposals) imgs_whwh = [] for meta in batch_img_metas: h, w = meta['img_shape'][:2] imgs_whwh.append(x[0].new_tensor([[w, h, w, h]])) imgs_whwh = torch.cat(imgs_whwh, dim=0) imgs_whwh = imgs_whwh[:, None, :] proposals = proposals * imgs_whwh rpn_results_list = [] for idx in range(len(batch_img_metas)): rpn_results = InstanceData() rpn_results.bboxes = proposals[idx] rpn_results.imgs_whwh = imgs_whwh[idx].repeat( self.num_proposals, 1) rpn_results.features = self.init_proposal_features.weight.clone() rpn_results_list.append(rpn_results) return rpn_results_list def loss(self, *args, **kwargs): """Perform forward propagation and loss calculation of the detection head on the features of the upstream network.""" raise NotImplementedError( 'EmbeddingRPNHead does not have `loss`, please use ' '`predict` or `loss_and_predict` instead.') def predict(self, x: List[Tensor], batch_data_samples: SampleList, **kwargs) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network.""" # `**kwargs` is necessary to avoid some potential error. return self._decode_init_proposals( x=x, batch_data_samples=batch_data_samples) def loss_and_predict(self, x: List[Tensor], batch_data_samples: SampleList, **kwargs) -> tuple: """Perform forward propagation of the head, then calculate loss and predictions from the features and data samples.""" # `**kwargs` is necessary to avoid some potential error. predictions = self._decode_init_proposals( x=x, batch_data_samples=batch_data_samples) return dict(), predictions
5,695
41.827068
79
py
ERD
ERD-main/mmdet/models/dense_heads/autoassign_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Sequence, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Scale
from mmengine.model import bias_init_with_prob, normal_init
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox_overlaps
from mmdet.utils import InstanceList, OptInstanceList, reduce_mean
from ..task_modules.prior_generators import MlvlPointGenerator
from ..utils import levels_to_images, multi_apply
from .fcos_head import FCOSHead

EPS = 1e-12


class CenterPrior(nn.Module):
    """Center Weighting module to adjust the category-specific prior
    distributions.

    Args:
        force_topk (bool): When no point falls into gt_bbox, forcibly
            select the k points closest to the center to calculate
            the center prior. Defaults to False.
        topk (int): The number of points used to calculate the center
            prior when no point falls in gt_bbox. Only works when
            force_topk is True. Defaults to 9.
        num_classes (int): The class number of dataset. Defaults to 80.
        strides (Sequence[int]): The stride of each input feature map.
            Defaults to (8, 16, 32, 64, 128).
    """

    def __init__(
        self,
        force_topk: bool = False,
        topk: int = 9,
        num_classes: int = 80,
        strides: Sequence[int] = (8, 16, 32, 64, 128)
    ) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(num_classes, 2))
        self.sigma = nn.Parameter(torch.ones(num_classes, 2))
        self.strides = strides
        self.force_topk = force_topk
        self.topk = topk

    def forward(self, anchor_points_list: List[Tensor],
                gt_instances: InstanceData,
                inside_gt_bbox_mask: Tensor) -> Tuple[Tensor, Tensor]:
        """Get the center prior of each point on the feature map for each
        instance.

        Args:
            anchor_points_list (list[Tensor]): list of coordinates of
                points on the feature map. Each with shape
                (num_points, 2).
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It should include ``bboxes`` and ``labels``
                attributes.
            inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape
                of (num_points, num_gt), each value is used to mark
                whether this point falls within a certain gt.

        Returns:
            tuple[Tensor, Tensor]:

            - center_prior_weights (Tensor): Float tensor with shape of \
            (num_points, num_gt). Each value represents the center \
            weighting coefficient.
            - inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape \
            of (num_points, num_gt), each value is used to mark whether this \
            point falls within a certain gt or is the topk nearest points for \
            a specific gt_bbox.
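        A minimal sketch of the per-category Gaussian weighting: a point
        whose stride-normalized offset from the gt center equals the
        learned mean gets weight 1 (all numbers below are made up):

        Examples:
            >>> import torch
            >>> offset = torch.zeros(1, 1, 2)  # (point - center) / stride
            >>> mean, sigma = torch.zeros(1, 1, 2), torch.ones(1, 1, 2)
            >>> distance = (offset - mean)**2
            >>> torch.exp(-distance / (2 * sigma**2)).prod(dim=-1)
            tensor([[1.]])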
""" gt_bboxes = gt_instances.bboxes labels = gt_instances.labels inside_gt_bbox_mask = inside_gt_bbox_mask.clone() num_gts = len(labels) num_points = sum([len(item) for item in anchor_points_list]) if num_gts == 0: return gt_bboxes.new_zeros(num_points, num_gts), inside_gt_bbox_mask center_prior_list = [] for slvl_points, stride in zip(anchor_points_list, self.strides): # slvl_points: points from single level in FPN, has shape (h*w, 2) # single_level_points has shape (h*w, num_gt, 2) single_level_points = slvl_points[:, None, :].expand( (slvl_points.size(0), len(gt_bboxes), 2)) gt_center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2) gt_center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2) gt_center = torch.stack((gt_center_x, gt_center_y), dim=1) gt_center = gt_center[None] # instance_center has shape (1, num_gt, 2) instance_center = self.mean[labels][None] # instance_sigma has shape (1, num_gt, 2) instance_sigma = self.sigma[labels][None] # distance has shape (num_points, num_gt, 2) distance = (((single_level_points - gt_center) / float(stride) - instance_center)**2) center_prior = torch.exp(-distance / (2 * instance_sigma**2)).prod(dim=-1) center_prior_list.append(center_prior) center_prior_weights = torch.cat(center_prior_list, dim=0) if self.force_topk: gt_inds_no_points_inside = torch.nonzero( inside_gt_bbox_mask.sum(0) == 0).reshape(-1) if gt_inds_no_points_inside.numel(): topk_center_index = \ center_prior_weights[:, gt_inds_no_points_inside].topk( self.topk, dim=0)[1] temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside] inside_gt_bbox_mask[:, gt_inds_no_points_inside] = \ torch.scatter(temp_mask, dim=0, index=topk_center_index, src=torch.ones_like( topk_center_index, dtype=torch.bool)) center_prior_weights[~inside_gt_bbox_mask] = 0 return center_prior_weights, inside_gt_bbox_mask @MODELS.register_module() class AutoAssignHead(FCOSHead): """AutoAssignHead head used in AutoAssign. More details can be found in the `paper <https://arxiv.org/abs/2007.03496>`_ . Args: force_topk (bool): Used in center prior initialization to handle extremely small gt. Default is False. topk (int): The number of points used to calculate the center prior when no point falls in gt_bbox. Only work when force_topk if True. Defaults to 9. pos_loss_weight (float): The loss weight of positive loss and with default value 0.25. neg_loss_weight (float): The loss weight of negative loss and with default value 0.75. center_loss_weight (float): The loss weight of center prior loss and with default value 0.75. """ def __init__(self, *args, force_topk: bool = False, topk: int = 9, pos_loss_weight: float = 0.25, neg_loss_weight: float = 0.75, center_loss_weight: float = 0.75, **kwargs) -> None: super().__init__(*args, conv_bias=True, **kwargs) self.center_prior = CenterPrior( force_topk=force_topk, topk=topk, num_classes=self.num_classes, strides=self.strides) self.pos_loss_weight = pos_loss_weight self.neg_loss_weight = neg_loss_weight self.center_loss_weight = center_loss_weight self.prior_generator = MlvlPointGenerator(self.strides, offset=0) def init_weights(self) -> None: """Initialize weights of the head. 
In particular, we have special initialization for classified conv's and regression conv's bias """ super(AutoAssignHead, self).init_weights() bias_cls = bias_init_with_prob(0.02) normal_init(self.conv_cls, std=0.01, bias=bias_cls) normal_init(self.conv_reg, std=0.01, bias=4.0) def forward_single(self, x: Tensor, scale: Scale, stride: int) -> Tuple[Tensor, Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. Returns: tuple[Tensor, Tensor, Tensor]: scores for each class, bbox predictions and centerness predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = super( FCOSHead, self).forward_single(x) centerness = self.conv_centerness(reg_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) bbox_pred *= stride return cls_score, bbox_pred, centerness def get_pos_loss_single(self, cls_score: Tensor, objectness: Tensor, reg_loss: Tensor, gt_instances: InstanceData, center_prior_weights: Tensor) -> Tuple[Tensor]: """Calculate the positive loss of all points in gt_bboxes. Args: cls_score (Tensor): All category scores for each point on the feature map. The shape is (num_points, num_class). objectness (Tensor): Foreground probability of all points, has shape (num_points, 1). reg_loss (Tensor): The regression loss of each gt_bbox and each prediction box, has shape of (num_points, num_gt). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. center_prior_weights (Tensor): Float tensor with shape of (num_points, num_gt). Each value represents the center weighting coefficient. Returns: tuple[Tensor]: - pos_loss (Tensor): The positive loss of all points in the \ gt_bboxes. """ gt_labels = gt_instances.labels # p_loc: localization confidence p_loc = torch.exp(-reg_loss) # p_cls: classification confidence p_cls = (cls_score * objectness)[:, gt_labels] # p_pos: joint confidence indicator p_pos = p_cls * p_loc # 3 is a hyper-parameter to control the contributions of high and # low confidence locations towards positive losses. confidence_weight = torch.exp(p_pos * 3) p_pos_weight = (confidence_weight * center_prior_weights) / ( (confidence_weight * center_prior_weights).sum( 0, keepdim=True)).clamp(min=EPS) reweighted_p_pos = (p_pos * p_pos_weight).sum(0) pos_loss = F.binary_cross_entropy( reweighted_p_pos, torch.ones_like(reweighted_p_pos), reduction='none') pos_loss = pos_loss.sum() * self.pos_loss_weight return pos_loss, def get_neg_loss_single(self, cls_score: Tensor, objectness: Tensor, gt_instances: InstanceData, ious: Tensor, inside_gt_bbox_mask: Tensor) -> Tuple[Tensor]: """Calculate the negative loss of all points in feature map. Args: cls_score (Tensor): All category scores for each point on the feature map. The shape is (num_points, num_class). objectness (Tensor): Foreground probability of all points and is shape of (num_points, 1). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. 
                It should include ``bboxes`` and ``labels`` attributes.
            ious (Tensor): Float tensor with shape of (num_points, num_gt).
                Each value represents the IoU between the predicted bbox
                and the gt bboxes.
            inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape
                of (num_points, num_gt), each value is used to mark
                whether this point falls within a certain gt.

        Returns:
            tuple[Tensor]:

            - neg_loss (Tensor): The negative loss of all points in the \
            feature map.
        """
        gt_labels = gt_instances.labels
        num_gts = len(gt_labels)
        joint_conf = (cls_score * objectness)
        p_neg_weight = torch.ones_like(joint_conf)
        if num_gts > 0:
            # the order of the dimensions would affect the value of
            # p_neg_weight; we strictly follow the original
            # implementation.
            inside_gt_bbox_mask = inside_gt_bbox_mask.permute(1, 0)
            ious = ious.permute(1, 0)

            foreground_idxs = torch.nonzero(inside_gt_bbox_mask, as_tuple=True)
            temp_weight = (1 / (1 - ious[foreground_idxs]).clamp_(EPS))

            def normalize(x):
                return (x - x.min() + EPS) / (x.max() - x.min() + EPS)

            for instance_idx in range(num_gts):
                idxs = foreground_idxs[0] == instance_idx
                if idxs.any():
                    temp_weight[idxs] = normalize(temp_weight[idxs])

            p_neg_weight[foreground_idxs[1],
                         gt_labels[foreground_idxs[0]]] = 1 - temp_weight

        logits = (joint_conf * p_neg_weight)
        neg_loss = (
            logits**2 * F.binary_cross_entropy(
                logits, torch.zeros_like(logits), reduction='none'))
        neg_loss = neg_loss.sum() * self.neg_loss_weight
        return neg_loss,

    def loss_by_feat(
        self,
        cls_scores: List[Tensor],
        bbox_preds: List[Tensor],
        objectnesses: List[Tensor],
        batch_gt_instances: InstanceList,
        batch_img_metas: List[dict],
        batch_gt_instances_ignore: OptInstanceList = None
    ) -> Dict[str, Tensor]:
        """Calculate the loss based on the features extracted by the
        detection head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            objectnesses (list[Tensor]): objectness for each scale level,
                each is a 4D-tensor, the channel number is num_points * 1.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
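        An illustrative sketch of the confidence weighting used inside the
        positive loss (cf. ``get_pos_loss_single``; numbers are made up):

        Examples:
            >>> import torch
            >>> reg_loss = torch.tensor([0.2, 1.5])
            >>> p_loc = torch.exp(-reg_loss)      # localization confidence
            >>> p_cls = torch.tensor([0.9, 0.3])  # classification confidence
            >>> w = torch.exp(p_cls * p_loc * 3)  # sharpened joint weight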
""" assert len(cls_scores) == len(bbox_preds) == len(objectnesses) all_num_gt = sum([len(item) for item in batch_gt_instances]) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) inside_gt_bbox_mask_list, bbox_targets_list = self.get_targets( all_level_points, batch_gt_instances) center_prior_weight_list = [] temp_inside_gt_bbox_mask_list = [] for gt_instances, inside_gt_bbox_mask in zip(batch_gt_instances, inside_gt_bbox_mask_list): center_prior_weight, inside_gt_bbox_mask = \ self.center_prior(all_level_points, gt_instances, inside_gt_bbox_mask) center_prior_weight_list.append(center_prior_weight) temp_inside_gt_bbox_mask_list.append(inside_gt_bbox_mask) inside_gt_bbox_mask_list = temp_inside_gt_bbox_mask_list mlvl_points = torch.cat(all_level_points, dim=0) bbox_preds = levels_to_images(bbox_preds) cls_scores = levels_to_images(cls_scores) objectnesses = levels_to_images(objectnesses) reg_loss_list = [] ious_list = [] num_points = len(mlvl_points) for bbox_pred, encoded_targets, inside_gt_bbox_mask in zip( bbox_preds, bbox_targets_list, inside_gt_bbox_mask_list): temp_num_gt = encoded_targets.size(1) expand_mlvl_points = mlvl_points[:, None, :].expand( num_points, temp_num_gt, 2).reshape(-1, 2) encoded_targets = encoded_targets.reshape(-1, 4) expand_bbox_pred = bbox_pred[:, None, :].expand( num_points, temp_num_gt, 4).reshape(-1, 4) decoded_bbox_preds = self.bbox_coder.decode( expand_mlvl_points, expand_bbox_pred) decoded_target_preds = self.bbox_coder.decode( expand_mlvl_points, encoded_targets) with torch.no_grad(): ious = bbox_overlaps( decoded_bbox_preds, decoded_target_preds, is_aligned=True) ious = ious.reshape(num_points, temp_num_gt) if temp_num_gt: ious = ious.max( dim=-1, keepdim=True).values.repeat(1, temp_num_gt) else: ious = ious.new_zeros(num_points, temp_num_gt) ious[~inside_gt_bbox_mask] = 0 ious_list.append(ious) loss_bbox = self.loss_bbox( decoded_bbox_preds, decoded_target_preds, weight=None, reduction_override='none') reg_loss_list.append(loss_bbox.reshape(num_points, temp_num_gt)) cls_scores = [item.sigmoid() for item in cls_scores] objectnesses = [item.sigmoid() for item in objectnesses] pos_loss_list, = multi_apply(self.get_pos_loss_single, cls_scores, objectnesses, reg_loss_list, batch_gt_instances, center_prior_weight_list) pos_avg_factor = reduce_mean( bbox_pred.new_tensor(all_num_gt)).clamp_(min=1) pos_loss = sum(pos_loss_list) / pos_avg_factor neg_loss_list, = multi_apply(self.get_neg_loss_single, cls_scores, objectnesses, batch_gt_instances, ious_list, inside_gt_bbox_mask_list) neg_avg_factor = sum(item.data.sum() for item in center_prior_weight_list) neg_avg_factor = reduce_mean(neg_avg_factor).clamp_(min=1) neg_loss = sum(neg_loss_list) / neg_avg_factor center_loss = [] for i in range(len(batch_img_metas)): if inside_gt_bbox_mask_list[i].any(): center_loss.append( len(batch_gt_instances[i]) / center_prior_weight_list[i].sum().clamp_(min=EPS)) # when width or height of gt_bbox is smaller than stride of p3 else: center_loss.append(center_prior_weight_list[i].sum() * 0) center_loss = torch.stack(center_loss).mean() * self.center_loss_weight # avoid dead lock in DDP if all_num_gt == 0: pos_loss = bbox_preds[0].sum() * 0 dummy_center_prior_loss = self.center_prior.mean.sum( ) * 0 + self.center_prior.sigma.sum() * 0 center_loss = objectnesses[0].sum() * 0 + dummy_center_prior_loss loss = dict( loss_pos=pos_loss, 
loss_neg=neg_loss, loss_center=center_loss) return loss def get_targets( self, points: List[Tensor], batch_gt_instances: InstanceList ) -> Tuple[List[Tensor], List[Tensor]]: """Compute regression targets and each point inside or outside gt_bbox in multiple images. Args: points (list[Tensor]): Points of all fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple(list[Tensor], list[Tensor]): - inside_gt_bbox_mask_list (list[Tensor]): Each Tensor is with \ bool type and shape of (num_points, num_gt), each value is used \ to mark whether this point falls within a certain gt. - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ level. Each tensor has shape (num_points, num_gt, 4). """ concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl inside_gt_bbox_mask_list, bbox_targets_list = multi_apply( self._get_targets_single, batch_gt_instances, points=concat_points) return inside_gt_bbox_mask_list, bbox_targets_list def _get_targets_single(self, gt_instances: InstanceData, points: Tensor) -> Tuple[Tensor, Tensor]: """Compute regression targets and each point inside or outside gt_bbox for a single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. points (Tensor): Points of all fpn level, has shape (num_points, 2). Returns: tuple[Tensor, Tensor]: Containing the following Tensors: - inside_gt_bbox_mask (Tensor): Bool tensor with shape \ (num_points, num_gt), each value is used to mark whether this \ point falls within a certain gt. - bbox_targets (Tensor): BBox targets of each points with each \ gt_bboxes, has shape (num_points, num_gt, 4). """ gt_bboxes = gt_instances.bboxes num_points = points.size(0) num_gts = gt_bboxes.size(0) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None] ys = ys[:, None] left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if num_gts: inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 else: inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts), dtype=torch.bool) return inside_gt_bbox_mask, bbox_targets
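# Illustrative note (not part of the head): for a point (x, y) and a gt box
# (x1, y1, x2, y2), `_get_targets_single` builds the regression target as
# the (left, top, right, bottom) offsets (x - x1, y - y1, x2 - x, y2 - y);
# a point counts as inside the box iff all four offsets are positive, which
# is exactly the `bbox_targets.min(-1)[0] > 0` test above.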
23,317
43.415238
79
py
ERD
ERD-main/mmdet/models/dense_heads/dino_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Tuple import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh from mmdet.utils import InstanceList, OptInstanceList, reduce_mean from ..utils import multi_apply from .deformable_detr_head import DeformableDETRHead @MODELS.register_module() class DINOHead(DeformableDETRHead): r"""Head of the DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection Code is modified from the `official github repo <https://github.com/IDEA-Research/DINO>`_. More details can be found in the `paper <https://arxiv.org/abs/2203.03605>`_ . """ def loss(self, hidden_states: Tensor, references: List[Tensor], enc_outputs_class: Tensor, enc_outputs_coord: Tensor, batch_data_samples: SampleList, dn_meta: Dict[str, int]) -> dict: """Perform forward propagation and loss calculation of the detection head on the queries of the upstream network. Args: hidden_states (Tensor): Hidden states output from each decoder layer, has shape (num_decoder_layers, bs, num_queries_total, dim), where `num_queries_total` is the sum of `num_denoising_queries` and `num_matching_queries` when `self.training` is `True`, else `num_matching_queries`. references (list[Tensor]): List of the reference from the decoder. The first reference is the `init_reference` (initial) and the other num_decoder_layers(6) references are `inter_references` (intermediate). The `init_reference` has shape (bs, num_queries_total, 4) and each `inter_reference` has shape (bs, num_queries, 4) with the last dimension arranged as (cx, cy, w, h). enc_outputs_class (Tensor): The score of each point on encode feature map, has shape (bs, num_feat_points, cls_out_channels). enc_outputs_coord (Tensor): The proposal generate from the encode feature map, has shape (bs, num_feat_points, 4) with the last dimension arranged as (cx, cy, w, h). batch_data_samples (list[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. Returns: dict: A dictionary of loss components. """ batch_gt_instances = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) outs = self(hidden_states, references) loss_inputs = outs + (enc_outputs_class, enc_outputs_coord, batch_gt_instances, batch_img_metas, dn_meta) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, all_layers_cls_scores: Tensor, all_layers_bbox_preds: Tensor, enc_cls_scores: Tensor, enc_bbox_preds: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], dn_meta: Dict[str, int], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Loss function. Args: all_layers_cls_scores (Tensor): Classification scores of all decoder layers, has shape (num_decoder_layers, bs, num_queries_total, cls_out_channels), where `num_queries_total` is the sum of `num_denoising_queries` and `num_matching_queries`. all_layers_bbox_preds (Tensor): Regression outputs of all decoder layers. 
Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_queries_total, 4). enc_cls_scores (Tensor): The score of each point on encode feature map, has shape (bs, num_feat_points, cls_out_channels). enc_bbox_preds (Tensor): The proposal generate from the encode feature map, has shape (bs, num_feat_points, 4) with the last dimension arranged as (cx, cy, w, h). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ # extract denoising and matching part of outputs (all_layers_matching_cls_scores, all_layers_matching_bbox_preds, all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds) = \ self.split_outputs( all_layers_cls_scores, all_layers_bbox_preds, dn_meta) loss_dict = super(DeformableDETRHead, self).loss_by_feat( all_layers_matching_cls_scores, all_layers_matching_bbox_preds, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) # NOTE DETRHead.loss_by_feat but not DeformableDETRHead.loss_by_feat # is called, because the encoder loss calculations are different # between DINO and DeformableDETR. # loss of proposal generated from encode feature map. if enc_cls_scores is not None: # NOTE The enc_loss calculation of the DINO is # different from that of Deformable DETR. enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ self.loss_by_feat_single( enc_cls_scores, enc_bbox_preds, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas) loss_dict['enc_loss_cls'] = enc_loss_cls loss_dict['enc_loss_bbox'] = enc_losses_bbox loss_dict['enc_loss_iou'] = enc_losses_iou if all_layers_denoising_cls_scores is not None: # calculate denoising loss from all decoder layers dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn( all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas, dn_meta=dn_meta) # collate denoising loss loss_dict['dn_loss_cls'] = dn_losses_cls[-1] loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1] loss_dict['dn_loss_iou'] = dn_losses_iou[-1] for num_dec_layer, (loss_cls_i, loss_bbox_i, loss_iou_i) in \ enumerate(zip(dn_losses_cls[:-1], dn_losses_bbox[:-1], dn_losses_iou[:-1])): loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i loss_dict[f'd{num_dec_layer}.dn_loss_iou'] = loss_iou_i return loss_dict def loss_dn(self, all_layers_denoising_cls_scores: Tensor, all_layers_denoising_bbox_preds: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], dn_meta: Dict[str, int]) -> Tuple[List[Tensor]]: """Calculate denoising loss. Args: all_layers_denoising_cls_scores (Tensor): Classification scores of all decoder layers in denoising part, has shape ( num_decoder_layers, bs, num_denoising_queries, cls_out_channels). 
            all_layers_denoising_bbox_preds (Tensor): Regression outputs of
                all decoder layers in denoising part. Each is a 4D-tensor
                with normalized coordinate format (cx, cy, w, h) and has
                shape (num_decoder_layers, bs, num_denoising_queries, 4).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            dn_meta (Dict[str, int]): The dictionary saves information about
                group collation, including 'num_denoising_queries' and
                'num_denoising_groups'. It will be used for split outputs of
                denoising and matching parts and loss calculation.

        Returns:
            Tuple[List[Tensor]]: The loss_dn_cls, loss_dn_bbox, and
            loss_dn_iou of each decoder layer.
        """
        return multi_apply(
            self._loss_dn_single,
            all_layers_denoising_cls_scores,
            all_layers_denoising_bbox_preds,
            batch_gt_instances=batch_gt_instances,
            batch_img_metas=batch_img_metas,
            dn_meta=dn_meta)

    def _loss_dn_single(self, dn_cls_scores: Tensor, dn_bbox_preds: Tensor,
                        batch_gt_instances: InstanceList,
                        batch_img_metas: List[dict],
                        dn_meta: Dict[str, int]) -> Tuple[Tensor]:
        """Denoising loss for outputs from a single decoder layer.

        Args:
            dn_cls_scores (Tensor): Classification scores of a single
                decoder layer in denoising part, has shape
                (bs, num_denoising_queries, cls_out_channels).
            dn_bbox_preds (Tensor): Regression outputs of a single decoder
                layer in denoising part, with normalized coordinate format
                (cx, cy, w, h) and shape (bs, num_denoising_queries, 4).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            dn_meta (Dict[str, int]): The dictionary saves information about
                group collation, including 'num_denoising_queries' and
                'num_denoising_groups'. It will be used for split outputs of
                denoising and matching parts and loss calculation.

        Returns:
            Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and
            `loss_iou`.
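        A minimal sketch of the rescaling from normalized (cx, cy, w, h)
        predictions to absolute xyxy boxes used for the IoU loss (the
        100x50 image size is made up):

        Examples:
            >>> import torch
            >>> from mmdet.structures.bbox import bbox_cxcywh_to_xyxy
            >>> pred = torch.tensor([[0.5, 0.5, 0.2, 0.4]])
            >>> factor = torch.tensor([[100., 50., 100., 50.]])
            >>> bbox_cxcywh_to_xyxy(pred) * factor
            tensor([[40., 15., 60., 35.]])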
""" cls_reg_targets = self.get_dn_targets(batch_gt_instances, batch_img_metas, dn_meta) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets labels = torch.cat(labels_list, 0) label_weights = torch.cat(label_weights_list, 0) bbox_targets = torch.cat(bbox_targets_list, 0) bbox_weights = torch.cat(bbox_weights_list, 0) # classification loss cls_scores = dn_cls_scores.reshape(-1, self.cls_out_channels) # construct weighted avg_factor to match with the official DETR repo cls_avg_factor = \ num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight if self.sync_cls_avg_factor: cls_avg_factor = reduce_mean( cls_scores.new_tensor([cls_avg_factor])) cls_avg_factor = max(cls_avg_factor, 1) if len(cls_scores) > 0: loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=cls_avg_factor) else: loss_cls = torch.zeros( 1, dtype=cls_scores.dtype, device=cls_scores.device) # Compute the average number of gt boxes across all gpus, for # normalization purposes num_total_pos = loss_cls.new_tensor([num_total_pos]) num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() # construct factors used for rescale bboxes factors = [] for img_meta, bbox_pred in zip(batch_img_metas, dn_bbox_preds): img_h, img_w = img_meta['img_shape'] factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0).repeat( bbox_pred.size(0), 1) factors.append(factor) factors = torch.cat(factors) # DETR regress the relative position of boxes (cxcywh) in the image, # thus the learning target is normalized by the image size. So here # we need to re-scale them for calculating IoU loss bbox_preds = dn_bbox_preds.reshape(-1, 4) bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors # regression IoU loss, defaultly GIoU loss loss_iou = self.loss_iou( bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) # regression L1 loss loss_bbox = self.loss_bbox( bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) return loss_cls, loss_bbox, loss_iou def get_dn_targets(self, batch_gt_instances: InstanceList, batch_img_metas: dict, dn_meta: Dict[str, int]) -> tuple: """Get targets in denoising part for a batch of images. Args: batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. Returns: tuple: a tuple containing the following targets. - labels_list (list[Tensor]): Labels for all images. - label_weights_list (list[Tensor]): Label weights for all images. - bbox_targets_list (list[Tensor]): BBox targets for all images. - bbox_weights_list (list[Tensor]): BBox weights for all images. - num_total_pos (int): Number of positive samples in all images. - num_total_neg (int): Number of negative samples in all images. 
""" (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( self._get_dn_targets_single, batch_gt_instances, batch_img_metas, dn_meta=dn_meta) num_total_pos = sum((inds.numel() for inds in pos_inds_list)) num_total_neg = sum((inds.numel() for inds in neg_inds_list)) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_dn_targets_single(self, gt_instances: InstanceData, img_meta: dict, dn_meta: Dict[str, int]) -> tuple: """Get targets in denoising part for one image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for one image. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. Returns: tuple[Tensor]: a tuple containing the following for one image. - labels (Tensor): Labels of each image. - label_weights (Tensor]): Label weights of each image. - bbox_targets (Tensor): BBox targets of each image. - bbox_weights (Tensor): BBox weights of each image. - pos_inds (Tensor): Sampled positive indices for each image. - neg_inds (Tensor): Sampled negative indices for each image. """ gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels num_groups = dn_meta['num_denoising_groups'] num_denoising_queries = dn_meta['num_denoising_queries'] num_queries_each_group = int(num_denoising_queries / num_groups) device = gt_bboxes.device if len(gt_labels) > 0: t = torch.arange(len(gt_labels), dtype=torch.long, device=device) t = t.unsqueeze(0).repeat(num_groups, 1) pos_assigned_gt_inds = t.flatten() pos_inds = torch.arange( num_groups, dtype=torch.long, device=device) pos_inds = pos_inds.unsqueeze(1) * num_queries_each_group + t pos_inds = pos_inds.flatten() else: pos_inds = pos_assigned_gt_inds = \ gt_bboxes.new_tensor([], dtype=torch.long) neg_inds = pos_inds + num_queries_each_group // 2 # label targets labels = gt_bboxes.new_full((num_denoising_queries, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[pos_assigned_gt_inds] label_weights = gt_bboxes.new_ones(num_denoising_queries) # bbox targets bbox_targets = torch.zeros(num_denoising_queries, 4, device=device) bbox_weights = torch.zeros(num_denoising_queries, 4, device=device) bbox_weights[pos_inds] = 1.0 img_h, img_w = img_meta['img_shape'] # DETR regress the relative position of boxes (cxcywh) in the image. # Thus the learning target should be normalized by the image size, also # the box format should be converted from defaultly x1y1x2y2 to cxcywh. factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) gt_bboxes_normalized = gt_bboxes / factor gt_bboxes_targets = bbox_xyxy_to_cxcywh(gt_bboxes_normalized) bbox_targets[pos_inds] = gt_bboxes_targets.repeat([num_groups, 1]) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) @staticmethod def split_outputs(all_layers_cls_scores: Tensor, all_layers_bbox_preds: Tensor, dn_meta: Dict[str, int]) -> Tuple[Tensor]: """Split outputs of the denoising part and the matching part. 
For the total outputs of `num_queries_total` length, the former `num_denoising_queries` outputs are from denoising queries, and the rest `num_matching_queries` ones are from matching queries, where `num_queries_total` is the sum of `num_denoising_queries` and `num_matching_queries`. Args: all_layers_cls_scores (Tensor): Classification scores of all decoder layers, has shape (num_decoder_layers, bs, num_queries_total, cls_out_channels). all_layers_bbox_preds (Tensor): Regression outputs of all decoder layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_queries_total, 4). dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. Returns: Tuple[Tensor]: a tuple containing the following outputs. - all_layers_matching_cls_scores (Tensor): Classification scores of all decoder layers in matching part, has shape (num_decoder_layers, bs, num_matching_queries, cls_out_channels). - all_layers_matching_bbox_preds (Tensor): Regression outputs of all decoder layers in matching part. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_matching_queries, 4). - all_layers_denoising_cls_scores (Tensor): Classification scores of all decoder layers in denoising part, has shape (num_decoder_layers, bs, num_denoising_queries, cls_out_channels). - all_layers_denoising_bbox_preds (Tensor): Regression outputs of all decoder layers in denoising part. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_denoising_queries, 4). """ if dn_meta is not None: num_denoising_queries = dn_meta['num_denoising_queries'] all_layers_denoising_cls_scores = \ all_layers_cls_scores[:, :, : num_denoising_queries, :] all_layers_denoising_bbox_preds = \ all_layers_bbox_preds[:, :, : num_denoising_queries, :] all_layers_matching_cls_scores = \ all_layers_cls_scores[:, :, num_denoising_queries:, :] all_layers_matching_bbox_preds = \ all_layers_bbox_preds[:, :, num_denoising_queries:, :] else: all_layers_denoising_cls_scores = None all_layers_denoising_bbox_preds = None all_layers_matching_cls_scores = all_layers_cls_scores all_layers_matching_bbox_preds = all_layers_bbox_preds return (all_layers_matching_cls_scores, all_layers_matching_bbox_preds, all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds)
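# A minimal, self-contained sketch of the query split performed by
# `split_outputs` above: along the query axis, the first
# `num_denoising_queries` entries belong to the denoising part and the rest
# to the matching part. The shapes below (2 decoder layers, batch size 1,
# 6 denoising + 4 matching queries, 3 classes) are illustrative assumptions,
# not values taken from any real config.
if __name__ == '__main__':
    num_layers, bs, num_dn, num_match, num_cls = 2, 1, 6, 4, 3
    all_cls = torch.rand(num_layers, bs, num_dn + num_match, num_cls)
    all_bbox = torch.rand(num_layers, bs, num_dn + num_match, 4)
    dn_cls, match_cls = all_cls[:, :, :num_dn, :], all_cls[:, :, num_dn:, :]
    dn_bbox, match_bbox = all_bbox[:, :, :num_dn, :], all_bbox[:, :, num_dn:, :]
    assert dn_cls.shape == (num_layers, bs, num_dn, num_cls)
    assert match_cls.shape == (num_layers, bs, num_match, num_cls)
    assert dn_bbox.shape == (num_layers, bs, num_dn, 4)
    assert match_bbox.shape == (num_layers, bs, num_match, 4)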
23,047
49.43326
79
py
ERD
ERD-main/mmdet/models/dense_heads/pisa_retinanet_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList, OptInstanceList from ..losses import carl_loss, isr_p from ..utils import images_to_levels from .retina_head import RetinaHead @MODELS.register_module() class PISARetinaHead(RetinaHead): """PISA RetinaNet Head. The head has the same structure as the RetinaNet head, but differs in two aspects: 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to change the positive loss weights. 2. Classification-aware regression loss is adopted as a third loss. """ def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level, has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: Loss dict, comprising classification loss, regression loss and CARL loss. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results_list) = cls_reg_targets # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) num_imgs = len(batch_img_metas) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) for cls_score in cls_scores ] flatten_cls_scores = torch.cat( flatten_cls_scores, dim=1).reshape(-1, flatten_cls_scores[0].size(-1)) flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_bbox_preds = torch.cat( flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) flatten_label_weights = torch.cat( label_weights_list, dim=1).reshape(-1) flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) flatten_bbox_targets = torch.cat( bbox_targets_list, dim=1).reshape(-1, 4) flatten_bbox_weights = torch.cat( bbox_weights_list, dim=1).reshape(-1, 4) # Apply ISR-P isr_cfg = self.train_cfg.get('isr', None) if isr_cfg is not None: all_targets = (flatten_labels, 
flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) with torch.no_grad(): all_targets = isr_p( flatten_cls_scores, flatten_bbox_preds, all_targets, flatten_anchors, sampling_results_list, bbox_coder=self.bbox_coder, loss_cls=self.loss_cls, num_class=self.num_classes, **self.train_cfg['isr']) (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) = all_targets # For convenience, we compute the loss once instead of separating it by # FPN level, so that we don't need to split the weights by level again. # The result should be the same. losses_cls = self.loss_cls( flatten_cls_scores, flatten_labels, flatten_label_weights, avg_factor=avg_factor) losses_bbox = self.loss_bbox( flatten_bbox_preds, flatten_bbox_targets, flatten_bbox_weights, avg_factor=avg_factor) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) # CARL Loss carl_cfg = self.train_cfg.get('carl', None) if carl_cfg is not None: loss_carl = carl_loss( flatten_cls_scores, flatten_labels, flatten_bbox_preds, flatten_bbox_targets, self.loss_bbox, **self.train_cfg['carl'], avg_factor=avg_factor, sigmoid=True, num_class=self.num_classes) loss_dict.update(loss_carl) return loss_dict
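# A minimal standalone sketch of the flatten-then-concat pattern used in
# `loss_by_feat` above: per-level (N, A*C, H, W) score maps are permuted to
# channel-last and merged into one (N * sum(H_i * W_i * A), C) tensor, so
# ISR-P and CARL can reweight every sample in a single pass. The level sizes
# and channel counts below are illustrative assumptions.
if __name__ == '__main__':
    num_imgs, num_anchors, num_classes = 2, 3, 4
    level_sizes = [(8, 8), (4, 4)]
    cls_scores = [
        torch.rand(num_imgs, num_anchors * num_classes, h, w)
        for h, w in level_sizes
    ]
    flat = [
        s.permute(0, 2, 3, 1).reshape(num_imgs, -1, num_classes)
        for s in cls_scores
    ]
    flat = torch.cat(flat, dim=1).reshape(-1, num_classes)
    expected = num_imgs * sum(h * w * num_anchors for h, w in level_sizes)
    assert flat.shape == (expected, num_classes)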
6,298
39.63871
79
py
ERD
ERD-main/mmdet/models/dense_heads/gfl_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Sequence, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList, reduce_mean) from .anchor_head import AnchorHead from ..task_modules.prior_generators import anchor_inside_flags from ..task_modules.samplers import PseudoSampler from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply, unmap) from ..utils import unpack_gt_instances INF = 100000.0 EPS = 1.0e-7 class Integral(nn.Module): """A fixed layer for calculating integral result from distribution. This layer calculates the target location by :math: ``sum{P(y_i) * y_i}``, where P(y_i) denotes the softmax vector that represents the discrete distribution, and y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}. Args: reg_max (int): The maximal value of the discrete set. Defaults to 16. You may want to reset it according to your new dataset or related settings. """ def __init__(self, reg_max: int = 16) -> None: super().__init__() self.reg_max = reg_max self.register_buffer('project', torch.linspace(0, self.reg_max, self.reg_max + 1)) def forward(self, x: Tensor) -> Tensor: """Forward feature from the regression head to get integral result of bounding box location. Args: x (Tensor): Features of the regression head, shape (N, 4*(n+1)), n is self.reg_max. Returns: x (Tensor): Integral result of box locations, i.e., distance offsets from the box center in four directions, shape (N, 4). """ x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) return x @MODELS.register_module() class GFLHead(AnchorHead): """Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection. The GFL head structure is similar to that of ATSS; however, GFL uses 1) a joint representation for classification and localization quality, and 2) a flexible General distribution for bounding box locations, which are supervised by Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively. https://arxiv.org/abs/2006.04388 Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Defaults to 4. conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config conv layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and config norm layer. Default: dict(type='GN', num_groups=32, requires_grad=True). loss_dfl (:obj:`ConfigDict` or dict): Config of Distribution Focal Loss (DFL). Defaults to dict(type='DistributionFocalLoss', loss_weight=0.25). Note that Quality Focal Loss (QFL) is passed as ``loss_cls`` through keyword arguments. bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. Defaults to 'DistancePointBBoxCoder'. reg_max (int): Max value of integral set :math: ``{0, ..., reg_max}`` in QFL setting. Defaults to 16. init_cfg (:obj:`ConfigDict` or dict or list[dict] or list[:obj:`ConfigDict`]): Initialization config dict. 
Example: >>> self = GFLHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_quality_score, bbox_pred = self.forward(feats) >>> assert len(cls_quality_score) == len(self.scales) """ def __init__(self, num_classes: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), loss_dfl: ConfigType = dict( type='DistributionFocalLoss', loss_weight=0.25), bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), reg_max: int = 16, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='gfl_cls', std=0.01, bias_prob=0.01)), **kwargs) -> None: self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_max = reg_max super().__init__( num_classes=num_classes, in_channels=in_channels, bbox_coder=bbox_coder, init_cfg=init_cfg, **kwargs) if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) if self.train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) self.integral = Integral(self.reg_max) self.loss_dfl = MODELS.build(loss_dfl) iou_calculator = dict(type='BboxOverlaps2D') self.iou_calculator = TASK_UTILS.build(iou_calculator) # for import v3 self.loss_cls_for_replay_v3 = MODELS.build(dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU() self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) assert self.num_anchors == 1, 'anchor free version' self.gfl_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.gfl_reg = nn.Conv2d( self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification and quality (IoU) joint scores for all scale levels, each is a 4D-tensor, the channel number is num_classes. - bbox_preds (list[Tensor]): Box distribution logits for all scale levels, each is a 4D-tensor, the channel number is 4*(n+1), n is max value of integral set. """ return multi_apply(self.forward_single, x, self.scales) def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: - cls_score (Tensor): Cls and quality joint scores for a single scale level the channel number is num_classes. - bbox_pred (Tensor): Box distribution logits for a single scale level, the channel number is 4*(n+1), n is max value of integral set. 
""" cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.gfl_cls(cls_feat) bbox_pred = scale(self.gfl_reg(reg_feat)).float() return cls_score, bbox_pred def anchor_center(self, anchors: Tensor) -> Tensor: """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), ``xyxy`` format. Returns: Tensor: Anchor centers with shape (N, 2), ``xy`` format. """ anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2 anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, stride: Tuple[int], avg_factor: int) -> dict: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (Tuple[int]): Stride in this scale level. avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=avg_factor) return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() losses_cls, losses_bbox, losses_dfl, \ avg_factor = multi_apply( self.loss_by_feat_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.prior_generator.strides, avg_factor=avg_factor) avg_factor = sum(avg_factor) avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. GFL head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (:obj: `ConfigDict`): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape [num_bboxes, 5], where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding box with shape [num_bboxes]. 
""" cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate( zip(cls_score_list, bbox_pred_list, self.prior_generator.strides, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] assert stride[0] == stride[1] bbox_pred = bbox_pred.permute(1, 2, 0) bbox_pred = self.integral(bbox_pred) * stride[0] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self.bbox_coder.decode( self.anchor_center(priors), bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) def get_targets(self, anchor_list: List[Tensor], valid_flag_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs=True) -> tuple: """Get targets for GFL head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. """ num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, anchor_list, valid_flag_list, num_level_anchors_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. 
multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, num_level_anchors: List[int], gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors (list[int]): Number of anchors of each scale level. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: N is the number of total anchors in the image. - anchors (Tensor): All anchors in the image with shape (N, 4). - labels (Tensor): Labels of all anchors in the image with shape (N,). - label_weights (Tensor): Label weights of all anchor in the image with shape (N,). - bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). - bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4). - pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). - neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). - sampling_result (:obj:`SamplingResult`): Sampling results. """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. 
Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) pred_instances = InstanceData(priors=anchors) assign_result = self.assigner.assign( pred_instances=pred_instances, num_level_priors=num_level_anchors_inside, gt_instances=gt_instances, gt_instances_ignore=gt_instances_ignore) sampling_result = self.sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors,), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def get_num_level_anchors_inside(self, num_level_anchors: List[int], inside_flags: Tensor) -> List[int]: """Get the number of valid anchors in every level.""" split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
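# A minimal sketch of the `Integral` layer defined at the top of this file:
# per-side distribution logits of length reg_max + 1 are softmaxed and
# reduced to their expected value, giving one distance offset per box side.
# `reg_max=16` matches the class default; the random logits are illustrative.
if __name__ == '__main__':
    reg_max = 16
    integral = Integral(reg_max=reg_max)
    logits = torch.rand(5, 4 * (reg_max + 1))  # 5 boxes, 4 sides each
    offsets = integral(logits)
    assert offsets.shape == (5, 4)
    # each offset is a convex combination of the discrete set {0, ..., reg_max}
    assert (offsets >= 0).all() and (offsets <= reg_max).all()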
29,890
42.957353
116
py
ERD
ERD-main/mmdet/models/dense_heads/centernet_update_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Optional, Sequence, Tuple import torch import torch.nn as nn from mmcv.cnn import Scale from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import bbox2distance from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, reduce_mean) from ..utils import multi_apply from .anchor_free_head import AnchorFreeHead INF = 1000000000 RangeType = Sequence[Tuple[int, int]] def _transpose(tensor_list: List[Tensor], num_point_list: list) -> List[Tensor]: """This function is used to transpose image first tensors to level first ones.""" for img_idx in range(len(tensor_list)): tensor_list[img_idx] = torch.split( tensor_list[img_idx], num_point_list, dim=0) tensors_level_first = [] for targets_per_level in zip(*tensor_list): tensors_level_first.append(torch.cat(targets_per_level, dim=0)) return tensors_level_first @MODELS.register_module() class CenterNetUpdateHead(AnchorFreeHead): """CenterNetUpdateHead is an improved version of CenterNet in CenterNet2. Paper link `<https://arxiv.org/abs/2103.07461>`_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channel in the input feature map. regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple level points. hm_min_radius (int): Heatmap target minimum radius of cls branch. Defaults to 4. hm_min_overlap (float): Heatmap target minimum overlap of cls branch. Defaults to 0.8. more_pos_thresh (float): The filtering threshold when the cls branch adds more positive samples. Defaults to 0.2. more_pos_topk (int): The maximum number of additional positive samples added to each gt. Defaults to 9. soft_weight_on_reg (bool): Whether to use the soft target of the cls branch as the soft weight of the bbox branch. Defaults to False. loss_cls (:obj:`ConfigDict` or dict): Config of cls loss. Defaults to dict(type='GaussianFocalLoss', loss_weight=1.0) loss_bbox (:obj:`ConfigDict` or dict): Config of bbox loss. Defaults to dict(type='GIoULoss', loss_weight=2.0). norm_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config norm layer. Defaults to ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``. train_cfg (:obj:`ConfigDict` or dict, optional): Training config. Unused in CenterNet. Reserved for compatibility with SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CenterNet. 
""" def __init__(self, num_classes: int, in_channels: int, regress_ranges: RangeType = ((0, 80), (64, 160), (128, 320), (256, 640), (512, INF)), hm_min_radius: int = 4, hm_min_overlap: float = 0.8, more_pos_thresh: float = 0.2, more_pos_topk: int = 9, soft_weight_on_reg: bool = False, loss_cls: ConfigType = dict( type='GaussianFocalLoss', pos_weight=0.25, neg_weight=0.75, loss_weight=1.0), loss_bbox: ConfigType = dict( type='GIoULoss', loss_weight=2.0), norm_cfg: OptConfigType = dict( type='GN', num_groups=32, requires_grad=True), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, **kwargs) -> None: super().__init__( num_classes=num_classes, in_channels=in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, norm_cfg=norm_cfg, train_cfg=train_cfg, test_cfg=test_cfg, **kwargs) self.soft_weight_on_reg = soft_weight_on_reg self.hm_min_radius = hm_min_radius self.more_pos_thresh = more_pos_thresh self.more_pos_topk = more_pos_topk self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap) self.sigmoid_clamp = 0.0001 # GaussianFocalLoss must be sigmoid mode self.use_sigmoid_cls = True self.cls_out_channels = num_classes self.regress_ranges = regress_ranges self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def _init_predictor(self) -> None: """Initialize predictor layers of the head.""" self.conv_cls = nn.Conv2d( self.feat_channels, self.num_classes, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of each level outputs. - cls_scores (list[Tensor]): Box scores for each scale level, \ each is a 4D-tensor, the channel number is num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each \ scale level, each is a 4D-tensor, the channel number is 4. """ return multi_apply(self.forward_single, x, self.scales, self.strides) def forward_single(self, x: Tensor, scale: Scale, stride: int) -> Tuple[Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps. Returns: tuple: scores for each class, bbox predictions of input feature maps. """ cls_score, bbox_pred, _, _ = super().forward_single(x) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) if not self.training: bbox_pred *= stride return cls_score, bbox_pred def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. 
It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = cls_scores[0].size(0) assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) # 1 flatten outputs flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) assert (torch.isfinite(flatten_bbox_preds).all().item()) # 2 calc reg and cls branch targets cls_targets, bbox_targets = self.get_targets(all_level_points, batch_gt_instances) # 3 add more pos index for cls branch featmap_sizes = flatten_points.new_tensor(featmap_sizes) pos_inds, cls_labels = self.add_cls_pos_inds(flatten_points, flatten_bbox_preds, featmap_sizes, batch_gt_instances) # 4 calc cls loss if pos_inds is None: # num_gts=0 num_pos_cls = bbox_preds[0].new_tensor(0, dtype=torch.float) else: num_pos_cls = bbox_preds[0].new_tensor( len(pos_inds), dtype=torch.float) num_pos_cls = max(reduce_mean(num_pos_cls), 1.0) flatten_cls_scores = flatten_cls_scores.sigmoid().clamp( min=self.sigmoid_clamp, max=1 - self.sigmoid_clamp) cls_loss = self.loss_cls( flatten_cls_scores, cls_targets, pos_inds=pos_inds, pos_labels=cls_labels, avg_factor=num_pos_cls) # 5 calc reg loss pos_bbox_inds = torch.nonzero( bbox_targets.max(dim=1)[0] >= 0).squeeze(1) pos_bbox_preds = flatten_bbox_preds[pos_bbox_inds] pos_bbox_targets = bbox_targets[pos_bbox_inds] bbox_weight_map = cls_targets.max(dim=1)[0] bbox_weight_map = bbox_weight_map[pos_bbox_inds] bbox_weight_map = bbox_weight_map if self.soft_weight_on_reg \ else torch.ones_like(bbox_weight_map) num_pos_bbox = max(reduce_mean(bbox_weight_map.sum()), 1.0) if len(pos_bbox_inds) > 0: pos_points = flatten_points[pos_bbox_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) bbox_loss = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, weight=bbox_weight_map, avg_factor=num_pos_bbox) else: bbox_loss = flatten_bbox_preds.sum() * 0 return dict(loss_cls=cls_loss, loss_bbox=bbox_loss) def get_targets( self, points: List[Tensor], batch_gt_instances: InstanceList, ) -> Tuple[Tensor, Tensor]: """Compute classification and bbox targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: Targets of each level. - concat_lvl_labels (Tensor): Labels of all level and batch. - concat_lvl_bbox_targets (Tensor): BBox targets of all \ level and batch. 
""" assert len(points) == len(self.regress_ranges) num_levels = len(points) # the number of points per img, per lvl num_points = [center.size(0) for center in points] # expand regress ranges to align with points expanded_regress_ranges = [ points[i].new_tensor(self.regress_ranges[i])[None].expand_as( points[i]) for i in range(num_levels) ] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(points, dim=0) concat_strides = torch.cat([ concat_points.new_ones(num_points[i]) * self.strides[i] for i in range(num_levels) ]) # get labels and bbox_targets of each image cls_targets_list, bbox_targets_list = multi_apply( self._get_targets_single, batch_gt_instances, points=concat_points, regress_ranges=concat_regress_ranges, strides=concat_strides) bbox_targets_list = _transpose(bbox_targets_list, num_points) cls_targets_list = _transpose(cls_targets_list, num_points) concat_lvl_bbox_targets = torch.cat(bbox_targets_list, 0) concat_lvl_cls_targets = torch.cat(cls_targets_list, dim=0) return concat_lvl_cls_targets, concat_lvl_bbox_targets def _get_targets_single(self, gt_instances: InstanceData, points: Tensor, regress_ranges: Tensor, strides: Tensor) -> Tuple[Tensor, Tensor]: """Compute classification and bbox targets for a single image.""" num_points = points.size(0) num_gts = len(gt_instances) gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels if num_gts == 0: return gt_labels.new_full((num_points, self.num_classes), self.num_classes), \ gt_bboxes.new_full((num_points, 4), -1) # Calculate the regression tblr target corresponding to all points points = points[:, None].expand(num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) strides = strides[:, None, None].expand(num_points, num_gts, 2) bbox_target = bbox2distance(points, gt_bboxes) # M x N x 4 # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_target.min(dim=2)[0] > 0 # M x N # condition2: Calculate the nearest points from # the upper, lower, left and right ranges from # the center of the gt bbox centers = ((gt_bboxes[..., [0, 1]] + gt_bboxes[..., [2, 3]]) / 2) centers_discret = ((centers / strides).int() * strides).float() + \ strides / 2 centers_discret_dist = points - centers_discret dist_x = centers_discret_dist[..., 0].abs() dist_y = centers_discret_dist[..., 1].abs() inside_gt_center3x3_mask = (dist_x <= strides[..., 0]) & \ (dist_y <= strides[..., 0]) # condition3: limit the regression range for each location bbox_target_wh = bbox_target[..., :2] + bbox_target[..., 2:] crit = (bbox_target_wh**2).sum(dim=2)**0.5 / 2 inside_fpn_level_mask = (crit >= regress_ranges[:, [0]]) & \ (crit <= regress_ranges[:, [1]]) bbox_target_mask = inside_gt_bbox_mask & \ inside_gt_center3x3_mask & \ inside_fpn_level_mask # Calculate the distance weight map gt_center_peak_mask = ((centers_discret_dist**2).sum(dim=2) == 0) weighted_dist = ((points - centers)**2).sum(dim=2) # M x N weighted_dist[gt_center_peak_mask] = 0 areas = (gt_bboxes[..., 2] - gt_bboxes[..., 0]) * ( gt_bboxes[..., 3] - gt_bboxes[..., 1]) radius = self.delta**2 * 2 * areas radius = torch.clamp(radius, min=self.hm_min_radius**2) weighted_dist = weighted_dist / radius # Calculate bbox_target bbox_weighted_dist = weighted_dist.clone() bbox_weighted_dist[bbox_target_mask == 0] = INF * 1.0 min_dist, min_inds = bbox_weighted_dist.min(dim=1) bbox_target = bbox_target[range(len(bbox_target)), min_inds] # M x N x 4 --> M x 4 bbox_target[min_dist == INF] = -INF # 
Convert to feature map scale bbox_target /= strides[:, 0, :].repeat(1, 2) # Calculate cls_target cls_target = self._create_heatmaps_from_dist(weighted_dist, gt_labels) return cls_target, bbox_target @torch.no_grad() def add_cls_pos_inds( self, flatten_points: Tensor, flatten_bbox_preds: Tensor, featmap_sizes: Tensor, batch_gt_instances: InstanceList ) -> Tuple[Optional[Tensor], Optional[Tensor]]: """Provide additional adaptive positive samples to the classification branch. Args: flatten_points (Tensor): The point after flatten, including batch image and all levels. The shape is (N, 2). flatten_bbox_preds (Tensor): The bbox predicts after flatten, including batch image and all levels. The shape is (N, 4). featmap_sizes (Tensor): Feature map size of all layers. The shape is (5, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: - pos_inds (Tensor): Adaptively selected positive sample index. - cls_labels (Tensor): Corresponding positive class label. """ outputs = self._get_center3x3_region_index_targets( batch_gt_instances, featmap_sizes) cls_labels, fpn_level_masks, center3x3_inds, \ center3x3_bbox_targets, center3x3_masks = outputs num_gts, total_level, K = cls_labels.shape[0], len( self.strides), center3x3_masks.shape[-1] if num_gts == 0: return None, None # The out-of-bounds index is forcibly set to 0 # to prevent loss calculation errors center3x3_inds[center3x3_masks == 0] = 0 reg_pred_center3x3 = flatten_bbox_preds[center3x3_inds] center3x3_points = flatten_points[center3x3_inds].view(-1, 2) center3x3_bbox_targets_expand = center3x3_bbox_targets.view( -1, 4).clamp(min=0) pos_decoded_bbox_preds = self.bbox_coder.decode( center3x3_points, reg_pred_center3x3.view(-1, 4)) pos_decoded_target_preds = self.bbox_coder.decode( center3x3_points, center3x3_bbox_targets_expand) center3x3_bbox_loss = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, None, reduction_override='none').view(num_gts, total_level, K) / self.loss_bbox.loss_weight # Invalid index Loss set to infinity center3x3_bbox_loss[center3x3_masks == 0] = INF # 4 is the center point of the sampled 9 points, the center point # of gt bbox after discretization. # The center point of gt bbox after discretization # must be a positive sample, so we force its loss to be set to 0. 
center3x3_bbox_loss.view(-1, K)[fpn_level_masks.view(-1), 4] = 0 center3x3_bbox_loss = center3x3_bbox_loss.view(num_gts, -1) loss_thr = torch.kthvalue( center3x3_bbox_loss, self.more_pos_topk, dim=1)[0] loss_thr[loss_thr > self.more_pos_thresh] = self.more_pos_thresh new_pos = center3x3_bbox_loss < loss_thr.view(num_gts, 1) pos_inds = center3x3_inds.view(num_gts, -1)[new_pos] cls_labels = cls_labels.view(num_gts, 1).expand(num_gts, total_level * K)[new_pos] return pos_inds, cls_labels def _create_heatmaps_from_dist(self, weighted_dist: Tensor, cls_labels: Tensor) -> Tensor: """Generate heatmaps of classification branch based on weighted distance map.""" heatmaps = weighted_dist.new_zeros( (weighted_dist.shape[0], self.num_classes)) for c in range(self.num_classes): inds = (cls_labels == c) # N if inds.int().sum() == 0: continue heatmaps[:, c] = torch.exp(-weighted_dist[:, inds].min(dim=1)[0]) zeros = heatmaps[:, c] < 1e-4 heatmaps[zeros, c] = 0 return heatmaps def _get_center3x3_region_index_targets(self, batch_gt_instances: InstanceList, shapes_per_level: Tensor) -> tuple: """Get the center (and the 3x3 region near center) locations and targets of each object.""" cls_labels = [] inside_fpn_level_masks = [] center3x3_inds = [] center3x3_masks = [] center3x3_bbox_targets = [] total_levels = len(self.strides) batch = len(batch_gt_instances) shapes_per_level = shapes_per_level.long() area_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1]) # Select a total of 9 positions of 3x3 in the center of the gt bbox # as candidate positive samples K = 9 dx = shapes_per_level.new_tensor([-1, 0, 1, -1, 0, 1, -1, 0, 1]).view(1, 1, K) dy = shapes_per_level.new_tensor([-1, -1, -1, 0, 0, 0, 1, 1, 1]).view(1, 1, K) regress_ranges = shapes_per_level.new_tensor(self.regress_ranges).view( len(self.regress_ranges), 2) # L x 2 strides = shapes_per_level.new_tensor(self.strides) start_coord_pre_level = [] _start = 0 for level in range(total_levels): start_coord_pre_level.append(_start) _start = _start + batch * area_per_level[level] start_coord_pre_level = shapes_per_level.new_tensor( start_coord_pre_level).view(1, total_levels, 1) area_per_level = area_per_level.view(1, total_levels, 1) for im_i in range(batch): gt_instance = batch_gt_instances[im_i] gt_bboxes = gt_instance.bboxes gt_labels = gt_instance.labels num_gts = gt_bboxes.shape[0] if num_gts == 0: continue cls_labels.append(gt_labels) gt_bboxes = gt_bboxes[:, None].expand(num_gts, total_levels, 4) expanded_strides = strides[None, :, None].expand(num_gts, total_levels, 2) expanded_regress_ranges = regress_ranges[None].expand( num_gts, total_levels, 2) expanded_shapes_per_level = shapes_per_level[None].expand( num_gts, total_levels, 2) # calc reg_target centers = ((gt_bboxes[..., [0, 1]] + gt_bboxes[..., [2, 3]]) / 2) centers_inds = (centers / expanded_strides).long() centers_discret = centers_inds * expanded_strides \ + expanded_strides // 2 bbox_target = bbox2distance(centers_discret, gt_bboxes) # M x N x 4 # calc inside_fpn_level_mask bbox_target_wh = bbox_target[..., :2] + bbox_target[..., 2:] crit = (bbox_target_wh**2).sum(dim=2)**0.5 / 2 inside_fpn_level_mask = \ (crit >= expanded_regress_ranges[..., 0]) & \ (crit <= expanded_regress_ranges[..., 1]) inside_gt_bbox_mask = bbox_target.min(dim=2)[0] >= 0 inside_fpn_level_mask = inside_gt_bbox_mask & inside_fpn_level_mask inside_fpn_level_masks.append(inside_fpn_level_mask) # calc center3x3_ind and mask expand_ws = expanded_shapes_per_level[..., 1:2].expand( num_gts, total_levels, K) expand_hs = 
expanded_shapes_per_level[..., 0:1].expand( num_gts, total_levels, K) centers_inds_x = centers_inds[..., 0:1] centers_inds_y = centers_inds[..., 1:2] center3x3_idx = start_coord_pre_level + \ im_i * area_per_level + \ (centers_inds_y + dy) * expand_ws + \ (centers_inds_x + dx) center3x3_mask = \ ((centers_inds_y + dy) < expand_hs) & \ ((centers_inds_y + dy) >= 0) & \ ((centers_inds_x + dx) < expand_ws) & \ ((centers_inds_x + dx) >= 0) # recalc center3x3 region reg target bbox_target = bbox_target / expanded_strides.repeat(1, 1, 2) center3x3_bbox_target = bbox_target[..., None, :].expand( num_gts, total_levels, K, 4).clone() center3x3_bbox_target[..., 0] += dx center3x3_bbox_target[..., 1] += dy center3x3_bbox_target[..., 2] -= dx center3x3_bbox_target[..., 3] -= dy # update center3x3_mask center3x3_mask = center3x3_mask & ( center3x3_bbox_target.min(dim=3)[0] >= 0) # n x L x K center3x3_inds.append(center3x3_idx) center3x3_masks.append(center3x3_mask) center3x3_bbox_targets.append(center3x3_bbox_target) if len(inside_fpn_level_masks) > 0: cls_labels = torch.cat(cls_labels, dim=0) inside_fpn_level_masks = torch.cat(inside_fpn_level_masks, dim=0) center3x3_inds = torch.cat(center3x3_inds, dim=0).long() center3x3_bbox_targets = torch.cat(center3x3_bbox_targets, dim=0) center3x3_masks = torch.cat(center3x3_masks, dim=0) else: cls_labels = shapes_per_level.new_zeros(0).long() inside_fpn_level_masks = shapes_per_level.new_zeros( (0, total_levels)).bool() center3x3_inds = shapes_per_level.new_zeros( (0, total_levels, K)).long() center3x3_bbox_targets = shapes_per_level.new_zeros( (0, total_levels, K, 4)).float() center3x3_masks = shapes_per_level.new_zeros( (0, total_levels, K)).bool() return cls_labels, inside_fpn_level_masks, center3x3_inds, \ center3x3_bbox_targets, center3x3_masks
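# A minimal sketch of the module-level `_transpose` helper defined at the top
# of this file: targets stored image-first are split by per-level point counts
# and regrouped level-first. Two images and two levels with 4 and 2 points are
# illustrative assumptions.
if __name__ == '__main__':
    num_point_list = [4, 2]  # points at level 0 and level 1
    per_image = [torch.arange(6.), 10 + torch.arange(6.)]
    # pass clones because `_transpose` rebinds the entries of its input list
    level_first = _transpose([t.clone() for t in per_image], num_point_list)
    assert len(level_first) == 2
    assert level_first[0].shape == (8,)  # 2 images x 4 points at level 0
    assert level_first[1].shape == (4,)  # 2 images x 2 points at level 1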
27,030
42.2496
79
py
ERD
ERD-main/mmdet/models/dense_heads/corner_head.py
# Copyright (c) OpenMMLab. All rights reserved. from logging import warning from math import ceil, log from typing import List, Optional, Sequence, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import CornerPool, batched_nms from mmengine.config import ConfigDict from mmengine.model import BaseModule, bias_init_with_prob from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, OptMultiConfig) from ..utils import (gather_feat, gaussian_radius, gen_gaussian_target, get_local_maximum, get_topk_from_heatmap, multi_apply, transpose_and_gather_feat) from .base_dense_head import BaseDenseHead class BiCornerPool(BaseModule): """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.) Args: in_channels (int): Input channels of module. directions (list[str]): Directions of two CornerPools. out_channels (int): Output channels of module. feat_channels (int): Feature channels of module. norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and config norm layer. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. """ def __init__(self, in_channels: int, directions: List[str], feat_channels: int = 128, out_channels: int = 128, norm_cfg: ConfigType = dict(type='BN', requires_grad=True), init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg) self.direction1_conv = ConvModule( in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) self.direction2_conv = ConvModule( in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) self.aftpool_conv = ConvModule( feat_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=None) self.conv1 = ConvModule( in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.conv2 = ConvModule( in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg) self.direction1_pool = CornerPool(directions[0]) self.direction2_pool = CornerPool(directions[1]) self.relu = nn.ReLU(inplace=True) def forward(self, x: Tensor) -> Tensor: """Forward features from the upstream network. Args: x (tensor): Input feature of BiCornerPool. Returns: conv2 (tensor): Output feature of BiCornerPool. """ direction1_conv = self.direction1_conv(x) direction2_conv = self.direction2_conv(x) direction1_feat = self.direction1_pool(direction1_conv) direction2_feat = self.direction2_pool(direction2_conv) aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat) conv1 = self.conv1(x) relu = self.relu(aftpool_conv + conv1) conv2 = self.conv2(relu) return conv2 @MODELS.register_module() class CornerHead(BaseDenseHead): """Head of CornerNet: Detecting Objects as Paired Keypoints. Code is modified from the `official github repo <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/ kp.py#L73>`_ . More details can be found in the `paper <https://arxiv.org/abs/1808.01244>`_ . Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. num_feat_levels (int): Levels of feature from the previous module. 2 for HourglassNet-104 and 1 for HourglassNet-52. Because HourglassNet-104 outputs the final feature and intermediate supervision feature and HourglassNet-52 only outputs the final feature. Defaults to 2. corner_emb_channels (int): Channel of embedding vector. Defaults to 1. train_cfg (:obj:`ConfigDict` or dict, optional): Training config.
Useless in CornerHead, but we keep this variable for SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CornerHead. loss_heatmap (:obj:`ConfigDict` or dict): Config of corner heatmap loss. Defaults to GaussianFocalLoss. loss_embedding (:obj:`ConfigDict` or dict): Config of corner embedding loss. Defaults to AssociativeEmbeddingLoss. loss_offset (:obj:`ConfigDict` or dict): Config of corner offset loss. Defaults to SmoothL1Loss. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. """ def __init__(self, num_classes: int, in_channels: int, num_feat_levels: int = 2, corner_emb_channels: int = 1, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, loss_heatmap: ConfigType = dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_embedding: ConfigType = dict( type='AssociativeEmbeddingLoss', pull_weight=0.25, push_weight=0.25), loss_offset: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=1), init_cfg: OptMultiConfig = None) -> None: assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.in_channels = in_channels self.corner_emb_channels = corner_emb_channels self.with_corner_emb = self.corner_emb_channels > 0 self.corner_offset_channels = 2 self.num_feat_levels = num_feat_levels self.loss_heatmap = MODELS.build( loss_heatmap) if loss_heatmap is not None else None self.loss_embedding = MODELS.build( loss_embedding) if loss_embedding is not None else None self.loss_offset = MODELS.build( loss_offset) if loss_offset is not None else None self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _make_layers(self, out_channels: int, in_channels: int = 256, feat_channels: int = 256) -> nn.Sequential: """Initialize conv sequential for CornerHead.""" return nn.Sequential( ConvModule(in_channels, feat_channels, 3, padding=1), ConvModule( feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)) def _init_corner_kpt_layers(self) -> None: """Initialize corner keypoint layers. Including corner heatmap branch and corner offset branch. Each branch has two parts: prefix `tl_` for top-left and `br_` for bottom-right. """ self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList() self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList() self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_pool.append( BiCornerPool( self.in_channels, ['top', 'left'], out_channels=self.in_channels)) self.br_pool.append( BiCornerPool( self.in_channels, ['bottom', 'right'], out_channels=self.in_channels)) self.tl_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.br_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.tl_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) self.br_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) def _init_corner_emb_layers(self) -> None: """Initialize corner embedding layers. Only include corner embedding branch with two parts: prefix `tl_` for top-left and `br_` for bottom-right. 
""" self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) self.br_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) def _init_layers(self) -> None: """Initialize layers for CornerHead. Including two parts: corner keypoint layers and corner embedding layers """ self._init_corner_kpt_layers() if self.with_corner_emb: self._init_corner_emb_layers() def init_weights(self) -> None: super().init_weights() bias_init = bias_init_with_prob(0.1) for i in range(self.num_feat_levels): # The initialization of parameters are different between # nn.Conv2d and ConvModule. Our experiments show that # using the original initialization of nn.Conv2d increases # the final mAP by about 0.2% self.tl_heat[i][-1].conv.reset_parameters() self.tl_heat[i][-1].conv.bias.data.fill_(bias_init) self.br_heat[i][-1].conv.reset_parameters() self.br_heat[i][-1].conv.bias.data.fill_(bias_init) self.tl_off[i][-1].conv.reset_parameters() self.br_off[i][-1].conv.reset_parameters() if self.with_corner_emb: self.tl_emb[i][-1].conv.reset_parameters() self.br_emb[i][-1].conv.reset_parameters() def forward(self, feats: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of corner heatmaps, offset heatmaps and embedding heatmaps. - tl_heats (list[Tensor]): Top-left corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - br_heats (list[Tensor]): Bottom-right corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - tl_embs (list[Tensor] | list[None]): Top-left embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - br_embs (list[Tensor] | list[None]): Bottom-right embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - tl_offs (list[Tensor]): Top-left offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. - br_offs (list[Tensor]): Bottom-right offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. """ lvl_ind = list(range(self.num_feat_levels)) return multi_apply(self.forward_single, feats, lvl_ind) def forward_single(self, x: Tensor, lvl_ind: int, return_pool: bool = False) -> List[Tensor]: """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. return_pool (bool): Return corner pool feature or not. Defaults to False. Returns: tuple[Tensor]: A tuple of CornerHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_emb (Tensor | None): Predicted top-left embedding heatmap. None for `self.with_corner_emb == False`. - br_emb (Tensor | None): Predicted bottom-right embedding heatmap. None for `self.with_corner_emb == False`. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_pool (Tensor): Top-left corner pool feature. Not must have. - br_pool (Tensor): Bottom-right corner pool feature. Not must have. 
""" tl_pool = self.tl_pool[lvl_ind](x) tl_heat = self.tl_heat[lvl_ind](tl_pool) br_pool = self.br_pool[lvl_ind](x) br_heat = self.br_heat[lvl_ind](br_pool) tl_emb, br_emb = None, None if self.with_corner_emb: tl_emb = self.tl_emb[lvl_ind](tl_pool) br_emb = self.br_emb[lvl_ind](br_pool) tl_off = self.tl_off[lvl_ind](tl_pool) br_off = self.br_off[lvl_ind](br_pool) result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off] if return_pool: result_list.append(tl_pool) result_list.append(br_pool) return result_list def get_targets(self, gt_bboxes: List[Tensor], gt_labels: List[Tensor], feat_shape: Sequence[int], img_shape: Sequence[int], with_corner_emb: bool = False, with_guiding_shift: bool = False, with_centripetal_shift: bool = False) -> dict: """Generate corner targets. Including corner heatmap, corner offset. Optional: corner embedding, corner guiding shift, centripetal shift. For CornerNet, we generate corner heatmap, corner offset and corner embedding from this function. For CentripetalNet, we generate corner heatmap, corner offset, guiding shift and centripetal shift from this function. Args: gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt, ). feat_shape (Sequence[int]): Shape of output feature, [batch, channel, height, width]. img_shape (Sequence[int]): Shape of input image, [height, width, channel]. with_corner_emb (bool): Generate corner embedding target or not. Defaults to False. with_guiding_shift (bool): Generate guiding shift target or not. Defaults to False. with_centripetal_shift (bool): Generate centripetal shift target or not. Defaults to False. Returns: dict: Ground truth of corner heatmap, corner offset, corner embedding, guiding shift and centripetal shift. Containing the following keys: - topleft_heatmap (Tensor): Ground truth top-left corner heatmap. - bottomright_heatmap (Tensor): Ground truth bottom-right corner heatmap. - topleft_offset (Tensor): Ground truth top-left corner offset. - bottomright_offset (Tensor): Ground truth bottom-right corner offset. - corner_embedding (list[list[list[int]]]): Ground truth corner embedding. Not must have. - topleft_guiding_shift (Tensor): Ground truth top-left corner guiding shift. Not must have. - bottomright_guiding_shift (Tensor): Ground truth bottom-right corner guiding shift. Not must have. - topleft_centripetal_shift (Tensor): Ground truth top-left corner centripetal shift. Not must have. - bottomright_centripetal_shift (Tensor): Ground truth bottom-right corner centripetal shift. Not must have. """ batch_size, _, height, width = feat_shape img_h, img_w = img_shape[:2] width_ratio = float(width / img_w) height_ratio = float(height / img_h) gt_tl_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_br_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) if with_corner_emb: match = [] # Guiding shift is a kind of offset, from center to corner if with_guiding_shift: gt_tl_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) # Centripetal shift is also a kind of offset, from center to corner # and normalized by log. 
if with_centripetal_shift: gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) for batch_id in range(batch_size): # Ground truth of corner embedding per image is a list of coord set corner_match = [] for box_id in range(len(gt_labels[batch_id])): left, top, right, bottom = gt_bboxes[batch_id][box_id] center_x = (left + right) / 2.0 center_y = (top + bottom) / 2.0 label = gt_labels[batch_id][box_id] # Use coords in the feature level to generate ground truth scale_left = left * width_ratio scale_right = right * width_ratio scale_top = top * height_ratio scale_bottom = bottom * height_ratio scale_center_x = center_x * width_ratio scale_center_y = center_y * height_ratio # Int coords on feature map/ground truth tensor left_idx = int(min(scale_left, width - 1)) right_idx = int(min(scale_right, width - 1)) top_idx = int(min(scale_top, height - 1)) bottom_idx = int(min(scale_bottom, height - 1)) # Generate gaussian heatmap scale_box_width = ceil(scale_right - scale_left) scale_box_height = ceil(scale_bottom - scale_top) radius = gaussian_radius((scale_box_height, scale_box_width), min_overlap=0.3) radius = max(0, int(radius)) gt_tl_heatmap[batch_id, label] = gen_gaussian_target( gt_tl_heatmap[batch_id, label], [left_idx, top_idx], radius) gt_br_heatmap[batch_id, label] = gen_gaussian_target( gt_br_heatmap[batch_id, label], [right_idx, bottom_idx], radius) # Generate corner offset left_offset = scale_left - left_idx top_offset = scale_top - top_idx right_offset = scale_right - right_idx bottom_offset = scale_bottom - bottom_idx gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset gt_br_offset[batch_id, 1, bottom_idx, right_idx] = bottom_offset # Generate corner embedding if with_corner_emb: corner_match.append([[top_idx, left_idx], [bottom_idx, right_idx]]) # Generate guiding shift if with_guiding_shift: gt_tl_guiding_shift[batch_id, 0, top_idx, left_idx] = scale_center_x - left_idx gt_tl_guiding_shift[batch_id, 1, top_idx, left_idx] = scale_center_y - top_idx gt_br_guiding_shift[batch_id, 0, bottom_idx, right_idx] = right_idx - scale_center_x gt_br_guiding_shift[ batch_id, 1, bottom_idx, right_idx] = bottom_idx - scale_center_y # Generate centripetal shift if with_centripetal_shift: gt_tl_centripetal_shift[batch_id, 0, top_idx, left_idx] = log(scale_center_x - scale_left) gt_tl_centripetal_shift[batch_id, 1, top_idx, left_idx] = log(scale_center_y - scale_top) gt_br_centripetal_shift[batch_id, 0, bottom_idx, right_idx] = log(scale_right - scale_center_x) gt_br_centripetal_shift[batch_id, 1, bottom_idx, right_idx] = log(scale_bottom - scale_center_y) if with_corner_emb: match.append(corner_match) target_result = dict( topleft_heatmap=gt_tl_heatmap, topleft_offset=gt_tl_offset, bottomright_heatmap=gt_br_heatmap, bottomright_offset=gt_br_offset) if with_corner_emb: target_result.update(corner_embedding=match) if with_guiding_shift: target_result.update( topleft_guiding_shift=gt_tl_guiding_shift, bottomright_guiding_shift=gt_br_guiding_shift) if with_centripetal_shift: target_result.update( topleft_centripetal_shift=gt_tl_centripetal_shift, bottomright_centripetal_shift=gt_br_centripetal_shift) return target_result def loss_by_feat( self, tl_heats: List[Tensor], br_heats: List[Tensor], tl_embs: List[Tensor], br_embs: List[Tensor], tl_offs: List[Tensor], br_offs: 
List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - pull_loss (list[Tensor]): Part one of AssociativeEmbedding losses of all feature levels. - push_loss (list[Tensor]): Part two of AssociativeEmbedding losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. """ gt_bboxes = [ gt_instances.bboxes for gt_instances in batch_gt_instances ] gt_labels = [ gt_instances.labels for gt_instances in batch_gt_instances ] targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, batch_img_metas[0]['batch_input_shape'], with_corner_emb=self.with_corner_emb) mlvl_targets = [targets for _ in range(self.num_feat_levels)] det_losses, pull_losses, push_losses, off_losses = multi_apply( self.loss_by_feat_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, mlvl_targets) loss_dict = dict(det_loss=det_losses, off_loss=off_losses) if self.with_corner_emb: loss_dict.update(pull_loss=pull_losses, push_loss=push_losses) return loss_dict def loss_by_feat_single(self, tl_hmp: Tensor, br_hmp: Tensor, tl_emb: Optional[Tensor], br_emb: Optional[Tensor], tl_off: Tensor, br_off: Tensor, targets: dict) -> Tuple[Tensor, ...]: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_emb (Tensor, optional): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor, optional): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - pull_loss (Tensor): Part one of AssociativeEmbedding loss. 
- push_loss (Tensor): Part two of AssociativeEmbedding loss. - off_loss (Tensor): Corner offset loss. """ gt_tl_hmp = targets['topleft_heatmap'] gt_br_hmp = targets['bottomright_heatmap'] gt_tl_off = targets['topleft_offset'] gt_br_off = targets['bottomright_offset'] gt_embedding = targets['corner_embedding'] # Detection loss tl_det_loss = self.loss_heatmap( tl_hmp.sigmoid(), gt_tl_hmp, avg_factor=max(1, gt_tl_hmp.eq(1).sum())) br_det_loss = self.loss_heatmap( br_hmp.sigmoid(), gt_br_hmp, avg_factor=max(1, gt_br_hmp.eq(1).sum())) det_loss = (tl_det_loss + br_det_loss) / 2.0 # AssociativeEmbedding loss if self.with_corner_emb and self.loss_embedding is not None: pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb, gt_embedding) else: pull_loss, push_loss = None, None # Offset loss # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_hmp) br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_hmp) tl_off_loss = self.loss_offset( tl_off, gt_tl_off, tl_off_mask, avg_factor=max(1, tl_off_mask.sum())) br_off_loss = self.loss_offset( br_off, gt_br_off, br_off_mask, avg_factor=max(1, br_off_mask.sum())) off_loss = (tl_off_loss + br_off_loss) / 2.0 return det_loss, pull_loss, push_loss, off_loss def predict_by_feat(self, tl_heats: List[Tensor], br_heats: List[Tensor], tl_embs: List[Tensor], br_embs: List[Tensor], tl_offs: List[Tensor], br_offs: List[Tensor], batch_img_metas: Optional[List[dict]] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). batch_img_metas (list[dict], optional): Batch image meta info. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len( batch_img_metas) result_list = [] for img_id in range(len(batch_img_metas)): result_list.append( self._predict_by_feat_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], batch_img_metas[img_id], tl_emb=tl_embs[-1][img_id:img_id + 1, :], br_emb=br_embs[-1][img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list def _predict_by_feat_single(self, tl_heat: Tensor, br_heat: Tensor, tl_off: Tensor, br_off: Tensor, img_meta: dict, tl_emb: Optional[Tensor] = None, br_emb: Optional[Tensor] = None, tl_centripetal_shift: Optional[Tensor] = None, br_centripetal_shift: Optional[Tensor] = None, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. tl_emb (Tensor): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_centripetal_shift: Top-left corner's centripetal shift for current level with shape (N, 2, H, W). br_centripetal_shift: Bottom-right corner's centripetal shift for current level with shape (N, 2, H, W). rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" if isinstance(img_meta, (list, tuple)): img_meta = img_meta[0] batch_bboxes, batch_scores, batch_clses = self._decode_heatmap( tl_heat=tl_heat.sigmoid(), br_heat=br_heat.sigmoid(), tl_off=tl_off, br_off=br_off, tl_emb=tl_emb, br_emb=br_emb, tl_centripetal_shift=tl_centripetal_shift, br_centripetal_shift=br_centripetal_shift, img_meta=img_meta, k=self.test_cfg.corner_topk, kernel=self.test_cfg.local_maximum_kernel, distance_threshold=self.test_cfg.distance_threshold) if rescale and 'scale_factor' in img_meta: batch_bboxes /= batch_bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) bboxes = batch_bboxes.view([-1, 4]) scores = batch_scores.view(-1) clses = batch_clses.view(-1) det_bboxes = torch.cat([bboxes, scores.unsqueeze(-1)], -1) keepinds = (det_bboxes[:, -1] > -0.1) det_bboxes = det_bboxes[keepinds] det_labels = clses[keepinds] if with_nms: det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels, self.test_cfg) results = InstanceData() results.bboxes = det_bboxes[..., :4] results.scores = det_bboxes[..., 4] results.labels = det_labels return results def _bboxes_nms(self, bboxes: Tensor, labels: Tensor, cfg: ConfigDict) -> Tuple[Tensor, Tensor]: """bboxes nms.""" if 'nms_cfg' in cfg: warning.warn('nms_cfg in test_cfg will be deprecated. ' 'Please rename it as nms') if 'nms' not in cfg: cfg.nms = cfg.nms_cfg if labels.numel() > 0: max_num = cfg.max_per_img bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1].contiguous(), labels, cfg.nms) if max_num > 0: bboxes = bboxes[:max_num] labels = labels[keep][:max_num] return bboxes, labels def _decode_heatmap(self, tl_heat: Tensor, br_heat: Tensor, tl_off: Tensor, br_off: Tensor, tl_emb: Optional[Tensor] = None, br_emb: Optional[Tensor] = None, tl_centripetal_shift: Optional[Tensor] = None, br_centripetal_shift: Optional[Tensor] = None, img_meta: Optional[dict] = None, k: int = 100, kernel: int = 3, distance_threshold: float = 0.5, num_dets: int = 1000) -> Tuple[Tensor, Tensor, Tensor]: """Transform outputs into detections raw bbox prediction. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). tl_emb (Tensor, Optional): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor, Optional): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_centripetal_shift (Tensor, Optional): Top-left centripetal shift for current level with shape (N, 2, H, W). br_centripetal_shift (Tensor, Optional): Bottom-right centripetal shift for current level with shape (N, 2, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. k (int): Get top k corner keypoints from heatmap. kernel (int): Max pooling kernel for extract local maximum pixels. distance_threshold (float): Distance threshold. Top-left and bottom-right corner keypoints with feature distance less than the threshold will be regarded as keypoints from same object. num_dets (int): Num of raw boxes before doing nms. Returns: tuple[torch.Tensor]: Decoded output of CornerHead, containing the following Tensors: - bboxes (Tensor): Coords of each box. - scores (Tensor): Scores of each box. 
- clses (Tensor): Categories of each box. """ with_embedding = tl_emb is not None and br_emb is not None with_centripetal_shift = ( tl_centripetal_shift is not None and br_centripetal_shift is not None) assert with_embedding + with_centripetal_shift == 1 batch, _, height, width = tl_heat.size() if torch.onnx.is_in_onnx_export(): inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2] else: inp_h, inp_w = img_meta['batch_input_shape'][:2] # perform nms on heatmaps tl_heat = get_local_maximum(tl_heat, kernel=kernel) br_heat = get_local_maximum(br_heat, kernel=kernel) tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap( tl_heat, k=k) br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap( br_heat, k=k) # We use repeat instead of expand here because expand is a # shallow-copy function. Thus it could cause unexpected testing result # sometimes. Using expand will decrease about 10% mAP during testing # compared to repeat. tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k) tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k) br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1) br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1) tl_off = transpose_and_gather_feat(tl_off, tl_inds) tl_off = tl_off.view(batch, k, 1, 2) br_off = transpose_and_gather_feat(br_off, br_inds) br_off = br_off.view(batch, 1, k, 2) tl_xs = tl_xs + tl_off[..., 0] tl_ys = tl_ys + tl_off[..., 1] br_xs = br_xs + br_off[..., 0] br_ys = br_ys + br_off[..., 1] if with_centripetal_shift: tl_centripetal_shift = transpose_and_gather_feat( tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp() br_centripetal_shift = transpose_and_gather_feat( br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp() tl_ctxs = tl_xs + tl_centripetal_shift[..., 0] tl_ctys = tl_ys + tl_centripetal_shift[..., 1] br_ctxs = br_xs - br_centripetal_shift[..., 0] br_ctys = br_ys - br_centripetal_shift[..., 1] # all possible boxes based on top k corners (ignoring class) tl_xs *= (inp_w / width) tl_ys *= (inp_h / height) br_xs *= (inp_w / width) br_ys *= (inp_h / height) if with_centripetal_shift: tl_ctxs *= (inp_w / width) tl_ctys *= (inp_h / height) br_ctxs *= (inp_w / width) br_ctys *= (inp_h / height) x_off, y_off = 0, 0 # no crop if not torch.onnx.is_in_onnx_export(): # since `RandomCenterCropPad` is done on CPU with numpy and it's # not dynamic traceable when exporting to ONNX, thus 'border' # does not appears as key in 'img_meta'. As a tmp solution, # we move this 'border' handle part to the postprocess after # finished exporting to ONNX, which is handle in # `mmdet/core/export/model_wrappers.py`. Though difference between # pytorch and exported onnx model, it might be ignored since # comparable performance is achieved between them (e.g. 
40.4 vs # 40.6 on COCO val2017, for CornerNet without test-time flip) if 'border' in img_meta: x_off = img_meta['border'][2] y_off = img_meta['border'][0] tl_xs -= x_off tl_ys -= y_off br_xs -= x_off br_ys -= y_off zeros = tl_xs.new_zeros(*tl_xs.size()) tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros) tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros) br_xs = torch.where(br_xs > 0.0, br_xs, zeros) br_ys = torch.where(br_ys > 0.0, br_ys, zeros) bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3) area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs() if with_centripetal_shift: tl_ctxs -= x_off tl_ctys -= y_off br_ctxs -= x_off br_ctys -= y_off tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs) tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys) br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs) br_ctys *= br_ctys.gt(0.0).type_as(br_ctys) ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys), dim=3) area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs() rcentral = torch.zeros_like(ct_bboxes) # magic nums from paper section 4.1 mu = torch.ones_like(area_bboxes) / 2.4 mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2 bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2 rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) * (rcentral[..., 3] - rcentral[..., 1])).abs() dists = area_ct_bboxes / area_rcentral tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | ( ct_bboxes[..., 0] >= rcentral[..., 2]) tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | ( ct_bboxes[..., 1] >= rcentral[..., 3]) br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | ( ct_bboxes[..., 2] >= rcentral[..., 2]) br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | ( ct_bboxes[..., 3] >= rcentral[..., 3]) if with_embedding: tl_emb = transpose_and_gather_feat(tl_emb, tl_inds) tl_emb = tl_emb.view(batch, k, 1) br_emb = transpose_and_gather_feat(br_emb, br_inds) br_emb = br_emb.view(batch, 1, k) dists = torch.abs(tl_emb - br_emb) tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k) br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1) scores = (tl_scores + br_scores) / 2 # scores for all possible boxes # tl and br should have same class tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k) br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1) cls_inds = (tl_clses != br_clses) # reject boxes based on distances dist_inds = dists > distance_threshold # reject boxes based on widths and heights width_inds = (br_xs <= tl_xs) height_inds = (br_ys <= tl_ys) # No use `scores[cls_inds]`, instead we use `torch.where` here. # Since only 1-D indices with type 'tensor(bool)' are supported # when exporting to ONNX, any other bool indices with more dimensions # (e.g. 
2-D bool tensor) as input parameter in node is invalid negative_scores = -1 * torch.ones_like(scores) scores = torch.where(cls_inds, negative_scores, scores) scores = torch.where(width_inds, negative_scores, scores) scores = torch.where(height_inds, negative_scores, scores) scores = torch.where(dist_inds, negative_scores, scores) if with_centripetal_shift: scores[tl_ctx_inds] = -1 scores[tl_cty_inds] = -1 scores[br_ctx_inds] = -1 scores[br_cty_inds] = -1 scores = scores.view(batch, -1) scores, inds = torch.topk(scores, num_dets) scores = scores.unsqueeze(2) bboxes = bboxes.view(batch, -1, 4) bboxes = gather_feat(bboxes, inds) clses = tl_clses.contiguous().view(batch, -1, 1) clses = gather_feat(clses, inds) return bboxes, scores, clses
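if __name__ == '__main__':
    # A minimal toy sketch (single image, k=2, illustrative values only) of
    # the corner-pairing step in `_decode_heatmap` above: every top-left
    # corner is combined with every bottom-right corner, and pairs with
    # mismatched classes or inverted geometry get their score set to -1.
    import torch
    k = 2
    tl_xs = torch.tensor([[10., 50.]]).view(1, k, 1).repeat(1, 1, k)
    tl_ys = torch.tensor([[10., 50.]]).view(1, k, 1).repeat(1, 1, k)
    br_xs = torch.tensor([[40., 20.]]).view(1, 1, k).repeat(1, k, 1)
    br_ys = torch.tensor([[40., 20.]]).view(1, 1, k).repeat(1, k, 1)
    tl_clses = torch.tensor([[0, 1]]).view(1, k, 1).repeat(1, 1, k)
    br_clses = torch.tensor([[0, 1]]).view(1, 1, k).repeat(1, k, 1)
    scores = torch.full((1, k, k), 0.9)
    reject = (tl_clses != br_clses) | (br_xs <= tl_xs) | (br_ys <= tl_ys)
    scores = torch.where(reject, torch.full_like(scores, -1.), scores)
    bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
    # only the (tl=0, br=0) pair survives: same class, br below-right of tl
    print(scores.view(-1).tolist())  # -> approx [0.9, -1.0, -1.0, -1.0]
    print(bboxes.view(-1, 4)[0].tolist())  # -> [10.0, 10.0, 40.0, 40.0]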
49,393
44.524424
79
py
ERD
ERD-main/mmdet/models/dense_heads/yolact_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import List, Optional import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmengine.model import BaseModule, ModuleList from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, OptMultiConfig) from ..layers import fast_nms from ..utils import images_to_levels, multi_apply, select_single_mlvl from ..utils.misc import empty_instances from .anchor_head import AnchorHead from .base_mask_head import BaseMaskHead @MODELS.register_module() class YOLACTHead(AnchorHead): """YOLACT box head used in https://arxiv.org/abs/1904.02689. Note that YOLACT head is a light version of RetinaNet head. Four differences are described as follows: 1. YOLACT box head has three-times fewer anchors. 2. YOLACT box head shares the convs for box and cls branches. 3. YOLACT box head uses OHEM instead of Focal loss. 4. YOLACT box head predicts a set of mask coefficients for each box. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor generator loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. num_head_convs (int): Number of the conv layers shared by box and cls branches. num_protos (int): Number of the mask coefficients. use_ohem (bool): If true, ``loss_single_OHEM`` will be used for cls loss calculation. If false, ``loss_single`` will be used. conv_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to construct and config conv layer. norm_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to construct and config norm layer. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, num_classes: int, in_channels: int, anchor_generator: ConfigType = dict( type='AnchorGenerator', octave_base_scale=3, scales_per_octave=1, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=False, reduction='none', loss_weight=1.0), loss_bbox: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.5), num_head_convs: int = 1, num_protos: int = 32, use_ohem: bool = True, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, init_cfg: OptMultiConfig = dict( type='Xavier', distribution='uniform', bias=0, layer='Conv2d'), **kwargs) -> None: self.num_head_convs = num_head_convs self.num_protos = num_protos self.use_ohem = use_ohem self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super().__init__( num_classes=num_classes, in_channels=in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.head_convs = ModuleList() for i in range(self.num_head_convs): chn = self.in_channels if i == 0 else self.feat_channels self.head_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.conv_coeff = nn.Conv2d( self.feat_channels, self.num_base_priors * self.num_protos, 3, padding=1) def forward_single(self, x: Tensor) -> tuple: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: - cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale level, the channels number is num_anchors * 4. - coeff_pred (Tensor): Mask coefficients for a single scale level, the channels number is num_anchors * num_protos. """ for head_conv in self.head_convs: x = head_conv(x) cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) coeff_pred = self.conv_coeff(x).tanh() return cls_score, bbox_pred, coeff_pred def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], coeff_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the bbox head. When ``self.use_ohem == True``, it functions like ``SSDHead.loss``, otherwise, it follows ``AnchorHead.loss``. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). coeff_preds (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, unmap_outputs=not self.use_ohem, return_sampling_results=True) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results) = cls_reg_targets if self.use_ohem: num_images = len(batch_img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) # check NaN and Inf assert torch.isfinite(all_cls_scores).all().item(), \ 'classification scores become infinite or NaN!' assert torch.isfinite(all_bbox_preds).all().item(), \ 'bbox predications become infinite or NaN!' losses_cls, losses_bbox = multi_apply( self.OHEMloss_by_feat_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, avg_factor=avg_factor) else: # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor=avg_factor) losses = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) # update `_raw_positive_infos`, which will be used when calling # `get_positive_infos`. self._raw_positive_infos.update(coeff_preds=coeff_preds) return losses def OHEMloss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, anchors: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, avg_factor: int) -> tuple: """Compute loss of a single image. Similar to func:``SSDHead.loss_by_feat_single`` Args: cls_score (Tensor): Box scores for eachimage Has shape (num_total_anchors, num_classes). bbox_pred (Tensor): Box energies / deltas for each image level with shape (num_total_anchors, 4). anchors (Tensor): Box reference for each scale level with shape (num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (num_total_anchors,). label_weights (Tensor): Label weights of each anchor with shape (num_total_anchors,) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (num_total_anchors, 4). bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (num_total_anchors, 4). avg_factor (int): Average factor that is used to average the loss. 
When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: Tuple[Tensor, Tensor]: A tuple of cls loss and bbox loss of one feature map. """ loss_cls_all = self.loss_cls(cls_score, labels, label_weights) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( as_tuple=False).reshape(-1) neg_inds = (labels == self.num_classes).nonzero( as_tuple=False).view(-1) num_pos_samples = pos_inds.size(0) if num_pos_samples == 0: num_neg_samples = neg_inds.size(0) else: num_neg_samples = self.train_cfg['neg_pos_ratio'] * \ num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / avg_factor if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_bbox = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor) return loss_cls[None], loss_bbox def get_positive_infos(self) -> InstanceList: """Get positive information from sampling results. Returns: list[:obj:`InstanceData`]: Positive Information of each image, usually including positive bboxes, positive labels, positive priors, positive coeffs, etc. """ assert len(self._raw_positive_infos) > 0 sampling_results = self._raw_positive_infos['sampling_results'] num_imgs = len(sampling_results) coeff_pred_list = [] for coeff_pred_per_level in self._raw_positive_infos['coeff_preds']: coeff_pred_per_level = \ coeff_pred_per_level.permute( 0, 2, 3, 1).reshape(num_imgs, -1, self.num_protos) coeff_pred_list.append(coeff_pred_per_level) coeff_preds = torch.cat(coeff_pred_list, dim=1) pos_info_list = [] for idx, sampling_result in enumerate(sampling_results): pos_info = InstanceData() coeff_preds_single = coeff_preds[idx] pos_info.pos_assigned_gt_inds = \ sampling_result.pos_assigned_gt_inds pos_info.pos_inds = sampling_result.pos_inds pos_info.coeffs = coeff_preds_single[sampling_result.pos_inds] pos_info.bboxes = sampling_result.pos_gt_bboxes pos_info_list.append(pos_info) return pos_info_list def predict_by_feat(self, cls_scores, bbox_preds, coeff_preds, batch_img_metas, cfg=None, rescale=True, **kwargs): """Similar to func:``AnchorHead.get_bboxes``, but additionally processes coeff_preds. Args: cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) coeff_preds (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W) batch_img_metas (list[dict]): Batch image meta info. cfg (:obj:`Config` | None): Test / postprocessing configuration, if None, test_cfg would be used rescale (bool): If True, return boxes in original image space. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). 
- bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arranged as (x1, y1, x2, y2). - coeffs (Tensor): the predicted mask coefficients of the instance inside the corresponding box, with shape (n, num_protos). """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) device = cls_scores[0].device featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, device=device) result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) coeff_pred_list = select_single_mlvl(coeff_preds, img_id) results = self._predict_by_feat_single( cls_score_list=cls_score_list, bbox_pred_list=bbox_pred_list, coeff_preds_list=coeff_pred_list, mlvl_priors=mlvl_priors, img_meta=img_meta, cfg=cfg, rescale=rescale) result_list.append(results) return result_list def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], coeff_preds_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigType, rescale: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Similar to func:``AnchorHead._predict_by_feat_single``, but additionally processes coeff_preds_list and uses fast NMS instead of traditional NMS. Args: cls_score_list (list[Tensor]): Box scores for a single scale level, with shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas for a single scale level with shape (num_priors * 4, H, W). coeff_preds_list (list[Tensor]): Mask coefficients for a single scale level with shape (num_priors * num_protos, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arranged as (x1, y1, x2, y2). - coeffs (Tensor): the predicted mask coefficients of the instance inside the corresponding box, with shape (n, num_protos). """ assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_priors) cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bbox_preds = [] mlvl_valid_priors = [] mlvl_scores = [] mlvl_coeffs = [] for cls_score, bbox_pred, coeff_pred, priors in \ zip(cls_score_list, bbox_pred_list, coeff_preds_list, mlvl_priors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) coeff_pred = coeff_pred.permute(1, 2, 0).reshape(-1, self.num_protos) if 0 < nms_pre < scores.shape[0]: # Get maximum scores for foreground classes.
if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) priors = priors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] coeff_pred = coeff_pred[topk_inds, :] mlvl_bbox_preds.append(bbox_pred) mlvl_valid_priors.append(priors) mlvl_scores.append(scores) mlvl_coeffs.append(coeff_pred) bbox_pred = torch.cat(mlvl_bbox_preds) priors = torch.cat(mlvl_valid_priors) multi_bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) multi_scores = torch.cat(mlvl_scores) multi_coeffs = torch.cat(mlvl_coeffs) return self._bbox_post_process( multi_bboxes=multi_bboxes, multi_scores=multi_scores, multi_coeffs=multi_coeffs, cfg=cfg, rescale=rescale, img_meta=img_meta) def _bbox_post_process(self, multi_bboxes: Tensor, multi_scores: Tensor, multi_coeffs: Tensor, cfg: ConfigType, rescale: bool = False, img_meta: Optional[dict] = None, **kwargs) -> InstanceData: """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually `with_nms` is False is used for aug test. Args: multi_bboxes (Tensor): Predicted bbox that concat all levels. multi_scores (Tensor): Bbox scores that concat all levels. multi_coeffs (Tensor): Mask coefficients that concat all levels. cfg (ConfigDict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default to False. img_meta (dict, optional): Image meta info. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - coeffs (Tensor): the predicted mask coefficients of instance inside the corresponding box has a shape (n, num_protos). """ if rescale: assert img_meta.get('scale_factor') is not None multi_bboxes /= multi_bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) # mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = multi_scores.new_zeros(multi_scores.shape[0], 1) multi_scores = torch.cat([multi_scores, padding], dim=1) det_bboxes, det_labels, det_coeffs = fast_nms( multi_bboxes, multi_scores, multi_coeffs, cfg.score_thr, cfg.iou_thr, cfg.top_k, cfg.max_per_img) results = InstanceData() results.bboxes = det_bboxes[:, :4] results.scores = det_bboxes[:, -1] results.labels = det_labels results.coeffs = det_coeffs return results @MODELS.register_module() class YOLACTProtonet(BaseMaskHead): """YOLACT mask head used in https://arxiv.org/abs/1904.02689. This head outputs the mask prototypes for YOLACT. Args: in_channels (int): Number of channels in the input feature map. proto_channels (tuple[int]): Output channels of protonet convs. proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs. include_last_relu (bool): If keep the last relu of protonet. num_protos (int): Number of prototypes. num_classes (int): Number of categories excluding the background category. 
loss_mask_weight (float): Reweight the mask loss by this factor. max_masks_to_train (int): Maximum number of masks to train for each image. with_seg_branch (bool): Whether to apply a semantic segmentation branch and calculate loss during training to increase performance with no speed penalty. Defaults to True. loss_segm (:obj:`ConfigDict` or dict, optional): Config of semantic segmentation loss. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of head. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of head. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or list[dict], optional): Initialization config dict. """ def __init__( self, num_classes: int, in_channels: int = 256, proto_channels: tuple = (256, 256, 256, None, 256, 32), proto_kernel_sizes: tuple = (3, 3, 3, -2, 3, 1), include_last_relu: bool = True, num_protos: int = 32, loss_mask_weight: float = 1.0, max_masks_to_train: int = 100, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, with_seg_branch: bool = True, loss_segm: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), init_cfg=dict( type='Xavier', distribution='uniform', override=dict(name='protonet')) ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.proto_channels = proto_channels self.proto_kernel_sizes = proto_kernel_sizes self.include_last_relu = include_last_relu # Segmentation branch self.with_seg_branch = with_seg_branch self.segm_branch = SegmentationModule( num_classes=num_classes, in_channels=in_channels) \ if with_seg_branch else None self.loss_segm = MODELS.build(loss_segm) if with_seg_branch else None self.loss_mask_weight = loss_mask_weight self.num_protos = num_protos self.num_classes = num_classes self.max_masks_to_train = max_masks_to_train self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" # Possible patterns: # ( 256, 3) -> conv # ( 256,-2) -> deconv # (None,-2) -> bilinear interpolate in_channels = self.in_channels protonets = ModuleList() for num_channels, kernel_size in zip(self.proto_channels, self.proto_kernel_sizes): if kernel_size > 0: layer = nn.Conv2d( in_channels, num_channels, kernel_size, padding=kernel_size // 2) else: if num_channels is None: layer = InterpolateModule( scale_factor=-kernel_size, mode='bilinear', align_corners=False) else: layer = nn.ConvTranspose2d( in_channels, num_channels, -kernel_size, padding=kernel_size // 2) protonets.append(layer) protonets.append(nn.ReLU(inplace=True)) in_channels = num_channels if num_channels is not None \ else in_channels if not self.include_last_relu: protonets = protonets[:-1] self.protonet = nn.Sequential(*protonets) def forward(self, x: tuple, positive_infos: InstanceList) -> tuple: """Forward feature from the upstream network to get prototypes and linearly combine the prototypes, using mask coefficients, into instance masks. Finally, crop the instance masks with given bboxes. Args: x (Tuple[Tensor]): Feature from the upstream network, which is a 4D-tensor. positive_infos (List[:obj:``InstanceData``]): Positive information computed by the detection head. Returns: tuple: Predicted instance segmentation masks and semantic segmentation map. """ # YOLACT uses a single feature map to get segmentation masks single_x = x[0] # The YOLACT segmentation branch only runs during training; when not # training or when the branch is None, it is skipped.
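# Further below, mask assembly follows the YOLACT formulation: with # prototypes P of shape (H, W, num_protos) and positive coefficients C of # shape (num_pos, num_protos), the instance masks are sigmoid(P @ C.T), # one (H, W) map per positive box.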
if self.segm_branch is not None and self.training: segm_preds = self.segm_branch(single_x) else: segm_preds = None # YOLACT mask head prototypes = self.protonet(single_x) prototypes = prototypes.permute(0, 2, 3, 1).contiguous() num_imgs = single_x.size(0) mask_pred_list = [] for idx in range(num_imgs): cur_prototypes = prototypes[idx] pos_coeffs = positive_infos[idx].coeffs # Linearly combine the prototypes with the mask coefficients mask_preds = cur_prototypes @ pos_coeffs.t() mask_preds = torch.sigmoid(mask_preds) mask_pred_list.append(mask_preds) return mask_pred_list, segm_preds def loss_by_feat(self, mask_preds: List[Tensor], segm_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], positive_infos: InstanceList, **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mask_preds (list[Tensor]): List of predicted prototypes, each has shape (num_classes, H, W). segm_preds (Tensor): Predicted semantic segmentation map with shape (N, num_classes, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. positive_infos (List[:obj:``InstanceData``]): Information of positive samples of each image that are assigned in detection head. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert positive_infos is not None, \ 'positive_infos should not be None in `YOLACTProtonet`' losses = dict() # crop croped_mask_pred = self.crop_mask_preds(mask_preds, batch_img_metas, positive_infos) loss_mask = [] loss_segm = [] num_imgs, _, mask_h, mask_w = segm_preds.size() assert num_imgs == len(croped_mask_pred) segm_avg_factor = num_imgs * mask_h * mask_w total_pos = 0 if self.segm_branch is not None: assert segm_preds is not None for idx in range(num_imgs): img_meta = batch_img_metas[idx] (mask_preds, pos_mask_targets, segm_targets, num_pos, gt_bboxes_for_reweight) = self._get_targets_single( croped_mask_pred[idx], segm_preds[idx], batch_gt_instances[idx], positive_infos[idx]) # segmentation loss if self.with_seg_branch: if segm_targets is None: loss = segm_preds[idx].sum() * 0. else: loss = self.loss_segm( segm_preds[idx], segm_targets, avg_factor=segm_avg_factor) loss_segm.append(loss) # mask loss total_pos += num_pos if num_pos == 0 or pos_mask_targets is None: loss = mask_preds.sum() * 0. else: mask_preds = torch.clamp(mask_preds, 0, 1) loss = F.binary_cross_entropy( mask_preds, pos_mask_targets, reduction='none') * self.loss_mask_weight h, w = img_meta['img_shape'][:2] gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] - gt_bboxes_for_reweight[:, 0]) / w gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] - gt_bboxes_for_reweight[:, 1]) / h loss = loss.mean(dim=(1, 2)) / gt_bboxes_width / gt_bboxes_height loss = torch.sum(loss) loss_mask.append(loss) if total_pos == 0: total_pos += 1 # avoid nan loss_mask = [x / total_pos for x in loss_mask] losses.update(loss_mask=loss_mask) if self.with_seg_branch: losses.update(loss_segm=loss_segm) return losses def _get_targets_single(self, mask_preds: Tensor, segm_pred: Tensor, gt_instances: InstanceData, positive_info: InstanceData): """Compute targets for predictions of single image. Args: mask_preds (Tensor): Predicted prototypes with shape (num_classes, H, W). segm_pred (Tensor): Predicted semantic segmentation map with shape (num_classes, H, W). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. 
                It should include ``bboxes``, ``labels``, and ``masks``
                attributes.
            positive_info (:obj:`InstanceData`): Information of positive
                samples that are assigned in detection head. It usually
                contains following keys.

                - pos_assigned_gt_inds (Tensor): Assigned GT indices of
                  positive proposals, has shape (num_pos, )
                - pos_inds (Tensor): Positive index of image, has
                  shape (num_pos, ).
                - coeffs (Tensor): Positive mask coefficients
                  with shape (num_pos, num_protos).
                - bboxes (Tensor): Positive bboxes with shape
                  (num_pos, 4)

        Returns:
            tuple: Usually returns a tuple containing learning targets.

            - mask_preds (Tensor): Positive predicted mask with shape
              (num_pos, mask_h, mask_w).
            - pos_mask_targets (Tensor): Positive mask targets with shape
              (num_pos, mask_h, mask_w).
            - segm_targets (Tensor): Semantic segmentation targets with shape
              (num_classes, segm_h, segm_w).
            - num_pos (int): Number of positive samples.
            - gt_bboxes_for_reweight (Tensor): GT bboxes that match to the
              positive priors, has shape (num_pos, 4).
        """
        gt_bboxes = gt_instances.bboxes
        gt_labels = gt_instances.labels
        device = gt_bboxes.device
        gt_masks = gt_instances.masks.to_tensor(
            dtype=torch.bool, device=device).float()
        if gt_masks.size(0) == 0:
            return mask_preds, None, None, 0, None

        # process with semantic segmentation targets
        if segm_pred is not None:
            num_classes, segm_h, segm_w = segm_pred.size()
            with torch.no_grad():
                downsampled_masks = F.interpolate(
                    gt_masks.unsqueeze(0), (segm_h, segm_w),
                    mode='bilinear',
                    align_corners=False).squeeze(0)
                downsampled_masks = downsampled_masks.gt(0.5).float()
                segm_targets = torch.zeros_like(
                    segm_pred, requires_grad=False)
                for obj_idx in range(downsampled_masks.size(0)):
                    segm_targets[gt_labels[obj_idx] - 1] = torch.max(
                        segm_targets[gt_labels[obj_idx] - 1],
                        downsampled_masks[obj_idx])
        else:
            segm_targets = None

        # process with mask targets
        pos_assigned_gt_inds = positive_info.pos_assigned_gt_inds
        num_pos = pos_assigned_gt_inds.size(0)
        # Since we're producing (near) full image masks,
        # it'd take too much vram to backprop on every single mask.
        # Thus we select only a subset.
        if num_pos > self.max_masks_to_train:
            perm = torch.randperm(num_pos)
            select = perm[:self.max_masks_to_train]
            mask_preds = mask_preds[select]
            pos_assigned_gt_inds = pos_assigned_gt_inds[select]
            num_pos = self.max_masks_to_train

        gt_bboxes_for_reweight = gt_bboxes[pos_assigned_gt_inds]

        mask_h, mask_w = mask_preds.shape[-2:]
        gt_masks = F.interpolate(
            gt_masks.unsqueeze(0), (mask_h, mask_w),
            mode='bilinear',
            align_corners=False).squeeze(0)
        gt_masks = gt_masks.gt(0.5).float()
        pos_mask_targets = gt_masks[pos_assigned_gt_inds]

        return (mask_preds, pos_mask_targets, segm_targets, num_pos,
                gt_bboxes_for_reweight)

    def crop_mask_preds(self, mask_preds: List[Tensor],
                        batch_img_metas: List[dict],
                        positive_infos: InstanceList) -> list:
        """Crop predicted masks by zeroing out everything not in the predicted
        bbox.

        Args:
            mask_preds (list[Tensor]): Predicted prototypes with shape
                (num_classes, H, W).
            batch_img_metas (list[dict]): Meta information of multiple images.
            positive_infos (List[:obj:``InstanceData``]): Positive
                information that calculate from detect head.

        Returns:
            list: The cropped masks.
""" croped_mask_preds = [] for img_meta, mask_preds, cur_info in zip(batch_img_metas, mask_preds, positive_infos): bboxes_for_cropping = copy.deepcopy(cur_info.bboxes) h, w = img_meta['img_shape'][:2] bboxes_for_cropping[:, 0::2] /= w bboxes_for_cropping[:, 1::2] /= h mask_preds = self.crop_single(mask_preds, bboxes_for_cropping) mask_preds = mask_preds.permute(2, 0, 1).contiguous() croped_mask_preds.append(mask_preds) return croped_mask_preds def crop_single(self, masks: Tensor, boxes: Tensor, padding: int = 1) -> Tensor: """Crop single predicted masks by zeroing out everything not in the predicted bbox. Args: masks (Tensor): Predicted prototypes, has shape [H, W, N]. boxes (Tensor): Bbox coords in relative point form with shape [N, 4]. padding (int): Image padding size. Return: Tensor: The cropped masks. """ h, w, n = masks.size() x1, x2 = self.sanitize_coordinates( boxes[:, 0], boxes[:, 2], w, padding, cast=False) y1, y2 = self.sanitize_coordinates( boxes[:, 1], boxes[:, 3], h, padding, cast=False) rows = torch.arange( w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n) cols = torch.arange( h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n) masks_left = rows >= x1.view(1, 1, -1) masks_right = rows < x2.view(1, 1, -1) masks_up = cols >= y1.view(1, 1, -1) masks_down = cols < y2.view(1, 1, -1) crop_mask = masks_left * masks_right * masks_up * masks_down return masks * crop_mask.float() def sanitize_coordinates(self, x1: Tensor, x2: Tensor, img_size: int, padding: int = 0, cast: bool = True) -> tuple: """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size. Also converts from relative to absolute coordinates and casts the results to long tensors. Warning: this does things in-place behind the scenes so copy if necessary. Args: x1 (Tensor): shape (N, ). x2 (Tensor): shape (N, ). img_size (int): Size of the input image. padding (int): x1 >= padding, x2 <= image_size-padding. cast (bool): If cast is false, the result won't be cast to longs. Returns: tuple: - x1 (Tensor): Sanitized _x1. - x2 (Tensor): Sanitized _x2. """ x1 = x1 * img_size x2 = x2 * img_size if cast: x1 = x1.long() x2 = x2.long() x1 = torch.min(x1, x2) x2 = torch.max(x1, x2) x1 = torch.clamp(x1 - padding, min=0) x2 = torch.clamp(x2 + padding, max=img_size) return x1, x2 def predict_by_feat(self, mask_preds: List[Tensor], segm_preds: Tensor, results_list: InstanceList, batch_img_metas: List[dict], rescale: bool = True, **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. Args: mask_preds (list[Tensor]): Predicted prototypes with shape (num_classes, H, W). results_list (List[:obj:``InstanceData``]): BBoxHead results. batch_img_metas (list[dict]): Meta information of all images. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
""" assert len(mask_preds) == len(results_list) == len(batch_img_metas) croped_mask_pred = self.crop_mask_preds(mask_preds, batch_img_metas, results_list) for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] results = results_list[img_id] bboxes = results.bboxes mask_preds = croped_mask_pred[img_id] if bboxes.shape[0] == 0 or mask_preds.shape[0] == 0: results_list[img_id] = empty_instances( [img_meta], bboxes.device, task_type='mask', instance_results=[results])[0] else: im_mask = self._predict_by_feat_single( mask_preds=croped_mask_pred[img_id], bboxes=bboxes, img_meta=img_meta, rescale=rescale) results.masks = im_mask return results_list def _predict_by_feat_single(self, mask_preds: Tensor, bboxes: Tensor, img_meta: dict, rescale: bool, cfg: OptConfigType = None): """Transform a single image's features extracted from the head into mask results. Args: mask_preds (Tensor): Predicted prototypes, has shape [H, W, N]. bboxes (Tensor): Bbox coords in relative point form with shape [N, 4]. img_meta (dict): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If rescale is False, then returned masks will fit the scale of imgs[0]. cfg (dict, optional): Config used in test phase. Defaults to None. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ cfg = self.test_cfg if cfg is None else cfg scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) img_h, img_w = img_meta['ori_shape'][:2] if rescale: # in-placed rescale the bboxes scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes /= scale_factor else: w_scale, h_scale = scale_factor[0, 0], scale_factor[0, 1] img_h = np.round(img_h * h_scale.item()).astype(np.int32) img_w = np.round(img_w * w_scale.item()).astype(np.int32) masks = F.interpolate( mask_preds.unsqueeze(0), (img_h, img_w), mode='bilinear', align_corners=False).squeeze(0) > cfg.mask_thr if cfg.mask_thr_binary < 0: # for visualization and debugging masks = (masks * 255).to(dtype=torch.uint8) return masks class SegmentationModule(BaseModule): """YOLACT segmentation branch used in <https://arxiv.org/abs/1904.02689>`_ In mmdet v2.x `segm_loss` is calculated in YOLACTSegmHead, while in mmdet v3.x `SegmentationModule` is used to obtain the predicted semantic segmentation map and `segm_loss` is calculated in YOLACTProtonet. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__( self, num_classes: int, in_channels: int = 256, init_cfg: ConfigType = dict( type='Xavier', distribution='uniform', override=dict(name='segm_conv')) ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.num_classes = num_classes self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self.segm_conv = nn.Conv2d( self.in_channels, self.num_classes, kernel_size=1) def forward(self, x: Tensor) -> Tensor: """Forward feature from the upstream network. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. Returns: Tensor: Predicted semantic segmentation map with shape (N, num_classes, H, W). 
""" return self.segm_conv(x) class InterpolateModule(BaseModule): """This is a module version of F.interpolate. Any arguments you give it just get passed along for the ride. """ def __init__(self, *args, init_cfg=None, **kwargs) -> None: super().__init__(init_cfg=init_cfg) self.args = args self.kwargs = kwargs def forward(self, x: Tensor) -> Tensor: """Forward features from the upstream network. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. Returns: Tensor: A 4D-tensor feature map. """ return F.interpolate(x, *self.args, **self.kwargs)
50,619
41.39531
79
py
ERD
ERD-main/mmdet/models/dense_heads/base_dense_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from abc import ABCMeta, abstractmethod
from inspect import signature
from typing import List, Optional, Tuple

import torch
from mmcv.ops import batched_nms
from mmengine.config import ConfigDict
from mmengine.model import BaseModule, constant_init
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.structures import SampleList
from mmdet.structures.bbox import (cat_boxes, get_box_tensor, get_box_wh,
                                   scale_boxes)
from mmdet.utils import InstanceList, OptMultiConfig
from ..test_time_augs import merge_aug_results
from ..utils import (filter_scores_and_topk, select_single_mlvl,
                     unpack_gt_instances)


class BaseDenseHead(BaseModule, metaclass=ABCMeta):
    """Base class for DenseHeads.

    1. The ``init_weights`` method is used to initialize densehead's
    model parameters. After detector initialization, ``init_weights``
    is triggered when ``detector.init_weights()`` is called externally.

    2. The ``loss`` method is used to calculate the loss of densehead,
    which includes two steps: (1) the densehead model performs forward
    propagation to obtain the feature maps (2) The ``loss_by_feat`` method
    is called based on the feature maps to calculate the loss.

    .. code:: text

    loss(): forward() -> loss_by_feat()

    3. The ``predict`` method is used to predict detection results,
    which includes two steps: (1) the densehead model performs forward
    propagation to obtain the feature maps (2) The ``predict_by_feat`` method
    is called based on the feature maps to predict detection results including
    post-processing.

    .. code:: text

    predict(): forward() -> predict_by_feat()

    4. The ``loss_and_predict`` method is used to return loss and detection
    results at the same time. It will call densehead's ``forward``,
    ``loss_by_feat`` and ``predict_by_feat`` methods in order. If one-stage is
    used as RPN, the densehead needs to return both losses and predictions.
    These predictions are used as the proposals for the roi head.

    .. code:: text

    loss_and_predict(): forward() -> loss_by_feat() -> predict_by_feat()
    """

    def __init__(self, init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)
        # `_raw_positive_infos` will be used in `get_positive_infos`, which
        # can get positive information.
        self._raw_positive_infos = dict()

    def init_weights(self) -> None:
        """Initialize the weights."""
        super().init_weights()
        # avoid init_cfg overwrite the initialization of `conv_offset`
        for m in self.modules():
            # DeformConv2dPack, ModulatedDeformConv2dPack
            if hasattr(m, 'conv_offset'):
                constant_init(m.conv_offset, 0)

    def get_positive_infos(self) -> InstanceList:
        """Get positive information from sampling results.

        Returns:
            list[:obj:`InstanceData`]: Positive information of each image,
            usually including positive bboxes, positive labels, positive
            priors, etc.
""" if len(self._raw_positive_infos) == 0: return None sampling_results = self._raw_positive_infos.get( 'sampling_results', None) assert sampling_results is not None positive_infos = [] for sampling_result in enumerate(sampling_results): pos_info = InstanceData() pos_info.bboxes = sampling_result.pos_gt_bboxes pos_info.labels = sampling_result.pos_gt_labels pos_info.priors = sampling_result.pos_priors pos_info.pos_assigned_gt_inds = \ sampling_result.pos_assigned_gt_inds pos_info.pos_inds = sampling_result.pos_inds positive_infos.append(pos_info) return positive_infos def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection head on the features of the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. """ outs = self(x) outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses @abstractmethod def loss_by_feat(self, **kwargs) -> dict: """Calculate the loss based on the features extracted by the detection head.""" pass def loss_and_predict( self, x: Tuple[Tensor], batch_data_samples: SampleList, proposal_cfg: Optional[ConfigDict] = None ) -> Tuple[dict, InstanceList]: """Perform forward propagation of the head, then calculate loss and predictions from the features and data samples. Args: x (tuple[Tensor]): Features from FPN. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. proposal_cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. Returns: tuple: the return value is a tuple contains: - losses: (dict[str, Tensor]): A dictionary of loss components. - predictions (list[:obj:`InstanceData`]): Detection results of each image after the post process. """ outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs outs = self(x) loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, cfg=proposal_cfg) return losses, predictions def predict(self, x: Tuple[Tensor], batch_data_samples: SampleList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] outs = self(x) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return predictions def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) if score_factors is None: # e.g. Retina, FreeAnchor, Foveabox, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, AutoAssign, etc. with_score_factors = True assert len(cls_scores) == len(score_factors) num_levels = len(cls_scores) featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device) result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] cls_score_list = select_single_mlvl( cls_scores, img_id, detach=True) bbox_pred_list = select_single_mlvl( bbox_preds, img_id, detach=True) if with_score_factors: score_factor_list = select_single_mlvl( score_factors, img_id, detach=True) else: score_factor_list = [None for _ in range(num_levels)] results = self._predict_by_feat_single( cls_score_list=cls_score_list, bbox_pred_list=bbox_pred_list, score_factor_list=score_factor_list, mlvl_priors=mlvl_priors, img_meta=img_meta, cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(results) return result_list def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). 
bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ if score_factor_list[0] is None: # e.g. Retina, FreeAnchor, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, etc. with_score_factors = True cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bbox_preds = [] mlvl_valid_priors = [] mlvl_scores = [] mlvl_labels = [] if with_score_factors: mlvl_score_factors = [] else: mlvl_score_factors = None for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] dim = self.bbox_coder.encode_size bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim) if with_score_factors: score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = cls_score.softmax(-1)[:, :-1] # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. 
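            # `filter_scores_and_topk` thresholds the flattened per-class
            # scores with `score_thr` and keeps at most `nms_pre` candidates
            # per level; `keep_idxs` maps the survivors back to the
            # pre-filter ordering so auxiliary tensors (e.g. the score
            # factors below) can be gathered consistently.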
            score_thr = cfg.get('score_thr', 0)

            results = filter_scores_and_topk(
                scores, score_thr, nms_pre,
                dict(bbox_pred=bbox_pred, priors=priors))
            scores, labels, keep_idxs, filtered_results = results

            bbox_pred = filtered_results['bbox_pred']
            priors = filtered_results['priors']

            if with_score_factors:
                score_factor = score_factor[keep_idxs]

            mlvl_bbox_preds.append(bbox_pred)
            mlvl_valid_priors.append(priors)
            mlvl_scores.append(scores)
            mlvl_labels.append(labels)

            if with_score_factors:
                mlvl_score_factors.append(score_factor)

        bbox_pred = torch.cat(mlvl_bbox_preds)
        priors = cat_boxes(mlvl_valid_priors)
        bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape)

        results = InstanceData()
        results.bboxes = bboxes
        results.scores = torch.cat(mlvl_scores)
        results.labels = torch.cat(mlvl_labels)
        if with_score_factors:
            results.score_factors = torch.cat(mlvl_score_factors)

        return self._bbox_post_process(
            results=results,
            cfg=cfg,
            rescale=rescale,
            with_nms=with_nms,
            img_meta=img_meta)

    def _bbox_post_process(self,
                           results: InstanceData,
                           cfg: ConfigDict,
                           rescale: bool = False,
                           with_nms: bool = True,
                           img_meta: Optional[dict] = None) -> InstanceData:
        """bbox post-processing method.

        The boxes are rescaled to the original image scale and NMS is
        applied. `with_nms` is usually False when the method is used for
        aug test.

        Args:
            results (:obj:`InstanceData`): Detection instance results,
                each item has shape (num_bboxes, ).
            cfg (ConfigDict): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
            with_nms (bool): If True, do nms before return boxes.
                Defaults to True.
            img_meta (dict, optional): Image meta info. Defaults to None.

        Returns:
            :obj:`InstanceData`: Detection results of each image
            after the post process.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        if rescale:
            assert img_meta.get('scale_factor') is not None
            scale_factor = [1 / s for s in img_meta['scale_factor']]
            results.bboxes = scale_boxes(results.bboxes, scale_factor)

        if hasattr(results, 'score_factors'):
            # TODO: Add sqrt operation in order to be consistent with
            #  the paper.
            score_factors = results.pop('score_factors')
            results.scores = results.scores * score_factors

        # filter small size bboxes
        if cfg.get('min_bbox_size', -1) >= 0:
            w, h = get_box_wh(results.bboxes)
            valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
            if not valid_mask.all():
                results = results[valid_mask]

        # TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg
        if with_nms and results.bboxes.numel() > 0:
            bboxes = get_box_tensor(results.bboxes)
            det_bboxes, keep_idxs = batched_nms(bboxes, results.scores,
                                                results.labels, cfg.nms)
            results = results[keep_idxs]
            # some nms would reweight the score, such as softnms
            results.scores = det_bboxes[:, -1]
            results = results[:cfg.max_per_img]

        return results

    def aug_test(self,
                 aug_batch_feats,
                 aug_batch_img_metas,
                 rescale=False,
                 with_ori_nms=False,
                 **kwargs):
        """Test function with test time augmentation.
        Args:
            aug_batch_feats (list[tuple[Tensor]]): The outer list indicates
                test-time augmentations and the inner tuple indicates the
                multi-level feats from FPN, each Tensor should have a shape
                (B, C, H, W).
            aug_batch_img_metas (list[list[dict]]): Meta information of images
                under the different test-time augs (multiscale, flip, etc.).
                The outer list indicates the augmentations and the inner list
                indicates the images in a batch.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.
            with_ori_nms (bool): Whether execute the nms in original head.
                Defaults to False. It will be `True` when the head is
                adopted as `rpn_head`.

        Returns:
            list[:obj:`InstanceData`]: Detection results of the
            input images. Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance,)
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances,).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        # TODO: remove this for detr and deformdetr
        sig_of_get_results = signature(self.get_results)
        get_results_args = [
            p.name for p in sig_of_get_results.parameters.values()
        ]
        get_results_single_sig = signature(self._get_results_single)
        get_results_single_sig_args = [
            p.name for p in get_results_single_sig.parameters.values()
        ]
        assert ('with_nms' in get_results_args) and \
               ('with_nms' in get_results_single_sig_args), \
               f'{self.__class__.__name__}' \
               'does not support test-time augmentation '

        num_imgs = len(aug_batch_img_metas[0])
        aug_batch_results = []
        for x, img_metas in zip(aug_batch_feats, aug_batch_img_metas):
            outs = self.forward(x)
            batch_instance_results = self.get_results(
                *outs,
                img_metas=img_metas,
                cfg=self.test_cfg,
                rescale=False,
                with_nms=with_ori_nms,
                **kwargs)
            aug_batch_results.append(batch_instance_results)

        # after merging, bboxes will be rescaled to the original image
        batch_results = merge_aug_results(aug_batch_results,
                                          aug_batch_img_metas)

        final_results = []
        for img_id in range(num_imgs):
            results = batch_results[img_id]
            det_bboxes, keep_idxs = batched_nms(results.bboxes,
                                                results.scores,
                                                results.labels,
                                                self.test_cfg.nms)
            results = results[keep_idxs]
            # some nms operation may reweight the score such as softnms
            results.scores = det_bboxes[:, -1]
            results = results[:self.test_cfg.max_per_img]
            if rescale:
                # all results have been mapped to the original scale
                # in `merge_aug_results`, so just pass
                pass
            else:
                # map to the first aug image scale
                scale_factor = results.bboxes.new_tensor(
                    aug_batch_img_metas[0][img_id]['scale_factor'])
                results.bboxes = \
                    results.bboxes * scale_factor

            final_results.append(results)

        return final_results
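
# A minimal call-flow sketch for a concrete subclass (illustrative;
# ``my_head``, ``feats`` and ``data_samples`` are hypothetical stand-ins
# for a built head, FPN features and packed ``DetDataSample`` inputs):
#
#     losses = my_head.loss(feats, data_samples)            # training
#     results = my_head.predict(feats, data_samples,        # inference
#                               rescale=True)
#     # a one-stage head used as an RPN needs both at once:
#     losses, proposals = my_head.loss_and_predict(
#         feats, data_samples, proposal_cfg=my_head.test_cfg)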
24,198
40.866782
79
py
ERD
ERD-main/mmdet/models/dense_heads/ddod_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Sequence, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmengine.model import bias_init_with_prob, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, reduce_mean) from ..task_modules.prior_generators import anchor_inside_flags from ..utils import images_to_levels, multi_apply, unmap from .anchor_head import AnchorHead EPS = 1e-12 @MODELS.register_module() class DDODHead(AnchorHead): """Detection Head of `DDOD <https://arxiv.org/abs/2107.02963>`_. DDOD head decomposes conjunctions lying in most current one-stage detectors via label assignment disentanglement, spatial feature disentanglement, and pyramid supervision disentanglement. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): The number of stacked Conv. Defaults to 4. conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for convolution layer. Defaults to None. use_dcn (bool): Use dcn, Same as ATSS when False. Defaults to True. norm_cfg (:obj:`ConfigDict` or dict): Normal config of ddod head. Defaults to dict(type='GN', num_groups=32, requires_grad=True). loss_iou (:obj:`ConfigDict` or dict): Config of IoU loss. Defaults to dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0). """ def __init__(self, num_classes: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, use_dcn: bool = True, norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), loss_iou: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), **kwargs) -> None: self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.use_dcn = use_dcn super().__init__(num_classes, in_channels, **kwargs) if self.train_cfg: self.cls_assigner = TASK_UTILS.build(self.train_cfg['assigner']) self.reg_assigner = TASK_UTILS.build( self.train_cfg['reg_assigner']) self.loss_iou = MODELS.build(loss_iou) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=dict(type='DCN', deform_groups=1) if i == 0 and self.use_dcn else self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=dict(type='DCN', deform_groups=1) if i == 0 and self.use_dcn else self.conv_cfg, norm_cfg=self.norm_cfg)) self.atss_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.atss_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.atss_iou = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) # we use the global list in loss self.cls_num_pos_samples_per_level = [ 0. for _ in range(len(self.prior_generator.strides)) ] self.reg_num_pos_samples_per_level = [ 0. 
for _ in range(len(self.prior_generator.strides)) ] def init_weights(self) -> None: """Initialize weights of the head.""" for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) normal_init(self.atss_reg, std=0.01) normal_init(self.atss_iou, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.atss_cls, std=0.01, bias=bias_cls) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores, bbox predictions, and iou predictions. - cls_scores (list[Tensor]): Classification scores for all \ scale levels, each is a 4D-tensor, the channels number is \ num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all \ scale levels, each is a 4D-tensor, the channels number is \ num_base_priors * 4. - iou_preds (list[Tensor]): IoU scores for all scale levels, \ each is a 4D-tensor, the channels number is num_base_priors * 1. """ return multi_apply(self.forward_single, x, self.scales) def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: - cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single \ scale level, the channels number is num_base_priors * 4. - iou_pred (Tensor): Iou for a single scale level, the \ channel number is (N, num_base_priors * 1, H, W). """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.atss_cls(cls_feat) # we just follow atss, not apply exp in bbox_pred bbox_pred = scale(self.atss_reg(reg_feat)).float() iou_pred = self.atss_iou(reg_feat) return cls_score, bbox_pred, iou_pred def loss_cls_by_feat_single(self, cls_score: Tensor, labels: Tensor, label_weights: Tensor, reweight_factor: List[float], avg_factor: float) -> Tuple[Tensor]: """Compute cls loss of a single scale level. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_base_priors * num_classes, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) reweight_factor (List[float]): Reweight factor for cls and reg loss. avg_factor (float): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: Tuple[Tensor]: A tuple of loss components. 
""" cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor) return reweight_factor * loss_cls, def loss_reg_by_feat_single(self, anchors: Tensor, bbox_pred: Tensor, iou_pred: Tensor, labels, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, reweight_factor: List[float], avg_factor: float) -> Tuple[Tensor, Tensor]: """Compute reg loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_base_priors * 4, H, W). iou_pred (Tensor): Iou for a single scale level, the channel number is (N, num_base_priors * 1, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) reweight_factor (List[float]): Reweight factor for cls and reg loss. avg_factor (float): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: Tuple[Tensor, Tensor]: A tuple of loss components. """ anchors = anchors.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1, ) bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) iou_targets = label_weights.new_zeros(labels.shape) iou_weights = label_weights.new_zeros(labels.shape) iou_weights[(bbox_weights.sum(axis=1) > 0).nonzero( as_tuple=False)] = 1. 
        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero(
                        as_tuple=False).squeeze(1)

        if len(pos_inds) > 0:
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_bbox_pred = bbox_pred[pos_inds]
            pos_anchors = anchors[pos_inds]

            pos_decode_bbox_pred = self.bbox_coder.decode(
                pos_anchors, pos_bbox_pred)
            pos_decode_bbox_targets = self.bbox_coder.decode(
                pos_anchors, pos_bbox_targets)

            # regression loss
            loss_bbox = self.loss_bbox(
                pos_decode_bbox_pred,
                pos_decode_bbox_targets,
                avg_factor=avg_factor)

            iou_targets[pos_inds] = bbox_overlaps(
                pos_decode_bbox_pred.detach(),
                pos_decode_bbox_targets,
                is_aligned=True)
            loss_iou = self.loss_iou(
                iou_pred, iou_targets, iou_weights, avg_factor=avg_factor)
        else:
            loss_bbox = bbox_pred.sum() * 0
            loss_iou = iou_pred.sum() * 0

        return reweight_factor * loss_bbox, reweight_factor * loss_iou

    def calc_reweight_factor(self, labels_list: List[Tensor]) -> List[float]:
        """Compute reweight_factor for regression and classification loss."""
        # get pos samples for each level
        bg_class_ind = self.num_classes
        for ii, each_level_label in enumerate(labels_list):
            pos_inds = ((each_level_label >= 0) &
                        (each_level_label < bg_class_ind)).nonzero(
                            as_tuple=False).squeeze(1)
            self.cls_num_pos_samples_per_level[ii] += len(pos_inds)
        # get reweight factor from 1 ~ 2 with linear interpolation
        min_pos_samples = min(self.cls_num_pos_samples_per_level)
        max_pos_samples = max(self.cls_num_pos_samples_per_level)
        interval = 1. / (max_pos_samples - min_pos_samples + 1e-10)
        reweight_factor_per_level = []
        for pos_samples in self.cls_num_pos_samples_per_level:
            factor = 2. - (pos_samples - min_pos_samples) * interval
            reweight_factor_per_level.append(factor)
        return reweight_factor_per_level

    def loss_by_feat(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            iou_preds: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_base_priors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_base_priors * 4, H, W)
            iou_preds (list[Tensor]): Score factor for all scale level,
                each is a 4D-tensor, has shape (batch_size, 1, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) # calculate common vars for cls and reg assigners at once targets_com = self.process_predictions_and_anchors( anchor_list, valid_flag_list, cls_scores, bbox_preds, batch_img_metas, batch_gt_instances_ignore) (anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, batch_gt_instances_ignore) = targets_com # classification branch assigner cls_targets = self.get_cls_targets( anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (cls_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() avg_factor = max(avg_factor, 1.0) reweight_factor_per_level = self.calc_reweight_factor(labels_list) cls_losses_cls, = multi_apply( self.loss_cls_by_feat_single, cls_scores, labels_list, label_weights_list, reweight_factor_per_level, avg_factor=avg_factor) # regression branch assigner reg_targets = self.get_reg_targets( anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (reg_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() avg_factor = max(avg_factor, 1.0) reweight_factor_per_level = self.calc_reweight_factor(labels_list) reg_losses_bbox, reg_losses_iou = multi_apply( self.loss_reg_by_feat_single, reg_anchor_list, bbox_preds, iou_preds, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, reweight_factor_per_level, avg_factor=avg_factor) return dict( loss_cls=cls_losses_cls, loss_bbox=reg_losses_bbox, loss_iou=reg_losses_iou) def process_predictions_and_anchors( self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> tuple: """Compute common vars for regression and classification targets. Args: anchor_list (List[List[Tensor]]): anchors of each image. valid_flag_list (List[List[Tensor]]): Valid flags of each image. cls_scores (List[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Return: tuple[Tensor]: A tuple of common loss vars. 
""" num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs anchor_list_ = [] valid_flag_list_ = [] # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list_.append(torch.cat(anchor_list[i])) valid_flag_list_.append(torch.cat(valid_flag_list[i])) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None for _ in range(num_imgs)] num_levels = len(cls_scores) cls_score_list = [] bbox_pred_list = [] mlvl_cls_score_list = [ cls_score.permute(0, 2, 3, 1).reshape( num_imgs, -1, self.num_base_priors * self.cls_out_channels) for cls_score in cls_scores ] mlvl_bbox_pred_list = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_base_priors * 4) for bbox_pred in bbox_preds ] for i in range(num_imgs): mlvl_cls_tensor_list = [ mlvl_cls_score_list[j][i] for j in range(num_levels) ] mlvl_bbox_tensor_list = [ mlvl_bbox_pred_list[j][i] for j in range(num_levels) ] cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0) cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0) cls_score_list.append(cat_mlvl_cls_score) bbox_pred_list.append(cat_mlvl_bbox_pred) return (anchor_list_, valid_flag_list_, num_level_anchors_list, cls_score_list, bbox_pred_list, batch_gt_instances_ignore) def get_cls_targets(self, anchor_list: List[Tensor], valid_flag_list: List[Tensor], num_level_anchors_list: List[int], cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Get cls targets for DDOD head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. Args: anchor_list (list[Tensor]): anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. num_level_anchors_list (list[Tensor]): Number of anchors of each scale level of all image. cls_score_list (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_pred_list (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Return: tuple[Tensor]: A tuple of cls targets components. 
""" (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, anchor_list, valid_flag_list, cls_score_list, bbox_pred_list, num_level_anchors_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs, is_cls_assigner=True) # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) label_weights_list = images_to_levels(all_label_weights, num_level_anchors_list[0]) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors_list[0]) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors_list[0]) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) def get_reg_targets(self, anchor_list: List[Tensor], valid_flag_list: List[Tensor], num_level_anchors_list: List[int], cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Get reg targets for DDOD head. This method is almost the same as `AnchorHead.get_targets()` when is_cls_assigner is False. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. Args: anchor_list (list[Tensor]): anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. num_level_anchors_list (list[Tensor]): Number of anchors of each scale level of all image. cls_score_list (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_pred_list (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Return: tuple[Tensor]: A tuple of reg targets components. """ (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, anchor_list, valid_flag_list, cls_score_list, bbox_pred_list, num_level_anchors_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs, is_cls_assigner=False) # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. 
avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) label_weights_list = images_to_levels(all_label_weights, num_level_anchors_list[0]) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors_list[0]) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors_list[0]) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, cls_scores: Tensor, bbox_preds: Tensor, num_level_anchors: List[int], gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True, is_cls_assigner: bool = True) -> tuple: """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_base_priors, 4). valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_base_priors,). cls_scores (Tensor): Classification scores for all scale levels of the image. bbox_preds (Tensor): Box energies / deltas for all scale levels of the image. num_level_anchors (List[int]): Number of anchors of each scale level. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. is_cls_assigner (bool): Classification or regression. Defaults to True. Returns: tuple: N is the number of total anchors in the image. - anchors (Tensor): all anchors in the image with shape (N, 4). - labels (Tensor): Labels of all anchors in the image with \ shape (N, ). - label_weights (Tensor): Label weights of all anchor in the \ image with shape (N, ). - bbox_targets (Tensor): BBox targets of all anchors in the \ image with shape (N, 4). - bbox_weights (Tensor): BBox weights of all anchors in the \ image with shape (N, 4) - pos_inds (Tensor): Indices of positive anchor with shape \ (num_pos, ). - neg_inds (Tensor): Indices of negative anchor with shape \ (num_neg, ). - sampling_result (:obj:`SamplingResult`): Sampling results. """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. 
Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) bbox_preds_valid = bbox_preds[inside_flags, :] cls_scores_valid = cls_scores[inside_flags, :] assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner # decode prediction out of assigner bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid) pred_instances = InstanceData( priors=anchors, bboxes=bbox_preds_valid, scores=cls_scores_valid) assign_result = assigner.assign( pred_instances=pred_instances, num_level_priors=num_level_anchors_inside, gt_instances=gt_instances, gt_instances_ignore=gt_instances_ignore) sampling_result = self.sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def get_num_level_anchors_inside(self, num_level_anchors: List[int], inside_flags: Tensor) -> List[int]: """Get the anchors of each scale level inside. Args: num_level_anchors (list[int]): Number of anchors of each scale level. inside_flags (Tensor): Multi level inside flags of the image, which are concatenated into a single tensor of shape (num_base_priors,). Returns: list[int]: Number of anchors of each scale level inside. """ split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
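
# A minimal worked example of ``get_num_level_anchors_inside`` (illustrative
# inputs, not part of the original file):
#
#     num_level_anchors = [4, 2]
#     inside_flags = torch.tensor([1, 1, 0, 1, 0, 1], dtype=torch.bool)
#     # torch.split -> [1, 1, 0, 1] and [0, 1]; per-level sums -> [3, 1]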
36,278
44.633962
79
py
ERD
ERD-main/mmdet/models/dense_heads/free_anchor_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List

import torch
import torch.nn.functional as F
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox_overlaps
from mmdet.utils import InstanceList, OptConfigType, OptInstanceList
from ..utils import multi_apply
from .retina_head import RetinaHead

EPS = 1e-12


@MODELS.register_module()
class FreeAnchorRetinaHead(RetinaHead):
    """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        stacked_convs (int): Number of conv layers in cls and reg tower.
            Defaults to 4.
        conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to
            construct and config conv layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict, optional): dictionary to
            construct and config norm layer. Defaults to
            norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
        pre_anchor_topk (int): Number of boxes that are taken in each bag.
            Defaults to 50.
        bbox_thr (float): The threshold of the saturated linear function.
            It is usually the same as the IoU threshold used in NMS.
            Defaults to 0.6.
        gamma (float): Gamma parameter in focal loss. Defaults to 2.0.
        alpha (float): Alpha parameter in focal loss. Defaults to 0.5.
    """

    def __init__(self,
                 num_classes: int,
                 in_channels: int,
                 stacked_convs: int = 4,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: OptConfigType = None,
                 pre_anchor_topk: int = 50,
                 bbox_thr: float = 0.6,
                 gamma: float = 2.0,
                 alpha: float = 0.5,
                 **kwargs) -> None:
        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            stacked_convs=stacked_convs,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            **kwargs)
        self.pre_anchor_topk = pre_anchor_topk
        self.bbox_thr = bbox_thr
        self.gamma = gamma
        self.alpha = alpha

    def loss_by_feat(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                has shape (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict: A dictionary of loss components.
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, _ = self.get_anchors( featmap_sizes=featmap_sizes, batch_img_metas=batch_img_metas, device=device) concat_anchor_list = [torch.cat(anchor) for anchor in anchor_list] # concatenate each level cls_scores = [ cls.permute(0, 2, 3, 1).reshape(cls.size(0), -1, self.cls_out_channels) for cls in cls_scores ] bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) for bbox_pred in bbox_preds ] cls_scores = torch.cat(cls_scores, dim=1) cls_probs = torch.sigmoid(cls_scores) bbox_preds = torch.cat(bbox_preds, dim=1) box_probs, positive_losses, num_pos_list = multi_apply( self.positive_loss_single, cls_probs, bbox_preds, concat_anchor_list, batch_gt_instances) num_pos = sum(num_pos_list) positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) # box_prob: P{a_{j} \in A_{+}} box_probs = torch.stack(box_probs, dim=0) # negative_loss: # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| negative_loss = self.negative_bag_loss(cls_probs, box_probs).sum() / \ max(1, num_pos * self.pre_anchor_topk) # avoid the absence of gradients in regression subnet # when no ground-truth in a batch if num_pos == 0: positive_loss = bbox_preds.sum() * 0 losses = { 'positive_bag_loss': positive_loss, 'negative_bag_loss': negative_loss } return losses def positive_loss_single(self, cls_prob: Tensor, bbox_pred: Tensor, flat_anchors: Tensor, gt_instances: InstanceData) -> tuple: """Compute positive loss. Args: cls_prob (Tensor): Classification probability of shape (num_anchors, num_classes). bbox_pred (Tensor): Box probability of shape (num_anchors, 4). flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4) gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. Returns: tuple: - box_prob (Tensor): Box probability of shape (num_anchors, 4). - positive_loss (Tensor): Positive loss of shape (num_pos, ). - num_pos (int): positive samples indexes. 
""" gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels with torch.no_grad(): if len(gt_bboxes) == 0: image_box_prob = torch.zeros( flat_anchors.size(0), self.cls_out_channels).type_as(bbox_pred) else: # box_localization: a_{j}^{loc}, shape: [j, 4] pred_boxes = self.bbox_coder.decode(flat_anchors, bbox_pred) # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] object_box_iou = bbox_overlaps(gt_bboxes, pred_boxes) # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] t1 = self.bbox_thr t2 = object_box_iou.max( dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp( min=0, max=1) # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] num_obj = gt_labels.size(0) indices = torch.stack( [torch.arange(num_obj).type_as(gt_labels), gt_labels], dim=0) object_cls_box_prob = torch.sparse_coo_tensor( indices, object_box_prob) # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] """ from "start" to "end" implement: image_box_iou = torch.sparse.max(object_cls_box_prob, dim=0).t() """ # start box_cls_prob = torch.sparse.sum( object_cls_box_prob, dim=0).to_dense() indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() if indices.numel() == 0: image_box_prob = torch.zeros( flat_anchors.size(0), self.cls_out_channels).type_as(object_box_prob) else: nonzero_box_prob = torch.where( (gt_labels.unsqueeze(dim=-1) == indices[0]), object_box_prob[:, indices[1]], torch.tensor( [0]).type_as(object_box_prob)).max(dim=0).values # upmap to shape [j, c] image_box_prob = torch.sparse_coo_tensor( indices.flip([0]), nonzero_box_prob, size=(flat_anchors.size(0), self.cls_out_channels)).to_dense() # end box_prob = image_box_prob # construct bags for objects match_quality_matrix = bbox_overlaps(gt_bboxes, flat_anchors) _, matched = torch.topk( match_quality_matrix, self.pre_anchor_topk, dim=1, sorted=False) del match_quality_matrix # matched_cls_prob: P_{ij}^{cls} matched_cls_prob = torch.gather( cls_prob[matched], 2, gt_labels.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, 1)).squeeze(2) # matched_box_prob: P_{ij}^{loc} matched_anchors = flat_anchors[matched] matched_object_targets = self.bbox_coder.encode( matched_anchors, gt_bboxes.unsqueeze(dim=1).expand_as(matched_anchors)) loss_bbox = self.loss_bbox( bbox_pred[matched], matched_object_targets, reduction_override='none').sum(-1) matched_box_prob = torch.exp(-loss_bbox) # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} num_pos = len(gt_bboxes) positive_loss = self.positive_bag_loss(matched_cls_prob, matched_box_prob) return box_prob, positive_loss, num_pos def positive_bag_loss(self, matched_cls_prob: Tensor, matched_box_prob: Tensor) -> Tensor: """Compute positive bag loss. :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`. :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples. :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples. Args: matched_cls_prob (Tensor): Classification probability of matched samples in shape (num_gt, pre_anchor_topk). matched_box_prob (Tensor): BBox probability of matched samples, in shape (num_gt, pre_anchor_topk). Returns: Tensor: Positive bag loss in shape (num_gt,). 
""" # noqa: E501, W605 # bag_prob = Mean-max(matched_prob) matched_prob = matched_cls_prob * matched_box_prob weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) weight /= weight.sum(dim=1).unsqueeze(dim=-1) bag_prob = (weight * matched_prob).sum(dim=1) # positive_bag_loss = -self.alpha * log(bag_prob) return self.alpha * F.binary_cross_entropy( bag_prob, torch.ones_like(bag_prob), reduction='none') def negative_bag_loss(self, cls_prob: Tensor, box_prob: Tensor) -> Tensor: """Compute negative bag loss. :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`. :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples. :math:`P_{j}^{bg}`: Classification probability of negative samples. Args: cls_prob (Tensor): Classification probability, in shape (num_img, num_anchors, num_classes). box_prob (Tensor): Box probability, in shape (num_img, num_anchors, num_classes). Returns: Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes). """ # noqa: E501, W605 prob = cls_prob * (1 - box_prob) # There are some cases when neg_prob = 0. # This will cause the neg_prob.log() to be inf without clamp. prob = prob.clamp(min=EPS, max=1 - EPS) negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( prob, torch.zeros_like(prob), reduction='none') return (1 - self.alpha) * negative_bag_loss
12,824
39.974441
94
py
ERD
ERD-main/mmdet/models/dense_heads/guided_anchor_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch import torch.nn as nn from mmcv.ops import DeformConv2d, MaskedConv2d from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList) from ..layers import multiclass_nms from ..task_modules.prior_generators import anchor_inside_flags, calc_region from ..task_modules.samplers import PseudoSampler from ..utils import images_to_levels, multi_apply, unmap from .anchor_head import AnchorHead class FeatureAdaption(BaseModule): """Feature Adaption Module. Feature Adaption Module is implemented based on DCN v1. It uses anchor shape prediction rather than feature map to predict offsets of deform conv layer. Args: in_channels (int): Number of channels in the input feature map. out_channels (int): Number of channels in the output feature map. kernel_size (int): Deformable conv kernel size. Defaults to 3. deform_groups (int): Deformable conv group size. Defaults to 4. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \ list[dict], optional): Initialization config dict. """ def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, deform_groups: int = 4, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.1, override=dict(type='Normal', name='conv_adaption', std=0.01)) ) -> None: super().__init__(init_cfg=init_cfg) offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 2, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def forward(self, x: Tensor, shape: Tensor) -> Tensor: offset = self.conv_offset(shape.detach()) x = self.relu(self.conv_adaption(x, offset)) return x @MODELS.register_module() class GuidedAnchorHead(AnchorHead): """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). This GuidedAnchorHead will predict high-quality feature guided anchors and locations where anchors will be kept in inference. There are mainly 3 categories of bounding-boxes. - Sampled 9 pairs for target assignment. (approxes) - The square boxes where the predicted anchors are based on. (squares) - Guided anchors. Please refer to https://arxiv.org/abs/1901.03278 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Defaults to 256. approx_anchor_generator (:obj:`ConfigDict` or dict): Config dict for approx generator square_anchor_generator (:obj:`ConfigDict` or dict): Config dict for square generator anchor_coder (:obj:`ConfigDict` or dict): Config dict for anchor coder bbox_coder (:obj:`ConfigDict` or dict): Config dict for bbox coder reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Defaults to False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. deform_groups: (int): Group number of DCN in FeatureAdaption module. Defaults to 4. loc_filter_thr (float): Threshold to filter out unconcerned regions. Defaults to 0.01. loss_loc (:obj:`ConfigDict` or dict): Config of location loss. 
loss_shape (:obj:`ConfigDict` or dict): Config of anchor shape loss. loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of bbox regression loss. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \ list[dict], optional): Initialization config dict. """ def __init__( self, num_classes: int, in_channels: int, feat_channels: int = 256, approx_anchor_generator: ConfigType = dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator: ConfigType = dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder: ConfigType = dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), bbox_coder: ConfigType = dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), reg_decoded_bbox: bool = False, deform_groups: int = 4, loc_filter_thr: float = 0.01, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, loss_loc: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape: ConfigType = dict( type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.0), init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_loc', std=0.01, lbias_prob=0.01)) ) -> None: super(AnchorHead, self).__init__(init_cfg=init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.deform_groups = deform_groups self.loc_filter_thr = loc_filter_thr # build approx_anchor_generator and square_anchor_generator assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = TASK_UTILS.build( approx_anchor_generator) self.square_anchor_generator = TASK_UTILS.build( square_anchor_generator) self.approxs_per_octave = self.approx_anchor_generator \ .num_base_priors[0] self.reg_decoded_bbox = reg_decoded_bbox # one anchor per location self.num_base_priors = self.square_anchor_generator.num_base_priors[0] self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes else: self.cls_out_channels = self.num_classes + 1 # build bbox_coder self.anchor_coder = TASK_UTILS.build(anchor_coder) self.bbox_coder = TASK_UTILS.build(bbox_coder) # build losses self.loss_loc = MODELS.build(loss_loc) self.loss_shape = MODELS.build(loss_shape) self.loss_cls = MODELS.build(loss_cls) self.loss_bbox = MODELS.build(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) # use PseudoSampler when no sampler in train_cfg if train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler() self.ga_assigner = TASK_UTILS.build(self.train_cfg['ga_assigner']) if train_cfg.get('ga_sampler', None) is not None: self.ga_sampler = TASK_UTILS.build( self.train_cfg['ga_sampler'], default_args=dict(context=self)) else: 
self.ga_sampler = PseudoSampler() self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) self.conv_shape = nn.Conv2d(self.in_channels, self.num_base_priors * 2, 1) self.feature_adaption = FeatureAdaption( self.in_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = MaskedConv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 1) self.conv_reg = MaskedConv2d(self.feat_channels, self.num_base_priors * 4, 1) def forward_single(self, x: Tensor) -> Tuple[Tensor]: """Forward feature of a single scale level.""" loc_pred = self.conv_loc(x) shape_pred = self.conv_shape(x) x = self.feature_adaption(x, shape_pred) # masked conv is only used during inference for speed-up if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.conv_cls(x, mask) bbox_pred = self.conv_reg(x, mask) return cls_score, bbox_pred, shape_pred, loc_pred def forward(self, x: List[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network.""" return multi_apply(self.forward_single, x) def get_sampled_approxs(self, featmap_sizes: List[Tuple[int, int]], batch_img_metas: List[dict], device: str = 'cuda') -> tuple: """Get sampled approxs and inside flags according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. batch_img_metas (list[dict]): Image meta info. device (str): device for returned tensors Returns: tuple: approxes of each image, inside flags of each image """ num_imgs = len(batch_img_metas) # since feature map sizes of all images are the same, we only compute # approxes for one time multi_level_approxs = self.approx_anchor_generator.grid_priors( featmap_sizes, device=device) approxs_list = [multi_level_approxs for _ in range(num_imgs)] # for each image, we compute inside flags of multi level approxes inside_flag_list = [] for img_id, img_meta in enumerate(batch_img_metas): multi_level_flags = [] multi_level_approxs = approxs_list[img_id] # obtain valid flags for each approx first multi_level_approx_flags = self.approx_anchor_generator \ .valid_flags(featmap_sizes, img_meta['pad_shape'], device=device) for i, flags in enumerate(multi_level_approx_flags): approxs = multi_level_approxs[i] inside_flags_list = [] for j in range(self.approxs_per_octave): split_valid_flags = flags[j::self.approxs_per_octave] split_approxs = approxs[j::self.approxs_per_octave, :] inside_flags = anchor_inside_flags( split_approxs, split_valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) inside_flags_list.append(inside_flags) # inside_flag for a position is true if any anchor in this # position is true inside_flags = ( torch.stack(inside_flags_list, 0).sum(dim=0) > 0) multi_level_flags.append(inside_flags) inside_flag_list.append(multi_level_flags) return approxs_list, inside_flag_list def get_anchors(self, featmap_sizes: List[Tuple[int, int]], shape_preds: List[Tensor], loc_preds: List[Tensor], batch_img_metas: List[dict], use_loc_filter: bool = False, device: str = 'cuda') -> tuple: """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. shape_preds (list[tensor]): Multi-level shape predictions. loc_preds (list[tensor]): Multi-level location predictions. batch_img_metas (list[dict]): Image meta info. use_loc_filter (bool): Use loc filter or not. 
Defaults to False device (str): device for returned tensors. Defaults to `cuda`. Returns: tuple: square approxs of each image, guided anchors of each image, loc masks of each image. """ num_imgs = len(batch_img_metas) num_levels = len(featmap_sizes) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = self.square_anchor_generator.grid_priors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] # for each image, we compute multi level guided anchors guided_anchors_list = [] loc_mask_list = [] for img_id, img_meta in enumerate(batch_img_metas): multi_level_guided_anchors = [] multi_level_loc_mask = [] for i in range(num_levels): squares = squares_list[img_id][i] shape_pred = shape_preds[i][img_id] loc_pred = loc_preds[i][img_id] guided_anchors, loc_mask = self._get_guided_anchors_single( squares, shape_pred, loc_pred, use_loc_filter=use_loc_filter) multi_level_guided_anchors.append(guided_anchors) multi_level_loc_mask.append(loc_mask) guided_anchors_list.append(multi_level_guided_anchors) loc_mask_list.append(multi_level_loc_mask) return squares_list, guided_anchors_list, loc_mask_list def _get_guided_anchors_single( self, squares: Tensor, shape_pred: Tensor, loc_pred: Tensor, use_loc_filter: bool = False) -> Tuple[Tensor]: """Get guided anchors and loc masks for a single level. Args: squares (tensor): Squares of a single level. shape_pred (tensor): Shape predictions of a single level. loc_pred (tensor): Loc predictions of a single level. use_loc_filter (list[tensor]): Use loc filter or not. Defaults to False. Returns: tuple: guided anchors, location masks """ # calculate location filtering mask loc_pred = loc_pred.sigmoid().detach() if use_loc_filter: loc_mask = loc_pred >= self.loc_filter_thr else: loc_mask = loc_pred >= 0.0 mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_base_priors) mask = mask.contiguous().view(-1) # calculate guided anchors squares = squares[mask] anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( -1, 2).detach()[mask] bbox_deltas = anchor_deltas.new_full(squares.size(), 0) bbox_deltas[:, 2:] = anchor_deltas guided_anchors = self.anchor_coder.decode( squares, bbox_deltas, wh_ratio_clip=1e-6) return guided_anchors, mask def ga_loc_targets(self, batch_gt_instances: InstanceList, featmap_sizes: List[Tuple[int, int]]) -> tuple: """Compute location targets for guided anchoring. Each feature map is divided into positive, negative and ignore regions. - positive regions: target 1, weight 1 - ignore regions: target 0, weight 0 - negative regions: target 0, weight 0.1 Args: batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. featmap_sizes (list[tuple]): Multi level sizes of each feature maps. Returns: tuple: Returns a tuple containing location targets. """ anchor_scale = self.approx_anchor_generator.octave_base_scale anchor_strides = self.approx_anchor_generator.strides # Currently only supports same stride in x and y direction. 
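        # Descriptive note (based on the code below): each gt box is first
        # assigned to a pyramid level by its scale; on that level a small
        # central region (scaled by train_cfg['center_ratio']) becomes
        # positive (target 1, weight 1), a surrounding band (scaled by
        # train_cfg['ignore_ratio']) is ignored (weight 0), and the
        # remaining negatives keep a down-weight of 0.1. The projected gt
        # region on the adjacent lower/higher levels is also ignored to
        # avoid conflicting supervision across scales.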
for stride in anchor_strides: assert (stride[0] == stride[1]) anchor_strides = [stride[0] for stride in anchor_strides] center_ratio = self.train_cfg['center_ratio'] ignore_ratio = self.train_cfg['ignore_ratio'] img_per_gpu = len(batch_gt_instances) num_lvls = len(featmap_sizes) r1 = (1 - center_ratio) / 2 r2 = (1 - ignore_ratio) / 2 all_loc_targets = [] all_loc_weights = [] all_ignore_map = [] for lvl_id in range(num_lvls): h, w = featmap_sizes[lvl_id] loc_targets = torch.zeros( img_per_gpu, 1, h, w, device=batch_gt_instances[0].bboxes.device, dtype=torch.float32) loc_weights = torch.full_like(loc_targets, -1) ignore_map = torch.zeros_like(loc_targets) all_loc_targets.append(loc_targets) all_loc_weights.append(loc_weights) all_ignore_map.append(ignore_map) for img_id in range(img_per_gpu): gt_bboxes = batch_gt_instances[img_id].bboxes scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) # assign gt bboxes to different feature levels w.r.t. their scales target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() for gt_id in range(gt_bboxes.size(0)): lvl = target_lvls[gt_id].item() # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] # calculate ignore regions ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[lvl]) # calculate positive (center) regions ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( gt_, r1, featmap_sizes[lvl]) all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0 all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 # calculate ignore map on nearby low level feature if lvl > 0: d_lvl = lvl - 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[d_lvl]) all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 # calculate ignore map on nearby high level feature if lvl < num_lvls - 1: u_lvl = lvl + 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[u_lvl]) all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 for lvl_id in range(num_lvls): # ignore negative regions w.r.t. ignore map all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0 # set negative regions with weight 0.1 all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 # loc average factor to balance loss loc_avg_factor = sum( [t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200 return all_loc_targets, all_loc_weights, loc_avg_factor def _ga_shape_target_single(self, flat_approxs: Tensor, inside_flags: Tensor, flat_squares: Tensor, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData], img_meta: dict, unmap_outputs: bool = True) -> tuple: """Compute guided anchoring targets. This function returns sampled anchors and gt bboxes directly rather than calculates regression targets. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). 
flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. img_meta (dict): Meta info of a single image. unmap_outputs (bool): unmap outputs or not. Returns: tuple: Returns a tuple containing shape targets of each image. """ if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors num_square = flat_squares.size(0) approxs = flat_approxs.view(num_square, self.approxs_per_octave, 4) approxs = approxs[inside_flags, ...] squares = flat_squares[inside_flags, :] pred_instances = InstanceData() pred_instances.priors = squares pred_instances.approxs = approxs assign_result = self.ga_assigner.assign( pred_instances=pred_instances, gt_instances=gt_instances, gt_instances_ignore=gt_instances_ignore) sampling_result = self.ga_sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) bbox_anchors = torch.zeros_like(squares) bbox_gts = torch.zeros_like(squares) bbox_weights = torch.zeros_like(squares) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes bbox_weights[pos_inds, :] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags) bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds, sampling_result) def ga_shape_targets(self, approx_list: List[List[Tensor]], inside_flag_list: List[List[Tensor]], square_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Compute guided anchoring targets. Args: approx_list (list[list[Tensor]]): Multi level approxs of each image. inside_flag_list (list[list[Tensor]]): Multi level inside flags of each image. square_list (list[list[Tensor]]): Multi level squares of each image. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): unmap outputs or not. Defaults to None. Returns: tuple: Returns a tuple containing shape targets. 
""" num_imgs = len(batch_img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None for _ in range(num_imgs)] (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._ga_shape_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, batch_gt_instances, batch_gt_instances_ignore, batch_img_metas, unmap_outputs=unmap_outputs) # sampled anchors of all images avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares) bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares) return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, avg_factor) def loss_shape_single(self, shape_pred: Tensor, bbox_anchors: Tensor, bbox_gts: Tensor, anchor_weights: Tensor, avg_factor: int) -> Tensor: """Compute shape loss in single level.""" shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) bbox_anchors = bbox_anchors.contiguous().view(-1, 4) bbox_gts = bbox_gts.contiguous().view(-1, 4) anchor_weights = anchor_weights.contiguous().view(-1, 4) bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) bbox_deltas[:, 2:] += shape_pred # filter out negative samples to speed-up weighted_bounded_iou_loss inds = torch.nonzero( anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1) bbox_deltas_ = bbox_deltas[inds] bbox_anchors_ = bbox_anchors[inds] bbox_gts_ = bbox_gts[inds] anchor_weights_ = anchor_weights[inds] pred_anchors_ = self.anchor_coder.decode( bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6) loss_shape = self.loss_shape( pred_anchors_, bbox_gts_, anchor_weights_, avg_factor=avg_factor) return loss_shape def loss_loc_single(self, loc_pred: Tensor, loc_target: Tensor, loc_weight: Tensor, avg_factor: float) -> Tensor: """Compute location loss in single level.""" loss_loc = self.loss_loc( loc_pred.reshape(-1, 1), loc_target.reshape(-1).long(), loc_weight.reshape(-1), avg_factor=avg_factor) return loss_loc def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], shape_preds: List[Tensor], loc_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). shape_preds (list[Tensor]): shape predictions for each scale level with shape (N, 1, H, W). loc_preds (list[Tensor]): location predictions for each scale level with shape (N, num_anchors * 2, H, W). 
batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get loc targets loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets( batch_gt_instances, featmap_sizes) # get sampled approxes approxs_list, inside_flag_list = self.get_sampled_approxs( featmap_sizes, batch_img_metas, device=device) # get squares and guided anchors squares_list, guided_anchors_list, _ = self.get_anchors( featmap_sizes, shape_preds, loc_preds, batch_img_metas, device=device) # get shape targets shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list, squares_list, batch_gt_instances, batch_img_metas) (bbox_anchors_list, bbox_gts_list, anchor_weights_list, ga_avg_factor) = shape_targets # get anchor targets cls_reg_targets = self.get_targets( guided_anchors_list, inside_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets # anchor number of multi levels num_level_anchors = [ anchors.size(0) for anchors in guided_anchors_list[0] ] # concat all level anchors to a single tensor concat_anchor_list = [] for i in range(len(guided_anchors_list)): concat_anchor_list.append(torch.cat(guided_anchors_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) # get classification and bbox regression losses losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor=avg_factor) # get anchor location loss losses_loc = [] for i in range(len(loc_preds)): loss_loc = self.loss_loc_single( loc_preds[i], loc_targets[i], loc_weights[i], avg_factor=loc_avg_factor) losses_loc.append(loss_loc) # get anchor shape loss losses_shape = [] for i in range(len(shape_preds)): loss_shape = self.loss_shape_single( shape_preds[i], bbox_anchors_list[i], bbox_gts_list[i], anchor_weights_list[i], avg_factor=ga_avg_factor) losses_shape.append(loss_shape) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_shape=losses_shape, loss_loc=losses_loc) def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], shape_preds: List[Tensor], loc_preds: List[Tensor], batch_img_metas: List[dict], cfg: OptConfigType = None, rescale: bool = False) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). shape_preds (list[Tensor]): shape predictions for each scale level with shape (N, 1, H, W). loc_preds (list[Tensor]): location predictions for each scale level with shape (N, num_anchors * 2, H, W). 
batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( loc_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device # get guided anchors _, guided_anchors, loc_masks = self.get_anchors( featmap_sizes, shape_preds, loc_preds, batch_img_metas, use_loc_filter=not self.training, device=device) result_list = [] for img_id in range(len(batch_img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] guided_anchor_list = [ guided_anchors[img_id][i].detach() for i in range(num_levels) ] loc_mask_list = [ loc_masks[img_id][i].detach() for i in range(num_levels) ] proposals = self._predict_by_feat_single( cls_scores=cls_score_list, bbox_preds=bbox_pred_list, mlvl_anchors=guided_anchor_list, mlvl_masks=loc_mask_list, img_meta=batch_img_metas[img_id], cfg=cfg, rescale=rescale) result_list.append(proposals) return result_list def _predict_by_feat_single(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], mlvl_anchors: List[Tensor], mlvl_masks: List[Tensor], img_meta: dict, cfg: ConfigType, rescale: bool = False) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). mlvl_anchors (list[Tensor]): Each element in the list is the anchors of a single level in feature pyramid. it has shape (num_priors, 4). mlvl_masks (list[Tensor]): Each element in the list is location masks of a single level. img_meta (dict): Image meta info. cfg (:obj:`ConfigDict` or dict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bbox_preds = [] mlvl_valid_anchors = [] mlvl_scores = [] for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, mlvl_anchors, mlvl_masks): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] # if no location is kept, end. 
if mask.sum() == 0: continue # reshape scores and bbox_pred cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask, :] bbox_pred = bbox_pred[mask, :] if scores.dim() == 0: anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) bbox_pred = bbox_pred.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. scores nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] mlvl_bbox_preds.append(bbox_pred) mlvl_valid_anchors.append(anchors) mlvl_scores.append(scores) mlvl_bbox_preds = torch.cat(mlvl_bbox_preds) mlvl_anchors = torch.cat(mlvl_valid_anchors) mlvl_scores = torch.cat(mlvl_scores) mlvl_bboxes = self.bbox_coder.decode( mlvl_anchors, mlvl_bbox_preds, max_shape=img_meta['img_shape']) if rescale: assert img_meta.get('scale_factor') is not None mlvl_bboxes /= mlvl_bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) # multi class NMS det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) results = InstanceData() results.bboxes = det_bboxes[:, :-1] results.scores = det_bboxes[:, -1] results.labels = det_labels return results
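# A minimal, self-contained sketch of the location filtering performed in
# ``_get_guided_anchors_single`` above: at inference, positions whose
# sigmoid location score falls below the threshold are masked out, so
# guided anchors (and the masked convs) only run on a sparse set of
# locations. Shapes and values are made-up toy data, not part of the
# original module; the toy threshold of 0.3 is chosen only so the demo
# visibly filters something (the head's default ``loc_filter_thr`` is 0.01).
if __name__ == '__main__':
    import torch

    toy_loc_filter_thr = 0.3
    num_base_priors = 1  # one square anchor per location, as in this head
    loc_pred = torch.randn(1, 4, 4)  # (1, H, W) raw location logits
    squares = torch.rand(4 * 4 * num_base_priors, 4) * 32  # toy squares

    loc_mask = loc_pred.sigmoid() >= toy_loc_filter_thr
    mask = loc_mask.permute(1, 2, 0).expand(-1, -1, num_base_priors)
    mask = mask.contiguous().view(-1)
    kept_squares = squares[mask]  # only these positions get guided anchors
    print(f'{int(mask.sum())} of {mask.numel()} locations kept')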
44,103
43.325628
79
py
ERD
ERD-main/mmdet/models/dense_heads/yolof_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule, is_norm from mmengine.model import bias_init_with_prob, constant_init, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean from ..task_modules.prior_generators import anchor_inside_flags from ..utils import levels_to_images, multi_apply, unmap from .anchor_head import AnchorHead INF = 1e8 @MODELS.register_module() class YOLOFHead(AnchorHead): """Detection Head of `YOLOF <https://arxiv.org/abs/2103.09460>`_ Args: num_classes (int): The number of object classes (w/o background) in_channels (list[int]): The number of input channels per scale. cls_num_convs (int): The number of convolutions of cls branch. Defaults to 2. reg_num_convs (int): The number of convolutions of reg branch. Defaults to 4. norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization layer. Defaults to ``dict(type='BN', requires_grad=True)``. """ def __init__(self, num_classes: int, in_channels: List[int], num_cls_convs: int = 2, num_reg_convs: int = 4, norm_cfg: ConfigType = dict(type='BN', requires_grad=True), **kwargs) -> None: self.num_cls_convs = num_cls_convs self.num_reg_convs = num_reg_convs self.norm_cfg = norm_cfg super().__init__( num_classes=num_classes, in_channels=in_channels, **kwargs) def _init_layers(self) -> None: cls_subnet = [] bbox_subnet = [] for i in range(self.num_cls_convs): cls_subnet.append( ConvModule( self.in_channels, self.in_channels, kernel_size=3, padding=1, norm_cfg=self.norm_cfg)) for i in range(self.num_reg_convs): bbox_subnet.append( ConvModule( self.in_channels, self.in_channels, kernel_size=3, padding=1, norm_cfg=self.norm_cfg)) self.cls_subnet = nn.Sequential(*cls_subnet) self.bbox_subnet = nn.Sequential(*bbox_subnet) self.cls_score = nn.Conv2d( self.in_channels, self.num_base_priors * self.num_classes, kernel_size=3, stride=1, padding=1) self.bbox_pred = nn.Conv2d( self.in_channels, self.num_base_priors * 4, kernel_size=3, stride=1, padding=1) self.object_pred = nn.Conv2d( self.in_channels, self.num_base_priors, kernel_size=3, stride=1, padding=1) def init_weights(self) -> None: for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) # Use prior in model initialization to improve stability bias_cls = bias_init_with_prob(0.01) torch.nn.init.constant_(self.cls_score.bias, bias_cls) def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: normalized_cls_score (Tensor): Normalized Cls scores for a \ single scale level, the channels number is \ num_base_priors * num_classes. bbox_reg (Tensor): Box energies / deltas for a single scale \ level, the channels number is num_base_priors * 4. """ cls_score = self.cls_score(self.cls_subnet(x)) N, _, H, W = cls_score.shape cls_score = cls_score.view(N, -1, self.num_classes, H, W) reg_feat = self.bbox_subnet(x) bbox_reg = self.bbox_pred(reg_feat) objectness = self.object_pred(reg_feat) # implicit objectness objectness = objectness.view(N, -1, 1, H, W) normalized_cls_score = cls_score + objectness - torch.log( 1. 
+ torch.clamp(cls_score.exp(), max=INF) + torch.clamp(objectness.exp(), max=INF)) normalized_cls_score = normalized_cls_score.view(N, -1, H, W) return normalized_cls_score, bbox_reg def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ assert len(cls_scores) == 1 assert self.prior_generator.num_levels == 1 device = cls_scores[0].device featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) # The output level is always 1 anchor_list = [anchors[0] for anchors in anchor_list] valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list] cls_scores_list = levels_to_images(cls_scores) bbox_preds_list = levels_to_images(bbox_preds) cls_reg_targets = self.get_targets( cls_scores_list, bbox_preds_list, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) if cls_reg_targets is None: return None (batch_labels, batch_label_weights, avg_factor, batch_bbox_weights, batch_pos_predicted_boxes, batch_target_boxes) = cls_reg_targets flatten_labels = batch_labels.reshape(-1) batch_label_weights = batch_label_weights.reshape(-1) cls_score = cls_scores[0].permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() # classification loss loss_cls = self.loss_cls( cls_score, flatten_labels, batch_label_weights, avg_factor=avg_factor) # regression loss if batch_pos_predicted_boxes.shape[0] == 0: # no pos sample loss_bbox = batch_pos_predicted_boxes.sum() * 0 else: loss_bbox = self.loss_bbox( batch_pos_predicted_boxes, batch_target_boxes, batch_bbox_weights.float(), avg_factor=avg_factor) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets(self, cls_scores_list: List[Tensor], bbox_preds_list: List[Tensor], anchor_list: List[Tensor], valid_flag_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True): """Compute regression and classification targets for anchors in multiple images. Args: cls_scores_list (list[Tensor]): Classification scores of each image. each is a 4D-tensor, the shape is (h * w, num_anchors * num_classes). bbox_preds_list (list[Tensor]): Bbox preds of each image. each is a 4D-tensor, the shape is (h * w, num_anchors * 4). anchor_list (list[Tensor]): Anchors of each image. Each element of is a tensor of shape (h * w * num_anchors, 4). valid_flag_list (list[Tensor]): Valid flags of each image. 
Each element of is a tensor of shape (h * w * num_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: Usually returns a tuple containing learning targets. - batch_labels (Tensor): Label of all images. Each element \ of is a tensor of shape (batch, h * w * num_anchors) - batch_label_weights (Tensor): Label weights of all images \ of is a tensor of shape (batch, h * w * num_anchors) - num_total_pos (int): Number of positive samples in all \ images. - num_total_neg (int): Number of negative samples in all \ images. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. having HxW dimension). The results will be concatenated after the end """ num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs results = multi_apply( self._get_targets_single, bbox_preds_list, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) (all_labels, all_label_weights, pos_inds, neg_inds, sampling_results_list) = results[:5] # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. avg_factor = sum( [results.avg_factor for results in sampling_results_list]) rest_results = list(results[5:]) # user-added return values batch_labels = torch.stack(all_labels, 0) batch_label_weights = torch.stack(all_label_weights, 0) res = (batch_labels, batch_label_weights, avg_factor) for i, rests in enumerate(rest_results): # user-added return values rest_results[i] = torch.cat(rests, 0) return res + tuple(rest_results) def _get_targets_single(self, bbox_preds: Tensor, flat_anchors: Tensor, valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in a single image. Args: bbox_preds (Tensor): Bbox prediction of the image, which shape is (h * w ,4) flat_anchors (Tensor): Anchors of the image, which shape is (h * w * num_anchors ,4) valid_flags (Tensor): Valid flags of the image, which shape is (h * w * num_anchors,). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: labels (Tensor): Labels of image, which shape is (h * w * num_anchors, ). 
label_weights (Tensor): Label weights of image, which shape is (h * w * num_anchors, ). pos_inds (Tensor): Pos index of image. neg_inds (Tensor): Neg index of image. sampling_result (obj:`SamplingResult`): Sampling result. pos_bbox_weights (Tensor): The Weight of using to calculate the bbox branch loss, which shape is (num, ). pos_predicted_boxes (Tensor): boxes predicted value of using to calculate the bbox branch loss, which shape is (num, 4). pos_target_boxes (Tensor): boxes target value of using to calculate the bbox branch loss, which shape is (num, 4). """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] bbox_preds = bbox_preds.reshape(-1, 4) bbox_preds = bbox_preds[inside_flags, :] # decoded bbox decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds) pred_instances = InstanceData( priors=anchors, decoder_priors=decoder_bbox_preds) assign_result = self.assigner.assign(pred_instances, gt_instances, gt_instances_ignore) pos_bbox_weights = assign_result.get_extra_property('pos_idx') pos_predicted_boxes = assign_result.get_extra_property( 'pos_predicted_boxes') pos_target_boxes = assign_result.get_extra_property('target_boxes') sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_anchors, inside_flags) return (labels, label_weights, pos_inds, neg_inds, sampling_result, pos_bbox_weights, pos_predicted_boxes, pos_target_boxes)
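# A minimal, self-contained sketch of the implicit-objectness fusion in
# ``forward_single`` above: the per-anchor objectness logit is folded into
# every class logit, so the normalized logit equals
# log(exp(cls + obj) / (1 + exp(cls) + exp(obj))). Shapes and values are
# made-up toy data, not part of the original module.
if __name__ == '__main__':
    import torch

    INF = 1e8
    N, A, C, H, W = 1, 2, 3, 4, 4  # toy batch/anchors/classes/feature map
    cls_score = torch.randn(N, A, C, H, W)
    objectness = torch.randn(N, A, 1, H, W)

    normalized = cls_score + objectness - torch.log(
        1. + torch.clamp(cls_score.exp(), max=INF) +
        torch.clamp(objectness.exp(), max=INF))
    # the same value, computed from the closed form
    direct = ((cls_score + objectness).exp() /
              (1. + cls_score.exp() + objectness.exp())).log()
    print(torch.allclose(normalized, direct, atol=1e-5))  # True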
17,384
42.4625
79
py
ERD
ERD-main/mmdet/models/dense_heads/sabl_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList) from ..task_modules.samplers import PseudoSampler from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply, unmap) from .base_dense_head import BaseDenseHead from .guided_anchor_head import GuidedAnchorHead @MODELS.register_module() class SABLRetinaHead(BaseDenseHead): """Side-Aware Boundary Localization (SABL) for RetinaNet. The anchor generation, assigning and sampling in SABLRetinaHead are the same as GuidedAnchorHead for guided anchoring. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of Convs for classification and regression branches. Defaults to 4. feat_channels (int): Number of hidden channels. Defaults to 256. approx_anchor_generator (:obj:`ConfigType` or dict): Config dict for approx generator. square_anchor_generator (:obj:`ConfigDict` or dict): Config dict for square generator. conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for ConvModule. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for Norm Layer. Defaults to None. bbox_coder (:obj:`ConfigDict` or dict): Config dict for bbox coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be ``True`` when using ``IoULoss``, ``GIoULoss``, or ``DIoULoss`` in the bbox head. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of SABLRetinaHead. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of SABLRetinaHead. loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox_cls (:obj:`ConfigDict` or dict): Config of classification loss for bbox branch. loss_bbox_reg (:obj:`ConfigDict` or dict): Config of regression loss for bbox branch. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. 
""" def __init__( self, num_classes: int, in_channels: int, stacked_convs: int = 4, feat_channels: int = 256, approx_anchor_generator: ConfigType = dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator: ConfigType = dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, bbox_coder: ConfigType = dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), reg_decoded_bbox: bool = False, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg: ConfigType = dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5), init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01)) ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.num_buckets = bbox_coder['num_buckets'] self.side_num = int(np.ceil(self.num_buckets / 2)) assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = TASK_UTILS.build( approx_anchor_generator) self.square_anchor_generator = TASK_UTILS.build( square_anchor_generator) self.approxs_per_octave = ( self.approx_anchor_generator.num_base_priors[0]) # one anchor per location self.num_base_priors = self.square_anchor_generator.num_base_priors[0] self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.bbox_coder = TASK_UTILS.build(bbox_coder) self.loss_cls = MODELS.build(loss_cls) self.loss_bbox_cls = MODELS.build(loss_bbox_cls) self.loss_bbox_reg = MODELS.build(loss_bbox_reg) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) # use PseudoSampler when sampling is False if 'sampler' in self.train_cfg: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) self._init_layers() def _init_layers(self) -> None: self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.retina_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.retina_bbox_reg = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) self.retina_bbox_cls = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = 
cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_cls_pred = self.retina_bbox_cls(reg_feat) bbox_reg_pred = self.retina_bbox_reg(reg_feat) bbox_pred = (bbox_cls_pred, bbox_reg_pred) return cls_score, bbox_pred def forward(self, feats: List[Tensor]) -> Tuple[List[Tensor]]: return multi_apply(self.forward_single, feats) def get_anchors( self, featmap_sizes: List[tuple], img_metas: List[dict], device: Union[torch.device, str] = 'cuda' ) -> List[List[Tensor]]: """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): Device for returned tensors. Returns: list[list[Tensor]]: Multi-level squares of each image. """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # squares once multi_level_squares = self.square_anchor_generator.grid_priors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] return squares_list def get_targets(self, approx_list: List[List[Tensor]], inside_flag_list: List[List[Tensor]], square_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Compute bucketing targets. Args: approx_list (list[list[Tensor]]): Multi level approxs of each image. inside_flag_list (list[list[Tensor]]): Multi level inside flags of each image. square_list (list[list[Tensor]]): Multi level squares of each image. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: Returns a tuple containing learning targets. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ each level. - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ each level. - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ each level. - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ each level. - avg_factor (int): Average factor that is used to average the loss.
""" num_imgs = len(batch_img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_cls_targets, all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, approx_flat_list, inside_flag_flat_list, square_flat_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) # sampled anchors of all images avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_squares) label_weights_list = images_to_levels(all_label_weights, num_level_squares) bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, num_level_squares) bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, num_level_squares) bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, num_level_squares) bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, num_level_squares) return (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, avg_factor) def _get_targets_single(self, flat_approxs: Tensor, inside_flags: Tensor, flat_squares: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in a single image. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: - labels_list (Tensor): Labels in a single image. - label_weights (Tensor): Label weights in a single image. - bbox_cls_targets (Tensor): BBox cls targets in a single image. - bbox_cls_weights (Tensor): BBox cls weights in a single image. - bbox_reg_targets (Tensor): BBox reg targets in a single image. - bbox_reg_weights (Tensor): BBox reg weights in a single image. - num_total_pos (int): Number of positive samples in a single \ image. - num_total_neg (int): Number of negative samples in a single \ image. - sampling_result (:obj:`SamplingResult`): Sampling result object. 
""" if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors num_square = flat_squares.size(0) approxs = flat_approxs.view(num_square, self.approxs_per_octave, 4) approxs = approxs[inside_flags, ...] squares = flat_squares[inside_flags, :] pred_instances = InstanceData() pred_instances.priors = squares pred_instances.approxs = approxs assign_result = self.assigner.assign(pred_instances, gt_instances, gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_squares = squares.shape[0] bbox_cls_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_cls_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) labels = squares.new_full((num_valid_squares, ), self.num_classes, dtype=torch.long) label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, pos_bbox_cls_weights) = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, inside_flags) bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, inside_flags) bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, inside_flags) bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds, sampling_result) def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_cls_targets: Tensor, bbox_cls_weights: Tensor, bbox_reg_targets: Tensor, bbox_reg_weights: Tensor, avg_factor: float) -> Tuple[Tensor]: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). labels (Tensor): Labels in a single image. label_weights (Tensor): Label weights in a single level. bbox_cls_targets (Tensor): BBox cls targets in a single level. bbox_cls_weights (Tensor): BBox cls weights in a single level. bbox_reg_targets (Tensor): BBox reg targets in a single level. bbox_reg_weights (Tensor): BBox reg weights in a single level. 
avg_factor (int): Average factor that is used to average the loss. Returns: tuple: loss components. """ # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor) # regression loss bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4) bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) (bbox_cls_pred, bbox_reg_pred) = bbox_pred bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) loss_bbox_cls = self.loss_bbox_cls( bbox_cls_pred, bbox_cls_targets.long(), bbox_cls_weights, avg_factor=avg_factor * 4 * self.side_num) loss_bbox_reg = self.loss_bbox_reg( bbox_reg_pred, bbox_reg_targets, bbox_reg_weights, avg_factor=avg_factor * 4 * self.bbox_coder.offset_topk) return loss_cls, loss_bbox_cls, loss_bbox_reg def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get sampled approxes approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( self, featmap_sizes, batch_img_metas, device=device) square_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( approxs_list, inside_flag_list, square_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, avg_factor) = cls_reg_targets losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, avg_factor=avg_factor) return dict( loss_cls=losses_cls, loss_bbox_cls=losses_bbox_cls, loss_bbox_reg=losses_bbox_reg) def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_img_metas: List[dict], cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. 
Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. cfg (:obj:`ConfigDict`, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device mlvl_anchors = self.get_anchors( featmap_sizes, batch_img_metas, device=device) result_list = [] for img_id in range(len(batch_img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_cls_pred_list = [ bbox_preds[i][0][img_id].detach() for i in range(num_levels) ] bbox_reg_pred_list = [ bbox_preds[i][1][img_id].detach() for i in range(num_levels) ] proposals = self._predict_by_feat_single( cls_scores=cls_score_list, bbox_cls_preds=bbox_cls_pred_list, bbox_reg_preds=bbox_reg_pred_list, mlvl_anchors=mlvl_anchors[img_id], img_meta=batch_img_metas[img_id], cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(proposals) return result_list def _predict_by_feat_single(self, cls_scores: List[Tensor], bbox_cls_preds: List[Tensor], bbox_reg_preds: List[Tensor], mlvl_anchors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: cfg = self.test_cfg if cfg is None else cfg nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_confids = [] mlvl_labels = [] assert len(cls_scores) == len(bbox_cls_preds) == len( bbox_reg_preds) == len(mlvl_anchors) for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip( cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): assert cls_score.size()[-2:] == bbox_cls_pred.size( )[-2:] == bbox_reg_pred.size()[-2::] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1)[:, :-1] bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. 
results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict( anchors=anchors, bbox_cls_pred=bbox_cls_pred, bbox_reg_pred=bbox_reg_pred)) scores, labels, _, filtered_results = results anchors = filtered_results['anchors'] bbox_cls_pred = filtered_results['bbox_cls_pred'] bbox_reg_pred = filtered_results['bbox_reg_pred'] bbox_preds = [ bbox_cls_pred.contiguous(), bbox_reg_pred.contiguous() ] bboxes, confids = self.bbox_coder.decode( anchors.contiguous(), bbox_preds, max_shape=img_meta['img_shape']) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_confids.append(confids) mlvl_labels.append(labels) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.score_factors = torch.cat(mlvl_confids) results.labels = torch.cat(mlvl_labels) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta)
31,550
43.626591
79
py
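A toy sketch of SABL's side-aware channel layout, as produced by the `retina_bbox_cls`/`retina_bbox_reg` convs above: both emit `side_num * 4` channels, i.e. `side_num` buckets for each of the four box sides. Assumptions: `num_buckets=14` as in the default `BucketingBBoxCoder` config, a single 2x2 feature map, and an illustrative side ordering (the real ordering is decided inside `BucketingBBoxCoder`, not shown here):

import math
import torch

num_buckets = 14
side_num = int(math.ceil(num_buckets / 2))  # 7, as in SABLRetinaHead
h, w = 2, 2

bbox_cls_pred = torch.randn(side_num * 4, h, w)
# (H*W, side_num * 4): one row of side-aware bucket scores per location,
# matching the permute/reshape done in `_predict_by_feat_single`.
flat = bbox_cls_pred.permute(1, 2, 0).reshape(-1, side_num * 4)
# Split into four per-side chunks; each chunk scores `side_num` buckets.
side_a, side_b, side_c, side_d = flat.split(side_num, dim=-1)
print(side_a.shape)  # torch.Size([4, 7])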
ERD
ERD-main/mmdet/models/dense_heads/fovea_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmengine.config import ConfigDict from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig from ..utils import filter_scores_and_topk, multi_apply from .anchor_free_head import AnchorFreeHead INF = 1e8 class FeatureAlign(BaseModule): """Feature Align Module. Feature Align Module is implemented based on DCN v1. It uses the anchor shape prediction rather than the feature map to predict the offsets of the deform conv layer. Args: in_channels (int): Number of channels in the input feature map. out_channels (int): Number of channels in the output feature map. kernel_size (int): Size of the convolution kernel. deform_groups (int): Group number of DCN in FeatureAdaption module. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. """ def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, deform_groups: int = 4, init_cfg: OptMultiConfig = dict( type='Normal', layer='Conv2d', std=0.1, override=dict(type='Normal', name='conv_adaption', std=0.01)) ) -> None: super().__init__(init_cfg=init_cfg) offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 4, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def forward(self, x: Tensor, shape: Tensor) -> Tensor: """Forward function of feature align module. Args: x (Tensor): Features from the upstream network. shape (Tensor): Exponential of bbox predictions. Returns: x (Tensor): The aligned features. """ offset = self.conv_offset(shape) x = self.relu(self.conv_adaption(x, offset)) return x @MODELS.register_module() class FoveaHead(AnchorFreeHead): """Detection Head of `FoveaBox: Beyond Anchor-based Object Detector. <https://arxiv.org/abs/1904.03797>`_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. base_edge_list (list[int]): List of base edge lengths, one per pyramid level. scale_ranges (list[tuple]): Scale ranges used to assign ground-truth boxes to pyramid levels. sigma (float): The sigma value that controls the size of the fovea (positive) area. with_deform (bool): Whether to use deform conv. deform_groups (int): Deformable conv group size. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict.
""" def __init__(self, num_classes: int, in_channels: int, base_edge_list: List[int] = (16, 32, 64, 128, 256), scale_ranges: List[tuple] = ((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), sigma: float = 0.4, with_deform: bool = False, deform_groups: int = 4, init_cfg: OptMultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)), **kwargs) -> None: self.base_edge_list = base_edge_list self.scale_ranges = scale_ranges self.sigma = sigma self.with_deform = with_deform self.deform_groups = deform_groups super().__init__( num_classes=num_classes, in_channels=in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" # box branch super()._init_reg_convs() self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) # cls branch if not self.with_deform: super()._init_cls_convs() self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) else: self.cls_convs = nn.ModuleList() self.cls_convs.append( ConvModule( self.feat_channels, (self.feat_channels * 4), 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.cls_convs.append( ConvModule((self.feat_channels * 4), (self.feat_channels * 4), 1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.feature_adaption = FeatureAlign( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = nn.Conv2d( int(self.feat_channels * 4), self.cls_out_channels, 3, padding=1) def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. Returns: tuple: scores for each class and bbox predictions of input feature maps. """ cls_feat = x reg_feat = x for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) if self.with_deform: cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp()) for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) return cls_score, bbox_pred def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_priors * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] priors = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) num_imgs = cls_scores[0].size(0) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_labels, flatten_bbox_targets = self.get_targets( batch_gt_instances, featmap_sizes, priors) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < self.num_classes)).nonzero().view(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) if num_pos > 0: pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_weights = pos_bbox_targets.new_ones(pos_bbox_targets.size()) loss_bbox = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, pos_weights, avg_factor=num_pos) else: loss_bbox = torch.tensor( 0, dtype=flatten_bbox_preds.dtype, device=flatten_bbox_preds.device) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets( self, batch_gt_instances: InstanceList, featmap_sizes: List[tuple], priors_list: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: """Compute regression and classification for priors in multiple images. Args: batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. featmap_sizes (list[tuple]): Size tuple of feature maps. priors_list (list[Tensor]): Priors list of each fpn level, each has shape (num_priors, 2). Returns: tuple: Targets of each level. - flatten_labels (list[Tensor]): Labels of each level. - flatten_bbox_targets (list[Tensor]): BBox targets of each level. """ label_list, bbox_target_list = multi_apply( self._get_targets_single, batch_gt_instances, featmap_size_list=featmap_sizes, priors_list=priors_list) flatten_labels = [ torch.cat([ labels_level_img.flatten() for labels_level_img in labels_level ]) for labels_level in zip(*label_list) ] flatten_bbox_targets = [ torch.cat([ bbox_targets_level_img.reshape(-1, 4) for bbox_targets_level_img in bbox_targets_level ]) for bbox_targets_level in zip(*bbox_target_list) ] flatten_labels = torch.cat(flatten_labels) flatten_bbox_targets = torch.cat(flatten_bbox_targets) return flatten_labels, flatten_bbox_targets def _get_targets_single(self, gt_instances: InstanceData, featmap_size_list: List[tuple] = None, priors_list: List[Tensor] = None) -> tuple: """Compute regression and classification targets for a single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. featmap_size_list (list[tuple]): Size tuple of feature maps. priors_list (list[Tensor]): Priors of each fpn level, each has shape (num_priors, 2). Returns: tuple: - label_list (list[Tensor]): Labels of all anchors in the image. - box_target_list (list[Tensor]): BBox targets of all anchors in the image. 
""" gt_bboxes_raw = gt_instances.bboxes gt_labels_raw = gt_instances.labels gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])) label_list = [] bbox_target_list = [] # for each pyramid, find the cls and box target for base_len, (lower_bound, upper_bound), stride, featmap_size, \ priors in zip(self.base_edge_list, self.scale_ranges, self.strides, featmap_size_list, priors_list): # FG cat_id: [0, num_classes -1], BG cat_id: num_classes priors = priors.view(*featmap_size, 2) x, y = priors[..., 0], priors[..., 1] labels = gt_labels_raw.new_full(featmap_size, self.num_classes) bbox_targets = gt_bboxes_raw.new_ones(featmap_size[0], featmap_size[1], 4) # scale assignment hit_indices = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(hit_indices) == 0: label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) continue _, hit_index_order = torch.sort(-gt_areas[hit_indices]) hit_indices = hit_indices[hit_index_order] gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride gt_labels = gt_labels_raw[hit_indices] half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) # valid fovea area: left, right, top, down pos_left = torch.ceil( gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \ clamp(0, featmap_size[1] - 1) pos_right = torch.floor( gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \ clamp(0, featmap_size[1] - 1) pos_top = torch.ceil( gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \ clamp(0, featmap_size[0] - 1) pos_down = torch.floor( gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). \ clamp(0, featmap_size[0] - 1) for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \ zip(pos_left, pos_top, pos_right, pos_down, gt_labels, gt_bboxes_raw[hit_indices, :]): labels[py1:py2 + 1, px1:px2 + 1] = label bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \ (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \ (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \ (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \ (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.) label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) return label_list, bbox_target_list # Same as base_dense_head/_predict_by_feat_single except self._bbox_decode def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 2). img_meta (dict): Image meta info. 
cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instances, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, self.strides, self.base_edge_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) def _bbox_decode(self, priors: Tensor, bbox_pred: Tensor, base_len: int, max_shape: tuple) -> Tensor: """Function to decode bbox. Args: priors (Tensor): Center priors of an image, has shape (num_instances, 2). bbox_pred (Tensor): Box energies / deltas for all instances, has shape (num_instances, 4). base_len (int): The base length. max_shape (tuple[int, int]): The max shape (h, w) used to clamp the bboxes. Returns: Tensor: Decoded bboxes in (tl_x, tl_y, br_x, br_y) format. Has shape (num_instances, 4). """ bbox_pred = bbox_pred.exp() y = priors[:, 1] x = priors[:, 0] x1 = (x - base_len * bbox_pred[:, 0]). \ clamp(min=0, max=max_shape[1] - 1) y1 = (y - base_len * bbox_pred[:, 1]). \ clamp(min=0, max=max_shape[0] - 1) x2 = (x + base_len * bbox_pred[:, 2]). \ clamp(min=0, max=max_shape[1] - 1) y2 = (y + base_len * bbox_pred[:, 3]). \ clamp(min=0, max=max_shape[0] - 1) decoded_bboxes = torch.stack([x1, y1, x2, y2], -1) return decoded_bboxes
21,448
41.056863
79
py
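A minimal re-statement, in pure torch, of the `FoveaHead._bbox_decode` logic above: exponential offsets scaled by a per-level `base_len` turn a center prior (x, y) into an (x1, y1, x2, y2) box clamped to the image. This mirrors the code shown, with illustrative inputs:

import torch

def fovea_decode(priors, bbox_pred, base_len, max_shape):
    # bbox_pred holds log-space distances; exp() recovers the offsets.
    d = bbox_pred.exp()
    x, y = priors[:, 0], priors[:, 1]
    x1 = (x - base_len * d[:, 0]).clamp(min=0, max=max_shape[1] - 1)
    y1 = (y - base_len * d[:, 1]).clamp(min=0, max=max_shape[0] - 1)
    x2 = (x + base_len * d[:, 2]).clamp(min=0, max=max_shape[1] - 1)
    y2 = (y + base_len * d[:, 3]).clamp(min=0, max=max_shape[0] - 1)
    return torch.stack([x1, y1, x2, y2], dim=-1)

priors = torch.tensor([[32.0, 32.0]])
pred = torch.zeros(1, 4)  # exp(0) = 1, so each offset equals base_len
print(fovea_decode(priors, pred, base_len=16, max_shape=(64, 64)))
# tensor([[16., 16., 48., 48.]])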
ERD
ERD-main/mmdet/models/dense_heads/mask2former_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d from mmcv.ops import point_sample from mmengine.model import ModuleList, caffe2_xavier_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig, reduce_mean from ..layers import Mask2FormerTransformerDecoder, SinePositionalEncoding from ..utils import get_uncertain_point_coords_with_randomness from .anchor_free_head import AnchorFreeHead from .maskformer_head import MaskFormerHead @MODELS.register_module() class Mask2FormerHead(MaskFormerHead): """Implements the Mask2Former head. See `Masked-attention Mask Transformer for Universal Image Segmentation <https://arxiv.org/pdf/2112.01527>`_ for details. Args: in_channels (list[int]): Number of channels in the input feature map. feat_channels (int): Number of channels for features. out_channels (int): Number of channels for output. num_things_classes (int): Number of things. num_stuff_classes (int): Number of stuff. num_queries (int): Number of queries in Transformer decoder. num_transformer_feat_level (int): Number of multi-scale feature levels fed to the Transformer decoder. Defaults to 3. pixel_decoder (:obj:`ConfigDict` or dict): Config for pixel decoder. Defaults to None. enforce_decoder_input_project (bool, optional): Whether to add a layer to change the embed_dim of transformer encoder in pixel decoder to the embed_dim of transformer decoder. Defaults to False. transformer_decoder (:obj:`ConfigDict` or dict): Config for transformer decoder. Defaults to None. positional_encoding (:obj:`ConfigDict` or dict): Config for transformer decoder position encoding. Defaults to dict(num_feats=128, normalize=True). loss_cls (:obj:`ConfigDict` or dict): Config of the classification loss. Defaults to None. loss_mask (:obj:`ConfigDict` or dict): Config of the mask loss. Defaults to None. loss_dice (:obj:`ConfigDict` or dict): Config of the dice loss. Defaults to None. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of Mask2Former head. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of Mask2Former head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. Defaults to None.
""" def __init__(self, in_channels: List[int], feat_channels: int, out_channels: int, num_things_classes: int = 80, num_stuff_classes: int = 53, num_queries: int = 100, num_transformer_feat_level: int = 3, pixel_decoder: ConfigType = ..., enforce_decoder_input_project: bool = False, transformer_decoder: ConfigType = ..., positional_encoding: ConfigType = dict( num_feats=128, normalize=True), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0, reduction='mean', class_weight=[1.0] * 133 + [0.1]), loss_mask: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='mean', loss_weight=5.0), loss_dice: ConfigType = dict( type='DiceLoss', use_sigmoid=True, activate=True, reduction='mean', naive_dice=True, eps=1.0, loss_weight=5.0), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None, **kwargs) -> None: super(AnchorFreeHead, self).__init__(init_cfg=init_cfg) self.num_things_classes = num_things_classes self.num_stuff_classes = num_stuff_classes self.num_classes = self.num_things_classes + self.num_stuff_classes self.num_queries = num_queries self.num_transformer_feat_level = num_transformer_feat_level self.num_heads = transformer_decoder.layer_cfg.cross_attn_cfg.num_heads self.num_transformer_decoder_layers = transformer_decoder.num_layers assert pixel_decoder.encoder.layer_cfg. \ self_attn_cfg.num_levels == num_transformer_feat_level pixel_decoder_ = copy.deepcopy(pixel_decoder) pixel_decoder_.update( in_channels=in_channels, feat_channels=feat_channels, out_channels=out_channels) self.pixel_decoder = MODELS.build(pixel_decoder_) self.transformer_decoder = Mask2FormerTransformerDecoder( **transformer_decoder) self.decoder_embed_dims = self.transformer_decoder.embed_dims self.decoder_input_projs = ModuleList() # from low resolution to high resolution for _ in range(num_transformer_feat_level): if (self.decoder_embed_dims != feat_channels or enforce_decoder_input_project): self.decoder_input_projs.append( Conv2d( feat_channels, self.decoder_embed_dims, kernel_size=1)) else: self.decoder_input_projs.append(nn.Identity()) self.decoder_positional_encoding = SinePositionalEncoding( **positional_encoding) self.query_embed = nn.Embedding(self.num_queries, feat_channels) self.query_feat = nn.Embedding(self.num_queries, feat_channels) # from low resolution to high resolution self.level_embed = nn.Embedding(self.num_transformer_feat_level, feat_channels) self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) self.mask_embed = nn.Sequential( nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, out_channels)) self.test_cfg = test_cfg self.train_cfg = train_cfg if train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) self.num_points = self.train_cfg.get('num_points', 12544) self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0) self.importance_sample_ratio = self.train_cfg.get( 'importance_sample_ratio', 0.75) self.class_weight = loss_cls.class_weight self.loss_cls = MODELS.build(loss_cls) self.loss_mask = MODELS.build(loss_mask) self.loss_dice = MODELS.build(loss_dice) def init_weights(self) -> None: for m in self.decoder_input_projs: if isinstance(m, Conv2d): caffe2_xavier_init(m, bias=0) self.pixel_decoder.init_weights() for p in self.transformer_decoder.parameters(): if p.dim() > 
1: nn.init.xavier_normal_(p) def _get_targets_single(self, cls_score: Tensor, mask_pred: Tensor, gt_instances: InstanceData, img_meta: dict) -> Tuple[Tensor]: """Compute classification and mask targets for one image. Args: cls_score (Tensor): Classification score logits from a single decoder layer for one image. Shape (num_queries, cls_out_channels). mask_pred (Tensor): Mask logits for a single decoder layer for one image. Shape (num_queries, h, w). gt_instances (:obj:`InstanceData`): It contains ``labels`` and ``masks``. img_meta (dict): Image information. Returns: tuple[Tensor]: A tuple containing the following for one image. - labels (Tensor): Labels of each image. \ shape (num_queries, ). - label_weights (Tensor): Label weights of each image. \ shape (num_queries, ). - mask_targets (Tensor): Mask targets of each image. \ shape (num_queries, h, w). - mask_weights (Tensor): Mask weights of each image. \ shape (num_queries, ). - pos_inds (Tensor): Sampled positive indices for each \ image. - neg_inds (Tensor): Sampled negative indices for each \ image. - sampling_result (:obj:`SamplingResult`): Sampling results. """ gt_labels = gt_instances.labels gt_masks = gt_instances.masks # sample points num_queries = cls_score.shape[0] num_gts = gt_labels.shape[0] point_coords = torch.rand((1, self.num_points, 2), device=cls_score.device) # shape (num_queries, num_points) mask_points_pred = point_sample( mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, 1)).squeeze(1) # shape (num_gts, num_points) gt_points_masks = point_sample( gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1, 1)).squeeze(1) sampled_gt_instances = InstanceData( labels=gt_labels, masks=gt_points_masks) sampled_pred_instances = InstanceData( scores=cls_score, masks=mask_points_pred) # assign and sample assign_result = self.assigner.assign( pred_instances=sampled_pred_instances, gt_instances=sampled_gt_instances, img_meta=img_meta) pred_instances = InstanceData(scores=cls_score, masks=mask_pred) sampling_result = self.sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # label target labels = gt_labels.new_full((self.num_queries, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] label_weights = gt_labels.new_ones((self.num_queries, )) # mask target mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] mask_weights = mask_pred.new_zeros((self.num_queries, )) mask_weights[pos_inds] = 1.0 return (labels, label_weights, mask_targets, mask_weights, pos_inds, neg_inds, sampling_result) def _loss_by_feat_single(self, cls_scores: Tensor, mask_preds: Tensor, batch_gt_instances: List[InstanceData], batch_img_metas: List[dict]) -> Tuple[Tensor]: """Loss function for outputs from a single decoder layer. Args: cls_scores (Tensor): Classification score logits from a single decoder layer for all images. Shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should include background. mask_preds (Tensor): Mask logits from a single decoder layer for all images. Shape (batch_size, num_queries, h, w). batch_gt_instances (list[:obj:`InstanceData`]): Each contains ``labels`` and ``masks``. batch_img_metas (list[dict]): List of image meta information. Returns: tuple[Tensor]: Loss components for outputs from a single \ decoder layer.
""" num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] mask_preds_list = [mask_preds[i] for i in range(num_imgs)] (labels_list, label_weights_list, mask_targets_list, mask_weights_list, avg_factor) = self.get_targets(cls_scores_list, mask_preds_list, batch_gt_instances, batch_img_metas) # shape (batch_size, num_queries) labels = torch.stack(labels_list, dim=0) # shape (batch_size, num_queries) label_weights = torch.stack(label_weights_list, dim=0) # shape (num_total_gts, h, w) mask_targets = torch.cat(mask_targets_list, dim=0) # shape (batch_size, num_queries) mask_weights = torch.stack(mask_weights_list, dim=0) # classfication loss # shape (batch_size * num_queries, ) cls_scores = cls_scores.flatten(0, 1) labels = labels.flatten(0, 1) label_weights = label_weights.flatten(0, 1) class_weight = cls_scores.new_tensor(self.class_weight) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=class_weight[labels].sum()) num_total_masks = reduce_mean(cls_scores.new_tensor([avg_factor])) num_total_masks = max(num_total_masks, 1) # extract positive ones # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) mask_preds = mask_preds[mask_weights > 0] if mask_targets.shape[0] == 0: # zero match loss_dice = mask_preds.sum() loss_mask = mask_preds.sum() return loss_cls, loss_mask, loss_dice with torch.no_grad(): points_coords = get_uncertain_point_coords_with_randomness( mask_preds.unsqueeze(1), None, self.num_points, self.oversample_ratio, self.importance_sample_ratio) # shape (num_total_gts, h, w) -> (num_total_gts, num_points) mask_point_targets = point_sample( mask_targets.unsqueeze(1).float(), points_coords).squeeze(1) # shape (num_queries, h, w) -> (num_queries, num_points) mask_point_preds = point_sample( mask_preds.unsqueeze(1), points_coords).squeeze(1) # dice loss loss_dice = self.loss_dice( mask_point_preds, mask_point_targets, avg_factor=num_total_masks) # mask loss # shape (num_queries, num_points) -> (num_queries * num_points, ) mask_point_preds = mask_point_preds.reshape(-1) # shape (num_total_gts, num_points) -> (num_total_gts * num_points, ) mask_point_targets = mask_point_targets.reshape(-1) loss_mask = self.loss_mask( mask_point_preds, mask_point_targets, avg_factor=num_total_masks * self.num_points) return loss_cls, loss_mask, loss_dice def _forward_head(self, decoder_out: Tensor, mask_feature: Tensor, attn_mask_target_size: Tuple[int, int]) -> Tuple[Tensor]: """Forward for head part which is called after every decoder layer. Args: decoder_out (Tensor): in shape (batch_size, num_queries, c). mask_feature (Tensor): in shape (batch_size, c, h, w). attn_mask_target_size (tuple[int, int]): target attention mask size. Returns: tuple: A tuple contain three elements. - cls_pred (Tensor): Classification scores in shape \ (batch_size, num_queries, cls_out_channels). \ Note `cls_out_channels` should includes background. - mask_pred (Tensor): Mask scores in shape \ (batch_size, num_queries,h, w). - attn_mask (Tensor): Attention mask in shape \ (batch_size * num_heads, num_queries, h, w). 
""" decoder_out = self.transformer_decoder.post_norm(decoder_out) # shape (num_queries, batch_size, c) cls_pred = self.cls_embed(decoder_out) # shape (num_queries, batch_size, c) mask_embed = self.mask_embed(decoder_out) # shape (num_queries, batch_size, h, w) mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature) attn_mask = F.interpolate( mask_pred, attn_mask_target_size, mode='bilinear', align_corners=False) # shape (num_queries, batch_size, h, w) -> # (batch_size * num_head, num_queries, h, w) attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat( (1, self.num_heads, 1, 1)).flatten(0, 1) attn_mask = attn_mask.sigmoid() < 0.5 attn_mask = attn_mask.detach() return cls_pred, mask_pred, attn_mask def forward(self, x: List[Tensor], batch_data_samples: SampleList) -> Tuple[List[Tensor]]: """Forward function. Args: x (list[Tensor]): Multi scale Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple[list[Tensor]]: A tuple contains two elements. - cls_pred_list (list[Tensor)]: Classification logits \ for each decoder layer. Each is a 3D-tensor with shape \ (batch_size, num_queries, cls_out_channels). \ Note `cls_out_channels` should includes background. - mask_pred_list (list[Tensor]): Mask logits for each \ decoder layer. Each with shape (batch_size, num_queries, \ h, w). """ batch_img_metas = [ data_sample.metainfo for data_sample in batch_data_samples ] batch_size = len(batch_img_metas) mask_features, multi_scale_memorys = self.pixel_decoder(x) # multi_scale_memorys (from low resolution to high resolution) decoder_inputs = [] decoder_positional_encodings = [] for i in range(self.num_transformer_feat_level): decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i]) # shape (batch_size, c, h, w) -> (batch_size, h*w, c) decoder_input = decoder_input.flatten(2).permute(0, 2, 1) level_embed = self.level_embed.weight[i].view(1, 1, -1) decoder_input = decoder_input + level_embed # shape (batch_size, c, h, w) -> (batch_size, h*w, c) mask = decoder_input.new_zeros( (batch_size, ) + multi_scale_memorys[i].shape[-2:], dtype=torch.bool) decoder_positional_encoding = self.decoder_positional_encoding( mask) decoder_positional_encoding = decoder_positional_encoding.flatten( 2).permute(0, 2, 1) decoder_inputs.append(decoder_input) decoder_positional_encodings.append(decoder_positional_encoding) # shape (num_queries, c) -> (batch_size, num_queries, c) query_feat = self.query_feat.weight.unsqueeze(0).repeat( (batch_size, 1, 1)) query_embed = self.query_embed.weight.unsqueeze(0).repeat( (batch_size, 1, 1)) cls_pred_list = [] mask_pred_list = [] cls_pred, mask_pred, attn_mask = self._forward_head( query_feat, mask_features, multi_scale_memorys[0].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) for i in range(self.num_transformer_decoder_layers): level_idx = i % self.num_transformer_feat_level # if a mask is all True(all background), then set it all False. 
attn_mask[torch.where( attn_mask.sum(-1) == attn_mask.shape[-1])] = False # cross_attn + self_attn layer = self.transformer_decoder.layers[i] query_feat = layer( query=query_feat, key=decoder_inputs[level_idx], value=decoder_inputs[level_idx], query_pos=query_embed, key_pos=decoder_positional_encodings[level_idx], cross_attn_mask=attn_mask, query_key_padding_mask=None, # here we do not apply masking on padded region key_padding_mask=None) cls_pred, mask_pred, attn_mask = self._forward_head( query_feat, mask_features, multi_scale_memorys[ (i + 1) % self.num_transformer_feat_level].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) return cls_pred_list, mask_pred_list
21,211
44.715517
79
py
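A small shapes-only sketch of how `_forward_head` above turns a per-query mask prediction into the boolean cross-attention mask: the mask is resized to the next layer's key resolution, replicated per attention head, and positions whose sigmoid score falls below 0.5 are masked out (True). The sizes here are illustrative:

import torch
import torch.nn.functional as F

batch_size, num_queries, num_heads = 2, 100, 8
mask_pred = torch.randn(batch_size, num_queries, 32, 32)
target_hw = (16, 16)  # resolution of the next decoder layer's keys

attn_mask = F.interpolate(
    mask_pred, target_hw, mode='bilinear', align_corners=False)
# (b, q, h, w) -> (b * num_heads, q, h*w): one copy per attention head
attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat(
    (1, num_heads, 1, 1)).flatten(0, 1)
attn_mask = (attn_mask.sigmoid() < 0.5).detach()
print(attn_mask.shape)  # torch.Size([16, 100, 256])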
ERD
ERD-main/mmdet/models/dense_heads/dense_test_mixins.py
# Copyright (c) OpenMMLab. All rights reserved. import sys import warnings from inspect import signature import torch from mmcv.ops import batched_nms from mmengine.structures import InstanceData from mmdet.structures.bbox import bbox_mapping_back from ..test_time_augs import merge_aug_proposals if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import completed class BBoxTestMixin(object): """Mixin class for testing det bboxes via DenseHead.""" def simple_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes without test-time augmentation. Can be applied to DenseHead subclasses except ``RPNHead`` and its variants, e.g., ``GARPNHead``. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. \ Each item usually contains following keys. \ - scores (Tensor): Classification scores, has a shape (num_instances,) - labels (Tensor): Labels of bboxes, has a shape (num_instances,). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ warnings.warn('You are calling `simple_test_bboxes` in ' '`dense_test_mixins`, but the `dense_test_mixins` ' 'will be deprecated soon. Please use ' '`simple_test` instead.') outs = self.forward(feats) results_list = self.get_results( *outs, img_metas=img_metas, rescale=rescale) return results_list def aug_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes with test time augmentation. Can be applied to DenseHead subclasses except ``RPNHead`` and its variants, e.g., ``GARPNHead``. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. Each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of list should always be 1. """ warnings.warn('You are calling `aug_test_bboxes` in ' '`dense_test_mixins`, but the `dense_test_mixins` ' 'will be deprecated soon. 
Please use ' '`aug_test` instead.') # check with_nms argument gb_sig = signature(self.get_results) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_results_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_labels = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_outputs = self.get_results( *outs, img_metas=img_meta, cfg=self.test_cfg, rescale=False, with_nms=False)[0] aug_bboxes.append(bbox_outputs.bboxes) aug_scores.append(bbox_outputs.scores) if len(bbox_outputs) >= 3: aug_labels.append(bbox_outputs.labels) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None if merged_bboxes.numel() == 0: det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) return [ (det_bboxes, merged_labels), ] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.test_cfg.nms) det_bboxes = det_bboxes[:self.test_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) results = InstanceData() results.bboxes = _det_bboxes[:, :4] results.scores = _det_bboxes[:, 4] results.labels = det_labels return [results] def aug_test_rpn(self, feats, img_metas): """Test with augmentation, only for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image, each item has shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). """ samples_per_gpu = len(img_metas[0]) aug_proposals = [[] for _ in range(samples_per_gpu)] for x, img_meta in zip(feats, img_metas): results_list = self.simple_test_rpn(x, img_meta) for i, results in enumerate(results_list): proposals = torch.cat( [results.bboxes, results.scores[:, None]], dim=-1) aug_proposals[i].append(proposals) # reorganize the order of 'img_metas' to match the dimensions # of 'aug_proposals' aug_img_metas = [] for i in range(samples_per_gpu): aug_img_meta = [] for j in range(len(img_metas)): aug_img_meta.append(img_metas[j][i]) aug_img_metas.append(aug_img_meta) # after merging, proposals will be rescaled to the original image size merged_proposals = [] for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas): merged_proposal = merge_aug_proposals(proposals, aug_img_meta, self.test_cfg) results = InstanceData() results.bboxes = merged_proposal[:, :4] results.scores = merged_proposal[:, 4] merged_proposals.append(results) return merged_proposals if sys.version_info >= (3, 7): async def async_simple_test_rpn(self, x, img_metas): sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025) async with completed( __name__, 'rpn_head_forward', sleep_interval=sleep_interval): rpn_outs = self(x) proposal_list = self.get_results(*rpn_outs, img_metas=img_metas) return proposal_list def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): """Merge augmented detection bboxes and scores. 
Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). Returns: tuple[Tensor]: ``bboxes`` with shape (n,4), where 4 represent (tl_x, tl_y, br_x, br_y) and ``scores`` with shape (n,). """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.cat(recovered_bboxes, dim=0) if aug_scores is None: return bboxes else: scores = torch.cat(aug_scores, dim=0) return bboxes, scores
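
# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module),
# assuming mmdet and torch are importable. It shows the core idea behind
# `merge_aug_bboxes`: flipped predictions are mapped back to the original
# image frame before concatenation, so all views line up.
if __name__ == '__main__':
    boxes_orig = torch.tensor([[10., 10., 50., 50.]])
    # The same box predicted on a horizontally flipped 800x800 view.
    boxes_flip = torch.tensor([[750., 10., 790., 50.]])
    recovered = bbox_mapping_back(boxes_flip, (800, 800, 3), 1.0, True,
                                  'horizontal')
    merged = torch.cat([boxes_orig, recovered], dim=0)
    assert torch.allclose(merged[0], merged[1])  # the two views now agree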
9,048
40.893519
79
py
ERD
ERD-main/mmdet/models/dense_heads/gfl_head_increment_erd.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import batched_nms
from torch import Tensor

from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures import SampleList
from mmdet.structures.bbox import distance2bbox, bbox_overlaps
from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,
                         OptInstanceList, reduce_mean)
from .gfl_head import GFLHead
from ..task_modules.samplers import PseudoSampler
from ..utils import (multi_apply, unpack_gt_instances)


class Integral(nn.Module):
    """A fixed layer for calculating integral result from distribution.

    This layer calculates the target location by :math:`sum{P(y_i) * y_i}`,
    where P(y_i) denotes the softmax vector that represents the discrete
    distribution, and y_i denotes the discrete set,
    usually ``{0, 1, 2, ..., reg_max}``.

    Args:
        reg_max (int): The maximal value of the discrete set. Defaults to 16.
            You may want to reset it according to your new dataset or related
            settings.
    """

    def __init__(self, reg_max: int = 16) -> None:
        super().__init__()
        self.reg_max = reg_max
        self.register_buffer('project',
                             torch.linspace(0, self.reg_max,
                                            self.reg_max + 1))

    def forward(self, x: Tensor) -> Tensor:
        """Forward feature from the regression head to get integral result of
        bounding box location.

        Args:
            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
                n is self.reg_max.

        Returns:
            x (Tensor): Integral result of box locations, i.e., distance
                offsets from the box center in four directions, shape (N, 4).
        """
        x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
        x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
        return x


@MODELS.register_module()
class GFLHeadIncrementERD(GFLHead):
    """Generalized Focal Loss: Learning Qualified and Distributed Bounding
    Boxes for Dense Object Detection.

    The GFL head structure is similar to that of ATSS; however, GFL uses
    1) a joint representation for classification and localization quality, and
    2) a flexible General distribution for bounding box locations, which are
    supervised by Quality Focal Loss (QFL) and Distribution Focal Loss (DFL),
    respectively.

    https://arxiv.org/abs/2006.04388

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        stacked_convs (int): Number of conv layers in cls and reg tower.
            Defaults to 4.
        conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to
            construct and config conv layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
            config norm layer. Default: dict(type='GN', num_groups=32,
            requires_grad=True).
        loss_qfl (:obj:`ConfigDict` or dict): Config of Quality Focal Loss
            (QFL).
        bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. Defaults
            to 'DistancePointBBoxCoder'.
        reg_max (int): Max value of integral set :math:`{0, ..., reg_max}`
            in QFL setting. Defaults to 16.
        init_cfg (:obj:`ConfigDict` or dict or list[dict] or
            list[:obj:`ConfigDict`]): Initialization config dict.
    Example:
        >>> self = GFLHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_quality_score, bbox_pred = self.forward(feats)
        >>> assert len(cls_quality_score) == len(self.scales)
    """

    def __init__(self,
                 num_classes: int,
                 in_channels: int,
                 stacked_convs: int = 4,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: ConfigType = dict(
                     type='GN', num_groups=32, requires_grad=True),
                 loss_dfl: ConfigType = dict(
                     type='DistributionFocalLoss', loss_weight=0.25),
                 loss_ld: ConfigType = dict(
                     type='KnowledgeDistillationKLDivLoss',
                     loss_weight=0.25,
                     T=10),
                 bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'),
                 reg_max: int = 16,
                 init_cfg: MultiConfig = dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='gfl_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs) -> None:
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.reg_max = reg_max
        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            bbox_coder=bbox_coder,
            init_cfg=init_cfg,
            **kwargs)

        if self.train_cfg:
            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
            if self.train_cfg.get('sampler', None) is not None:
                self.sampler = TASK_UTILS.build(
                    self.train_cfg['sampler'],
                    default_args=dict(context=self))
            else:
                self.sampler = PseudoSampler(context=self)

        self.integral = Integral(self.reg_max)
        self.loss_dfl = MODELS.build(loss_dfl)
        self.loss_ld = MODELS.build(loss_ld)

    def distill_loss_by_image_single(self, anchors, new_cls_scores,
                                     new_bbox_preds, ori_cls_inds,
                                     ori_box_inds, ori_cls_scores,
                                     ori_bbox_preds, dist_loss_weight,
                                     ori_num_classes: int,
                                     avg_factor: int) -> tuple:
        """Calculate the distillation losses of a single image based on the
        outputs of the old and the new detection heads.

        Args:
            anchors (Tensor): Box reference of this image with shape
                (num_total_anchors, 4).
            new_cls_scores (Tensor): Cls and quality joint scores predicted
                by the new head for the old classes, with shape
                (num_total_anchors, ori_num_classes).
            new_bbox_preds (Tensor): Box distribution logits predicted by the
                new head, with shape (num_total_anchors, 4*(reg_max+1)).
            ori_cls_inds (Tensor): Anchor indices selected from the old
                model's predictions for classification distillation.
            ori_box_inds (Tensor): Anchor indices selected from the old
                model's predictions for regression distillation.
            ori_cls_scores (Tensor): Cls and quality joint scores predicted
                by the old head, with shape
                (num_total_anchors, ori_num_classes).
            ori_bbox_preds (Tensor): Box distribution logits predicted by the
                old head, with shape (num_total_anchors, 4*(reg_max+1)).
            dist_loss_weight (float): Weight of the distillation losses.
            ori_num_classes (int): Number of classes learned by the old model.
            avg_factor (int): Average factor that is used to average the loss.

        Returns:
            tuple[Tensor, Tensor]: Classification and regression distillation
                losses of this image.
""" # ===========> distillation classification (only u+2 * sigma) using l2 loss new_topk_cls_scores = new_cls_scores.gather(0, ori_cls_inds.unsqueeze(-1).expand(-1, new_cls_scores.size(-1))) ori_topk_cls_scores = ori_cls_scores.gather(0, ori_cls_inds.unsqueeze(-1).expand(-1, ori_cls_scores.size(-1))) loss_dist_cls = dist_loss_weight * self.l2_loss(new_topk_cls_scores, ori_topk_cls_scores) # ===========> distillation regression (only u+2 * sigma) using ld loss anchor_centers = self.anchor_center(anchors) # ori decode bbox, shape (Num,4) ori_bbox_preds_tblr = self.integral(ori_bbox_preds) decode_bbox_pred = distance2bbox(anchor_centers, ori_bbox_preds_tblr) ori_cls_conf = ori_cls_scores.sigmoid() cls_conf, ids = ori_cls_conf.max(dim=-1) # nms nms_cfg = dict(iou_threshold=0.005) # 0.005 thr_bboxes, thr_scores, thr_id = decode_bbox_pred[ori_box_inds], cls_conf[ori_box_inds], \ ids[ori_box_inds] _, keep = batched_nms(thr_bboxes, thr_scores, thr_id, nms_cfg) nms_bbox_preds = new_bbox_preds.gather( 0, ori_box_inds.unsqueeze(-1).expand(-1, new_bbox_preds.size(-1))) new_topk_bbox_preds = nms_bbox_preds.gather( 0, keep.unsqueeze(-1).expand(-1, nms_bbox_preds.size(-1))) nms_ori_topk_bbox_preds = ori_bbox_preds.gather( 0, ori_box_inds.unsqueeze(-1).expand(-1, ori_bbox_preds.size(-1))) ori_topk_bbox_preds = nms_ori_topk_bbox_preds.gather( 0, keep.unsqueeze(-1).expand(-1, nms_ori_topk_bbox_preds.size(-1))) new_topk_bbox_corners = new_topk_bbox_preds.reshape(-1, self.reg_max + 1) ori_topk_pred_corners = ori_topk_bbox_preds.reshape(-1, self.reg_max + 1) weight_targets = new_cls_scores.reshape(-1, ori_num_classes)[ori_box_inds].detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][keep.reshape(-1)] loss_dist_bbox = dist_loss_weight * self.loss_ld(new_topk_bbox_corners, ori_topk_pred_corners, weight=weight_targets[:, None].expand(-1, 4).reshape( -1), avg_factor=4.0) return loss_dist_cls, loss_dist_bbox def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, stride: Tuple[int], ori_num_classes: int, avg_factor: int) -> dict: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (Tuple[int]): Stride in this scale level. avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
anchors = anchors.reshape(-1, 4) # cls_score = cls_score.permute(0, 2, 3, # 1).reshape(-1, self.cls_out_channels) cls_score = cls_score[:, ori_num_classes:].permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels - ori_num_classes) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes - ori_num_classes # only optimize the novel classes labels[labels == self.num_classes] = bg_class_ind # only optimize the novel classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=avg_factor) return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() @staticmethod def l2_loss(pred, target, reduction='mean'): r"""Function that takes the mean element-wise square value difference. """ assert target.size() == pred.size() loss = (pred - target).pow(2).float() if reduction != 'none': loss = torch.mean(loss) if reduction == 'mean' else torch.sum(loss) return loss def loss_by_feat(self, ori_outs: Tuple[Tensor], new_outs: Tuple[Tensor], ori_topk_cls_inds, # for distillation ori_topk_cls_scores, # for distillation ori_topk_bbox_inds, # for distillation ori_topk_bbox_preds, # for distillation ori_num_classes, dist_loss_weight, model, batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. 
Returns: dict[str, Tensor]: A dictionary of loss components. """ # ****************************** ori loss ********************************** cls_scores, bbox_preds = new_outs num_imgs = cls_scores[0].size(0) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() losses_cls, losses_bbox, losses_dfl, \ avg_factor = multi_apply( self.loss_by_feat_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.prior_generator.strides, ori_num_classes=ori_num_classes, avg_factor=avg_factor) avg_factor = sum(avg_factor) avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) # ****************************** distill loss ********************************** anchor_list = torch.cat(anchor_list, dim=1) bbox_preds_list = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4 * (self.reg_max + 1)) for bbox_pred in bbox_preds] bbox_preds_list = torch.cat(bbox_preds_list, dim=1) ori_cls_scores, ori_bbox_preds = ori_outs ori_cls_scores_list = [ ori_cls_score[:, :ori_num_classes, :, :].permute(0, 2, 3, 1).reshape( num_imgs, -1, ori_num_classes) for ori_cls_score in ori_cls_scores] ori_cls_scores_list = torch.cat(ori_cls_scores_list, dim=1) ori_bbox_preds_list = [ ori_bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4 * (self.reg_max + 1)) for ori_bbox_pred in ori_bbox_preds] ori_bbox_preds_list = torch.cat(ori_bbox_preds_list, dim=1) new_cls_scores_list = [ cls_score[:, :ori_num_classes, :, :].permute(0, 2, 3, 1).reshape( num_imgs, -1, ori_num_classes) for cls_score in cls_scores] new_cls_scores_list = torch.cat(new_cls_scores_list, dim=1) loss_dist_cls, loss_dist_bbox = multi_apply( self.distill_loss_by_image_single, anchor_list, new_cls_scores_list, bbox_preds_list, ori_topk_cls_inds, ori_topk_bbox_inds, ori_cls_scores_list, ori_bbox_preds_list, dist_loss_weight=dist_loss_weight, ori_num_classes=ori_num_classes, avg_factor=avg_factor) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl, loss_dist_cls=loss_dist_cls, loss_dist_bbox=loss_dist_bbox) # def loss(self, ori_out: Tuple[Tensor], new_out: Tuple[Tensor],batch_data_samples: SampleList) -> dict: def loss(self, ori_outs: Tuple[Tensor], new_outs: Tuple[Tensor], batch_data_samples: SampleList, topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds, ori_num_classes, dist_loss_weight, model) -> dict: """Perform forward propagation and loss calculation of the detection head on the features of the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. 
""" # outs = self(x) outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs loss_inputs = (ori_outs, new_outs, topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds, ori_num_classes, dist_loss_weight, model) + ( batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses
21,772
43.892784
115
py
ERD
ERD-main/mmdet/models/dense_heads/condinst_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict, List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, Scale
from mmengine.config import ConfigDict
from mmengine.model import BaseModule, kaiming_init
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures.bbox import cat_boxes
from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,
                         OptInstanceList, reduce_mean)
from ..task_modules.prior_generators import MlvlPointGenerator
from ..utils import (aligned_bilinear, filter_scores_and_topk, multi_apply,
                     relative_coordinate_maps, select_single_mlvl)
from ..utils.misc import empty_instances
from .base_mask_head import BaseMaskHead
from .fcos_head import FCOSHead

INF = 1e8


@MODELS.register_module()
class CondInstBboxHead(FCOSHead):
    """CondInst box head used in https://arxiv.org/abs/1904.02689.

    Note that the CondInst Bbox Head is an extension of the FCOS head.
    The two differences are described as follows:

    1. CondInst box head predicts a set of params for each instance.
    2. CondInst box head returns the pos_gt_inds and pos_inds.

    Args:
        num_params (int): Number of params for instance segmentation.
    """

    def __init__(self, *args, num_params: int = 169, **kwargs) -> None:
        self.num_params = num_params
        super().__init__(*args, **kwargs)

    def _init_layers(self) -> None:
        """Initialize layers of the head."""
        super()._init_layers()
        self.controller = nn.Conv2d(
            self.feat_channels, self.num_params, 3, padding=1)

    def forward_single(self, x: Tensor, scale: Scale,
                       stride: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.
            stride (int): The corresponding stride for feature maps, only
                used to normalize the bbox prediction when self.norm_on_bbox
                is True.

        Returns:
            tuple: scores for each class, bbox predictions, centerness
            predictions and param predictions of input feature maps.
        """
        cls_score, bbox_pred, cls_feat, reg_feat = \
            super(FCOSHead, self).forward_single(x)
        if self.centerness_on_reg:
            centerness = self.conv_centerness(reg_feat)
        else:
            centerness = self.conv_centerness(cls_feat)
        # scale the bbox_pred of different level
        # float to avoid overflow when enabling FP16
        bbox_pred = scale(bbox_pred).float()
        if self.norm_on_bbox:
            # bbox_pred needed for gradient computation has been modified
            # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace
            # F.relu(bbox_pred) with bbox_pred.clamp(min=0)
            bbox_pred = bbox_pred.clamp(min=0)
            if not self.training:
                bbox_pred *= stride
        else:
            bbox_pred = bbox_pred.exp()
        param_pred = self.controller(reg_feat)
        return cls_score, bbox_pred, centerness, param_pred

    def loss_by_feat(
        self,
        cls_scores: List[Tensor],
        bbox_preds: List[Tensor],
        centernesses: List[Tensor],
        param_preds: List[Tensor],
        batch_gt_instances: InstanceList,
        batch_img_metas: List[dict],
        batch_gt_instances_ignore: OptInstanceList = None
    ) -> Dict[str, Tensor]:
        """Calculate the loss based on the features extracted by the detection
        head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
centernesses (list[Tensor]): centerness for each scale level, each is a 4D-tensor, the channel number is num_points * 1. param_preds (List[Tensor]): param_pred for each scale level, each is a 4D-tensor, the channel number is num_params. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(centernesses) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] # Need stride for rel coord compute all_level_points_strides = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device, with_stride=True) all_level_points = [i[:, :2] for i in all_level_points_strides] all_level_strides = [i[:, 2] for i in all_level_points_strides] labels, bbox_targets, pos_inds_list, pos_gt_inds_list = \ self.get_targets(all_level_points, batch_gt_instances) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and centerness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_centerness = [ centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_centerness = torch.cat(flatten_centerness) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) num_pos = torch.tensor( len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) num_pos = max(reduce_mean(num_pos), 1.0) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_centerness = flatten_centerness[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_centerness_targets = self.centerness_target(pos_bbox_targets) # centerness weighted iou loss centerness_denorm = max( reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) if len(pos_inds) > 0: pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=centerness_denorm) loss_centerness = self.loss_centerness( pos_centerness, pos_centerness_targets, avg_factor=num_pos) else: loss_bbox = pos_bbox_preds.sum() loss_centerness = pos_centerness.sum() self._raw_positive_infos.update(cls_scores=cls_scores) self._raw_positive_infos.update(centernesses=centernesses) self._raw_positive_infos.update(param_preds=param_preds) self._raw_positive_infos.update(all_level_points=all_level_points) 
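        # Note (added): raw head outputs and positive indices are cached here
        # so that `get_positive_infos` can later assemble the per-instance
        # params consumed by the CondInst mask head.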
self._raw_positive_infos.update(all_level_strides=all_level_strides) self._raw_positive_infos.update(pos_gt_inds_list=pos_gt_inds_list) self._raw_positive_infos.update(pos_inds_list=pos_inds_list) return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness) def get_targets( self, points: List[Tensor], batch_gt_instances: InstanceList ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]: """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: Targets of each level. - concat_lvl_labels (list[Tensor]): Labels of each level. - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ level. - pos_inds_list (list[Tensor]): pos_inds of each image. - pos_gt_inds_list (List[Tensor]): pos_gt_inds of each image. """ assert len(points) == len(self.regress_ranges) num_levels = len(points) # expand regress ranges to align with points expanded_regress_ranges = [ points[i].new_tensor(self.regress_ranges[i])[None].expand_as( points[i]) for i in range(num_levels) ] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] # get labels and bbox_targets of each image labels_list, bbox_targets_list, pos_inds_list, pos_gt_inds_list = \ multi_apply( self._get_targets_single, batch_gt_instances, points=concat_points, regress_ranges=concat_regress_ranges, num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [ bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) bbox_targets = torch.cat( [bbox_targets[i] for bbox_targets in bbox_targets_list]) if self.norm_on_bbox: bbox_targets = bbox_targets / self.strides[i] concat_lvl_bbox_targets.append(bbox_targets) return (concat_lvl_labels, concat_lvl_bbox_targets, pos_inds_list, pos_gt_inds_list) def _get_targets_single( self, gt_instances: InstanceData, points: Tensor, regress_ranges: Tensor, num_points_per_lvl: List[int] ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Compute regression and classification targets for a single image.""" num_points = points.size(0) num_gts = len(gt_instances) gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels gt_masks = gt_instances.get('masks', None) if num_gts == 0: return gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 4)), \ gt_bboxes.new_zeros((0,), dtype=torch.int64), \ gt_bboxes.new_zeros((0,), dtype=torch.int64) areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1]) # TODO: figure out why these two are different # areas = areas[None].expand(num_points, num_gts) areas = areas[None].repeat(num_points, 1) regress_ranges = regress_ranges[:, None, :].expand( num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None].expand(num_points, num_gts) ys = ys[:, None].expand(num_points, num_gts) left = xs - 
gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if self.center_sampling: # condition1: inside a `center bbox` radius = self.center_sample_radius # if gt_mask not None, use gt mask's centroid to determine # the center region rather than gt_bbox center if gt_masks is None: center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 else: h, w = gt_masks.height, gt_masks.width masks = gt_masks.to_tensor( dtype=torch.bool, device=gt_bboxes.device) yys = torch.arange( 0, h, dtype=torch.float32, device=masks.device) xxs = torch.arange( 0, w, dtype=torch.float32, device=masks.device) # m00/m10/m01 represent the moments of a contour # centroid is computed by m00/m10 and m00/m01 m00 = masks.sum(dim=-1).sum(dim=-1).clamp(min=1e-6) m10 = (masks * xxs).sum(dim=-1).sum(dim=-1) m01 = (masks * yys[:, None]).sum(dim=-1).sum(dim=-1) center_xs = m10 / m00 center_ys = m01 / m00 center_xs = center_xs[None].expand(num_points, num_gts) center_ys = center_ys[None].expand(num_points, num_gts) center_gts = torch.zeros_like(gt_bboxes) stride = center_xs.new_zeros(center_xs.shape) # project the points on current lvl back to the `original` sizes lvl_begin = 0 for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): lvl_end = lvl_begin + num_points_lvl stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius lvl_begin = lvl_end x_mins = center_xs - stride y_mins = center_ys - stride x_maxs = center_xs + stride y_maxs = center_ys + stride center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], x_mins, gt_bboxes[..., 0]) center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], y_mins, gt_bboxes[..., 1]) center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2], gt_bboxes[..., 2], x_maxs) center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], gt_bboxes[..., 3], y_maxs) cb_dist_left = xs - center_gts[..., 0] cb_dist_right = center_gts[..., 2] - xs cb_dist_top = ys - center_gts[..., 1] cb_dist_bottom = center_gts[..., 3] - ys center_bbox = torch.stack( (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 else: # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 # condition2: limit the regression range for each location max_regress_distance = bbox_targets.max(-1)[0] inside_regress_range = ( (max_regress_distance >= regress_ranges[..., 0]) & (max_regress_distance <= regress_ranges[..., 1])) # if there are still more than one objects for a location, # we choose the one with minimal area areas[inside_gt_bbox_mask == 0] = INF areas[inside_regress_range == 0] = INF min_area, min_area_inds = areas.min(dim=1) labels = gt_labels[min_area_inds] labels[min_area == INF] = self.num_classes # set as BG bbox_targets = bbox_targets[range(num_points), min_area_inds] # return pos_inds & pos_gt_inds bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().reshape(-1) pos_gt_inds = min_area_inds[labels < self.num_classes] return labels, bbox_targets, pos_inds, pos_gt_inds def get_positive_infos(self) -> InstanceList: """Get positive information from sampling results. Returns: list[:obj:`InstanceData`]: Positive information of each image, usually including positive bboxes, positive labels, positive priors, etc. 
""" assert len(self._raw_positive_infos) > 0 pos_gt_inds_list = self._raw_positive_infos['pos_gt_inds_list'] pos_inds_list = self._raw_positive_infos['pos_inds_list'] num_imgs = len(pos_gt_inds_list) cls_score_list = [] centerness_list = [] param_pred_list = [] point_list = [] stride_list = [] for cls_score_per_lvl, centerness_per_lvl, param_pred_per_lvl,\ point_per_lvl, stride_per_lvl in \ zip(self._raw_positive_infos['cls_scores'], self._raw_positive_infos['centernesses'], self._raw_positive_infos['param_preds'], self._raw_positive_infos['all_level_points'], self._raw_positive_infos['all_level_strides']): cls_score_per_lvl = \ cls_score_per_lvl.permute( 0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes) centerness_per_lvl = \ centerness_per_lvl.permute( 0, 2, 3, 1).reshape(num_imgs, -1, 1) param_pred_per_lvl = \ param_pred_per_lvl.permute( 0, 2, 3, 1).reshape(num_imgs, -1, self.num_params) point_per_lvl = point_per_lvl.unsqueeze(0).repeat(num_imgs, 1, 1) stride_per_lvl = stride_per_lvl.unsqueeze(0).repeat(num_imgs, 1) cls_score_list.append(cls_score_per_lvl) centerness_list.append(centerness_per_lvl) param_pred_list.append(param_pred_per_lvl) point_list.append(point_per_lvl) stride_list.append(stride_per_lvl) cls_scores = torch.cat(cls_score_list, dim=1) centernesses = torch.cat(centerness_list, dim=1) param_preds = torch.cat(param_pred_list, dim=1) all_points = torch.cat(point_list, dim=1) all_strides = torch.cat(stride_list, dim=1) positive_infos = [] for i, (pos_gt_inds, pos_inds) in enumerate(zip(pos_gt_inds_list, pos_inds_list)): pos_info = InstanceData() pos_info.points = all_points[i][pos_inds] pos_info.strides = all_strides[i][pos_inds] pos_info.scores = cls_scores[i][pos_inds] pos_info.centernesses = centernesses[i][pos_inds] pos_info.param_preds = param_preds[i][pos_inds] pos_info.pos_assigned_gt_inds = pos_gt_inds pos_info.pos_inds = pos_inds positive_infos.append(pos_info) return positive_infos def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, param_preds: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. param_preds (list[Tensor], optional): Params for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * num_params, H, W) batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. 
Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) if score_factors is None: # e.g. Retina, FreeAnchor, Foveabox, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, AutoAssign, etc. with_score_factors = True assert len(cls_scores) == len(score_factors) num_levels = len(cls_scores) featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] all_level_points_strides = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device, with_stride=True) all_level_points = [i[:, :2] for i in all_level_points_strides] all_level_strides = [i[:, 2] for i in all_level_points_strides] result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] cls_score_list = select_single_mlvl( cls_scores, img_id, detach=True) bbox_pred_list = select_single_mlvl( bbox_preds, img_id, detach=True) if with_score_factors: score_factor_list = select_single_mlvl( score_factors, img_id, detach=True) else: score_factor_list = [None for _ in range(num_levels)] param_pred_list = select_single_mlvl( param_preds, img_id, detach=True) results = self._predict_by_feat_single( cls_score_list=cls_score_list, bbox_pred_list=bbox_pred_list, score_factor_list=score_factor_list, param_pred_list=param_pred_list, mlvl_points=all_level_points, mlvl_strides=all_level_strides, img_meta=img_meta, cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(results) return result_list def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], param_pred_list: List[Tensor], mlvl_points: List[Tensor], mlvl_strides: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). param_pred_list (List[Tensor]): Param predition from all scale levels of a single image, each item has shape (num_priors * num_params, H, W). mlvl_points (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. It has shape (num_priors, 2) mlvl_strides (List[Tensor]): Each element in the list is the stride of a single level in feature pyramid. It has shape (num_priors, 1) img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). 
- bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ if score_factor_list[0] is None: # e.g. Retina, FreeAnchor, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, etc. with_score_factors = True cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bbox_preds = [] mlvl_param_preds = [] mlvl_valid_points = [] mlvl_valid_strides = [] mlvl_scores = [] mlvl_labels = [] if with_score_factors: mlvl_score_factors = [] else: mlvl_score_factors = None for level_idx, (cls_score, bbox_pred, score_factor, param_pred, points, strides) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, param_pred_list, mlvl_points, mlvl_strides)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] dim = self.bbox_coder.encode_size bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim) if with_score_factors: score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = cls_score.softmax(-1)[:, :-1] param_pred = param_pred.permute(1, 2, 0).reshape(-1, self.num_params) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. score_thr = cfg.get('score_thr', 0) results = filter_scores_and_topk( scores, score_thr, nms_pre, dict( bbox_pred=bbox_pred, param_pred=param_pred, points=points, strides=strides)) scores, labels, keep_idxs, filtered_results = results bbox_pred = filtered_results['bbox_pred'] param_pred = filtered_results['param_pred'] points = filtered_results['points'] strides = filtered_results['strides'] if with_score_factors: score_factor = score_factor[keep_idxs] mlvl_bbox_preds.append(bbox_pred) mlvl_param_preds.append(param_pred) mlvl_valid_points.append(points) mlvl_valid_strides.append(strides) mlvl_scores.append(scores) mlvl_labels.append(labels) if with_score_factors: mlvl_score_factors.append(score_factor) bbox_pred = torch.cat(mlvl_bbox_preds) priors = cat_boxes(mlvl_valid_points) bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape) results = InstanceData() results.bboxes = bboxes results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) results.param_preds = torch.cat(mlvl_param_preds) results.points = torch.cat(mlvl_valid_points) results.strides = torch.cat(mlvl_valid_strides) if with_score_factors: results.score_factors = torch.cat(mlvl_score_factors) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) class MaskFeatModule(BaseModule): """CondInst mask feature map branch used in \ https://arxiv.org/abs/1904.02689. Args: in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels of the mask feature map branch. start_level (int): The starting feature map level from RPN that will be used to predict the mask feature map. end_level (int): The ending feature map level from rpn that will be used to predict the mask feature map. out_channels (int): Number of output channels of the mask feature map branch. 
This is the channel count of the mask feature map that to be dynamically convolved with the predicted kernel. mask_stride (int): Downsample factor of the mask feature map output. Defaults to 4. num_stacked_convs (int): Number of convs in mask feature branch. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels: int, feat_channels: int, start_level: int, end_level: int, out_channels: int, mask_stride: int = 4, num_stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01) ], **kwargs) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.feat_channels = feat_channels self.start_level = start_level self.end_level = end_level self.mask_stride = mask_stride self.num_stacked_convs = num_stacked_convs assert start_level >= 0 and end_level >= start_level self.out_channels = out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self.convs_all_levels = nn.ModuleList() for i in range(self.start_level, self.end_level + 1): convs_per_level = nn.Sequential() convs_per_level.add_module( f'conv{i}', ConvModule( self.in_channels, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False, bias=False)) self.convs_all_levels.append(convs_per_level) conv_branch = [] for _ in range(self.num_stacked_convs): conv_branch.append( ConvModule( self.feat_channels, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=False)) self.conv_branch = nn.Sequential(*conv_branch) self.conv_pred = nn.Conv2d( self.feat_channels, self.out_channels, 1, stride=1) def init_weights(self) -> None: """Initialize weights of the head.""" super().init_weights() kaiming_init(self.convs_all_levels, a=1, distribution='uniform') kaiming_init(self.conv_branch, a=1, distribution='uniform') kaiming_init(self.conv_pred, a=1, distribution='uniform') def forward(self, x: Tuple[Tensor]) -> Tensor: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: Tensor: The predicted mask feature map. """ inputs = x[self.start_level:self.end_level + 1] assert len(inputs) == (self.end_level - self.start_level + 1) feature_add_all_level = self.convs_all_levels[0](inputs[0]) target_h, target_w = feature_add_all_level.size()[2:] for i in range(1, len(inputs)): input_p = inputs[i] x_p = self.convs_all_levels[i](input_p) h, w = x_p.size()[2:] factor_h = target_h // h factor_w = target_w // w assert factor_h == factor_w feature_per_level = aligned_bilinear(x_p, factor_h) feature_add_all_level = feature_add_all_level + \ feature_per_level feature_add_all_level = self.conv_branch(feature_add_all_level) feature_pred = self.conv_pred(feature_add_all_level) return feature_pred @MODELS.register_module() class CondInstMaskHead(BaseMaskHead): """CondInst mask head used in https://arxiv.org/abs/1904.02689. This head outputs the mask for CondInst. Args: mask_feature_head (dict): Config of CondInstMaskFeatHead. num_layers (int): Number of dynamic conv layers. feat_channels (int): Number of channels in the dynamic conv. mask_out_stride (int): The stride of the mask feat. 
size_of_interest (int): The size of the region used in rel coord. max_masks_to_train (int): Maximum number of masks to train for each image. loss_segm (:obj:`ConfigDict` or dict, optional): Config of segmentation loss. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of head. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of head. """ def __init__(self, mask_feature_head: ConfigType, num_layers: int = 3, feat_channels: int = 8, mask_out_stride: int = 4, size_of_interest: int = 8, max_masks_to_train: int = -1, topk_masks_per_img: int = -1, loss_mask: ConfigType = None, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None) -> None: super().__init__() self.mask_feature_head = MaskFeatModule(**mask_feature_head) self.mask_feat_stride = self.mask_feature_head.mask_stride self.in_channels = self.mask_feature_head.out_channels self.num_layers = num_layers self.feat_channels = feat_channels self.size_of_interest = size_of_interest self.mask_out_stride = mask_out_stride self.max_masks_to_train = max_masks_to_train self.topk_masks_per_img = topk_masks_per_img self.prior_generator = MlvlPointGenerator([self.mask_feat_stride]) self.train_cfg = train_cfg self.test_cfg = test_cfg self.loss_mask = MODELS.build(loss_mask) self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" weight_nums, bias_nums = [], [] for i in range(self.num_layers): if i == 0: weight_nums.append((self.in_channels + 2) * self.feat_channels) bias_nums.append(self.feat_channels) elif i == self.num_layers - 1: weight_nums.append(self.feat_channels * 1) bias_nums.append(1) else: weight_nums.append(self.feat_channels * self.feat_channels) bias_nums.append(self.feat_channels) self.weight_nums = weight_nums self.bias_nums = bias_nums self.num_params = sum(weight_nums) + sum(bias_nums) def parse_dynamic_params( self, params: Tensor) -> Tuple[List[Tensor], List[Tensor]]: """parse the dynamic params for dynamic conv.""" num_insts = params.size(0) params_splits = list( torch.split_with_sizes( params, self.weight_nums + self.bias_nums, dim=1)) weight_splits = params_splits[:self.num_layers] bias_splits = params_splits[self.num_layers:] for i in range(self.num_layers): if i < self.num_layers - 1: weight_splits[i] = weight_splits[i].reshape( num_insts * self.in_channels, -1, 1, 1) bias_splits[i] = bias_splits[i].reshape(num_insts * self.in_channels) else: # out_channels x in_channels x 1 x 1 weight_splits[i] = weight_splits[i].reshape( num_insts * 1, -1, 1, 1) bias_splits[i] = bias_splits[i].reshape(num_insts) return weight_splits, bias_splits def dynamic_conv_forward(self, features: Tensor, weights: List[Tensor], biases: List[Tensor], num_insts: int) -> Tensor: """dynamic forward, each layer follow a relu.""" n_layers = len(weights) x = features for i, (w, b) in enumerate(zip(weights, biases)): x = F.conv2d(x, w, bias=b, stride=1, padding=0, groups=num_insts) if i < n_layers - 1: x = F.relu(x) return x def forward(self, x: tuple, positive_infos: InstanceList) -> tuple: """Forward feature from the upstream network to get prototypes and linearly combine the prototypes, using masks coefficients, into instance masks. Finally, crop the instance masks with given bboxes. Args: x (Tuple[Tensor]): Feature from the upstream network, which is a 4D-tensor. positive_infos (List[:obj:``InstanceData``]): Positive information that calculate from detect head. 
Returns: tuple: Predicted instance segmentation masks """ mask_feats = self.mask_feature_head(x) return multi_apply(self.forward_single, mask_feats, positive_infos) def forward_single(self, mask_feat: Tensor, positive_info: InstanceData) -> Tensor: """Forward features of a each image.""" pos_param_preds = positive_info.get('param_preds') pos_points = positive_info.get('points') pos_strides = positive_info.get('strides') num_inst = pos_param_preds.shape[0] mask_feat = mask_feat[None].repeat(num_inst, 1, 1, 1) _, _, H, W = mask_feat.size() if num_inst == 0: return (pos_param_preds.new_zeros((0, 1, H, W)), ) locations = self.prior_generator.single_level_grid_priors( mask_feat.size()[2:], 0, device=mask_feat.device) rel_coords = relative_coordinate_maps(locations, pos_points, pos_strides, self.size_of_interest, mask_feat.size()[2:]) mask_head_inputs = torch.cat([rel_coords, mask_feat], dim=1) mask_head_inputs = mask_head_inputs.reshape(1, -1, H, W) weights, biases = self.parse_dynamic_params(pos_param_preds) mask_preds = self.dynamic_conv_forward(mask_head_inputs, weights, biases, num_inst) mask_preds = mask_preds.reshape(-1, H, W) mask_preds = aligned_bilinear( mask_preds.unsqueeze(0), int(self.mask_feat_stride / self.mask_out_stride)).squeeze(0) return (mask_preds, ) def loss_by_feat(self, mask_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], positive_infos: InstanceList, **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mask_preds (list[Tensor]): List of predicted masks, each has shape (num_classes, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. positive_infos (List[:obj:``InstanceData``]): Information of positive samples of each image that are assigned in detection head. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert positive_infos is not None, \ 'positive_infos should not be None in `CondInstMaskHead`' losses = dict() loss_mask = 0. num_imgs = len(mask_preds) total_pos = 0 for idx in range(num_imgs): (mask_pred, pos_mask_targets, num_pos) = \ self._get_targets_single( mask_preds[idx], batch_gt_instances[idx], positive_infos[idx]) # mask loss total_pos += num_pos if num_pos == 0 or pos_mask_targets is None: loss = mask_pred.new_zeros(1).mean() else: loss = self.loss_mask( mask_pred, pos_mask_targets, reduction_override='none').sum() loss_mask += loss if total_pos == 0: total_pos += 1 # avoid nan loss_mask = loss_mask / total_pos losses.update(loss_mask=loss_mask) return losses def _get_targets_single(self, mask_preds: Tensor, gt_instances: InstanceData, positive_info: InstanceData): """Compute targets for predictions of single image. Args: mask_preds (Tensor): Predicted prototypes with shape (num_classes, H, W). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes``, ``labels``, and ``masks`` attributes. positive_info (:obj:`InstanceData`): Information of positive samples that are assigned in detection head. It usually contains following keys. - pos_assigned_gt_inds (Tensor): Assigner GT indexes of positive proposals, has shape (num_pos, ) - pos_inds (Tensor): Positive index of image, has shape (num_pos, ). - param_pred (Tensor): Positive param preditions with shape (num_pos, num_params). Returns: tuple: Usually returns a tuple containing learning targets. 
- mask_preds (Tensor): Positive predicted mask with shape (num_pos, mask_h, mask_w). - pos_mask_targets (Tensor): Positive mask targets with shape (num_pos, mask_h, mask_w). - num_pos (int): Positive numbers. """ gt_bboxes = gt_instances.bboxes device = gt_bboxes.device gt_masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device).float() # process with mask targets pos_assigned_gt_inds = positive_info.get('pos_assigned_gt_inds') scores = positive_info.get('scores') centernesses = positive_info.get('centernesses') num_pos = pos_assigned_gt_inds.size(0) if gt_masks.size(0) == 0 or num_pos == 0: return mask_preds, None, 0 # Since we're producing (near) full image masks, # it'd take too much vram to backprop on every single mask. # Thus we select only a subset. if (self.max_masks_to_train != -1) and \ (num_pos > self.max_masks_to_train): perm = torch.randperm(num_pos) select = perm[:self.max_masks_to_train] mask_preds = mask_preds[select] pos_assigned_gt_inds = pos_assigned_gt_inds[select] num_pos = self.max_masks_to_train elif self.topk_masks_per_img != -1: unique_gt_inds = pos_assigned_gt_inds.unique() num_inst_per_gt = max( int(self.topk_masks_per_img / len(unique_gt_inds)), 1) keep_mask_preds = [] keep_pos_assigned_gt_inds = [] for gt_ind in unique_gt_inds: per_inst_pos_inds = (pos_assigned_gt_inds == gt_ind) mask_preds_per_inst = mask_preds[per_inst_pos_inds] gt_inds_per_inst = pos_assigned_gt_inds[per_inst_pos_inds] if sum(per_inst_pos_inds) > num_inst_per_gt: per_inst_scores = scores[per_inst_pos_inds].sigmoid().max( dim=1)[0] per_inst_centerness = centernesses[ per_inst_pos_inds].sigmoid().reshape(-1, ) select = (per_inst_scores * per_inst_centerness).topk( k=num_inst_per_gt, dim=0)[1] mask_preds_per_inst = mask_preds_per_inst[select] gt_inds_per_inst = gt_inds_per_inst[select] keep_mask_preds.append(mask_preds_per_inst) keep_pos_assigned_gt_inds.append(gt_inds_per_inst) mask_preds = torch.cat(keep_mask_preds) pos_assigned_gt_inds = torch.cat(keep_pos_assigned_gt_inds) num_pos = pos_assigned_gt_inds.size(0) # Follow the origin implement start = int(self.mask_out_stride // 2) gt_masks = gt_masks[:, start::self.mask_out_stride, start::self.mask_out_stride] gt_masks = gt_masks.gt(0.5).float() pos_mask_targets = gt_masks[pos_assigned_gt_inds] return (mask_preds, pos_mask_targets, num_pos) def predict_by_feat(self, mask_preds: List[Tensor], results_list: InstanceList, batch_img_metas: List[dict], rescale: bool = True, **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. Args: mask_preds (list[Tensor]): Predicted prototypes with shape (num_classes, H, W). results_list (List[:obj:``InstanceData``]): BBoxHead results. batch_img_metas (list[dict]): Meta information of all images. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
""" assert len(mask_preds) == len(results_list) == len(batch_img_metas) for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] results = results_list[img_id] bboxes = results.bboxes mask_pred = mask_preds[img_id] if bboxes.shape[0] == 0 or mask_pred.shape[0] == 0: results_list[img_id] = empty_instances( [img_meta], bboxes.device, task_type='mask', instance_results=[results])[0] else: im_mask = self._predict_by_feat_single( mask_preds=mask_pred, bboxes=bboxes, img_meta=img_meta, rescale=rescale) results.masks = im_mask return results_list def _predict_by_feat_single(self, mask_preds: Tensor, bboxes: Tensor, img_meta: dict, rescale: bool, cfg: OptConfigType = None): """Transform a single image's features extracted from the head into mask results. Args: mask_preds (Tensor): Predicted prototypes, has shape [H, W, N]. img_meta (dict): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If rescale is False, then returned masks will fit the scale of imgs[0]. cfg (dict, optional): Config used in test phase. Defaults to None. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ cfg = self.test_cfg if cfg is None else cfg scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) img_h, img_w = img_meta['img_shape'][:2] ori_h, ori_w = img_meta['ori_shape'][:2] mask_preds = mask_preds.sigmoid().unsqueeze(0) mask_preds = aligned_bilinear(mask_preds, self.mask_out_stride) mask_preds = mask_preds[:, :, :img_h, :img_w] if rescale: # in-placed rescale the bboxes scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes /= scale_factor masks = F.interpolate( mask_preds, (ori_h, ori_w), mode='bilinear', align_corners=False).squeeze(0) > cfg.mask_thr else: masks = mask_preds.squeeze(0) > cfg.mask_thr return masks
53,738
42.797066
79
py
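The strided-slicing trick in `_get_targets_single` above is easy to miss: GT masks stay at full resolution and are aligned to the stride-`s` mask predictions by sampling every `s`-th pixel starting at `s // 2`, so samples sit at pixel centers. A minimal sketch with hypothetical toy sizes, not the repo's code:

```python
import torch

# Toy sizes (hypothetical): 2 instances on a 32x32 image, stride-4 masks.
mask_out_stride = 4
gt_masks = torch.zeros(2, 32, 32)
gt_masks[0, 8:24, 8:24] = 1.0  # a square instance

# Sample every stride-th pixel, starting at stride // 2 (pixel centers),
# then re-binarize, matching the head above.
start = int(mask_out_stride // 2)
sub = gt_masks[:, start::mask_out_stride, start::mask_out_stride]
sub = sub.gt(0.5).float()
print(sub.shape)  # torch.Size([2, 8, 8]): H/4 x W/4 targets
```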
ERD
ERD-main/mmdet/models/test_time_augs/det_tta.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch from mmcv.ops import batched_nms from mmengine.model import BaseTTAModel from mmengine.registry import MODELS from mmengine.structures import InstanceData from torch import Tensor from mmdet.structures import DetDataSample from mmdet.structures.bbox import bbox_flip @MODELS.register_module() class DetTTAModel(BaseTTAModel): """Merge augmented detection results. Only bboxes and their corresponding scores under flipping and multi-scale resizing can be processed now. Examples: >>> tta_model = dict( >>> type='DetTTAModel', >>> tta_cfg=dict(nms=dict( >>> type='nms', >>> iou_threshold=0.5), >>> max_per_img=100)) >>> >>> tta_pipeline = [ >>> dict(type='LoadImageFromFile', >>> backend_args=None), >>> dict( >>> type='TestTimeAug', >>> transforms=[[ >>> dict(type='Resize', >>> scale=(1333, 800), >>> keep_ratio=True), >>> ], [ >>> dict(type='RandomFlip', prob=1.), >>> dict(type='RandomFlip', prob=0.) >>> ], [ >>> dict( >>> type='PackDetInputs', >>> meta_keys=('img_id', 'img_path', 'ori_shape', >>> 'img_shape', 'scale_factor', 'flip', >>> 'flip_direction')) >>> ]])] """ def __init__(self, tta_cfg=None, **kwargs): super().__init__(**kwargs) self.tta_cfg = tta_cfg def merge_aug_bboxes(self, aug_bboxes: List[Tensor], aug_scores: List[Tensor], img_metas: List[dict]) -> Tuple[Tensor, Tensor]: """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_metas (list[dict]): Meta information of each image, e.g., image size, flipping direction, etc. Returns: tuple[Tensor]: ``bboxes`` with shape (n,4), where 4 represent (tl_x, tl_y, br_x, br_y) and ``scores`` with shape (n,). """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): ori_shape = img_info['ori_shape'] flip = img_info['flip'] flip_direction = img_info['flip_direction'] if flip: bboxes = bbox_flip( bboxes=bboxes, img_shape=ori_shape, direction=flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.cat(recovered_bboxes, dim=0) if aug_scores is None: return bboxes else: scores = torch.cat(aug_scores, dim=0) return bboxes, scores def merge_preds(self, data_samples_list: List[List[DetDataSample]]): """Merge batch predictions of enhanced data. Args: data_samples_list (List[List[DetDataSample]]): List of predictions of all enhanced data. The outer list indicates images, and the inner list corresponds to the different views of one image. Each element of the inner list is a ``DetDataSample``. Returns: List[DetDataSample]: Merged batch prediction. """ merged_data_samples = [] for data_samples in data_samples_list: merged_data_samples.append(self._merge_single_sample(data_samples)) return merged_data_samples def _merge_single_sample( self, data_samples: List[DetDataSample]) -> DetDataSample: """Merge predictions which come from the different views of one image to one prediction. Args: data_samples (List[DetDataSample]): List of predictions of enhanced data which come from one image. Returns: DetDataSample: Merged prediction. """ aug_bboxes = [] aug_scores = [] aug_labels = [] img_metas = [] # TODO: support instance segmentation TTA assert data_samples[0].pred_instances.get('masks', None) is None, \ 'TTA of instance segmentation is not supported now.' 
for data_sample in data_samples: aug_bboxes.append(data_sample.pred_instances.bboxes) aug_scores.append(data_sample.pred_instances.scores) aug_labels.append(data_sample.pred_instances.labels) img_metas.append(data_sample.metainfo) merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if merged_bboxes.numel() == 0: return data_samples[0] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.tta_cfg.nms) det_bboxes = det_bboxes[:self.tta_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.tta_cfg.max_per_img] results = InstanceData() _det_bboxes = det_bboxes.clone() results.bboxes = _det_bboxes[:, :-1] results.scores = _det_bboxes[:, -1] results.labels = det_labels det_results = data_samples[0] det_results.pred_instances = results return det_results
5,568
37.406897
79
py
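The flip-back step inside `merge_aug_bboxes` above relies on mmdet's `bbox_flip`; for a horizontal flip it maps `x1, x2` to `W - x2, W - x1` so boxes from the flipped view land back in original coordinates before concatenation and NMS. A minimal self-contained sketch of that mapping (the helper below is a local stand-in, not the library function):

```python
import torch

def hflip_boxes(bboxes: torch.Tensor, img_shape) -> torch.Tensor:
    """Map horizontally flipped (x1, y1, x2, y2) boxes back: x -> W - x."""
    h, w = img_shape
    flipped = bboxes.clone()
    flipped[:, 0] = w - bboxes[:, 2]  # new x1 from old x2
    flipped[:, 2] = w - bboxes[:, 0]  # new x2 from old x1
    return flipped

boxes = torch.tensor([[10., 20., 50., 60.]])
print(hflip_boxes(boxes, (100, 200)))  # tensor([[150.,  20., 190.,  60.]])
```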
ERD
ERD-main/mmdet/models/test_time_augs/merge_augs.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings from typing import List, Optional, Union import numpy as np import torch from mmcv.ops import nms from mmengine.config import ConfigDict from torch import Tensor from mmdet.structures.bbox import bbox_mapping_back # TODO remove this, it is never used in mmdet def merge_aug_proposals(aug_proposals, img_metas, cfg): """Merge augmented proposals (multiscale, flip, etc.) Args: aug_proposals (list[Tensor]): proposals from different testing schemes, shape (n, 5). Note that they are not rescaled to the original image size. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. cfg (dict): rpn test config. Returns: Tensor: shape (n, 4), proposals corresponding to original image scale. """ cfg = copy.deepcopy(cfg) # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ f'max_per_img at the same time, but got {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively. ' \ f'Please delete max_num which will be deprecated.' else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ f'iou_threshold in nms and ' \ f'nms_thr at the same time, but got ' \ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the nms_thr ' \ f'which will be deprecated.' recovered_proposals = [] for proposals, img_info in zip(aug_proposals, img_metas): img_shape = img_info['img_shape'] scale_factor = img_info['scale_factor'] flip = img_info['flip'] flip_direction = img_info['flip_direction'] _proposals = proposals.clone() _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, scale_factor, flip, flip_direction) recovered_proposals.append(_proposals) aug_proposals = torch.cat(recovered_proposals, dim=0) merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(), aug_proposals[:, -1].contiguous(), cfg.nms.iou_threshold) scores = merged_proposals[:, 4] _, order = scores.sort(0, descending=True) num = min(cfg.max_per_img, merged_proposals.shape[0]) order = order[:num] merged_proposals = merged_proposals[order, :] return merged_proposals # TODO remove this, it is never used in mmdet def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_metas (list[dict]): Meta information of each image. rcnn_test_cfg (dict): rcnn test config. 
Returns: tuple: (bboxes, scores) """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.stack(recovered_bboxes).mean(dim=0) if aug_scores is None: return bboxes else: scores = torch.stack(aug_scores).mean(dim=0) return bboxes, scores def merge_aug_results(aug_batch_results, aug_batch_img_metas): """Merge augmented detection results. Only bboxes and their corresponding scores under flipping and multi-scale resizing can be processed now. Args: aug_batch_results (list[list[obj:`InstanceData`]]): Detection results of multiple images with different augmentations. The outer list indicates the augmentations. The inner list indicates the batch dimension. Each item usually contains the following keys. - scores (Tensor): Classification scores, in shape (num_instance,) - labels (Tensor): Labels of bboxes, in shape (num_instances,). - bboxes (Tensor): In shape (num_instances, 4), the last dimension 4 arranged as (x1, y1, x2, y2). aug_batch_img_metas (list[list[dict]]): The outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. Each dict in the list contains information of an image in the batch. Returns: batch_results (list[obj:`InstanceData`]): Same as the input `aug_results` except that all bboxes have been mapped to the original scale. """ num_augs = len(aug_batch_results) num_imgs = len(aug_batch_results[0]) batch_results = [] aug_batch_results = copy.deepcopy(aug_batch_results) for img_id in range(num_imgs): aug_results = [] for aug_id in range(num_augs): img_metas = aug_batch_img_metas[aug_id][img_id] results = aug_batch_results[aug_id][img_id] img_shape = img_metas['img_shape'] scale_factor = img_metas['scale_factor'] flip = img_metas['flip'] flip_direction = img_metas['flip_direction'] bboxes = bbox_mapping_back(results.bboxes, img_shape, scale_factor, flip, flip_direction) results.bboxes = bboxes aug_results.append(results) merged_aug_results = results.cat(aug_results) batch_results.append(merged_aug_results) return batch_results def merge_aug_scores(aug_scores): """Merge augmented bbox scores.""" if isinstance(aug_scores[0], torch.Tensor): return torch.mean(torch.stack(aug_scores), dim=0) else: return np.mean(aug_scores, axis=0) def merge_aug_masks(aug_masks: List[Tensor], img_metas: dict, weights: Optional[Union[list, Tensor]] = None) -> Tensor: """Merge augmented mask prediction. Args: aug_masks (list[Tensor]): each has shape (n, c, h, w). img_metas (dict): Image information. weights (list or Tensor): Weight of each aug_masks, the length should be the same as ``aug_masks``. 
Returns: Tensor: has shape (n, c, h, w) """ recovered_masks = [] for i, mask in enumerate(aug_masks): if weights is not None: assert len(weights) == len(aug_masks) weight = weights[i] else: weight = 1 flip = img_metas.get('flip', False) if flip: flip_direction = img_metas['flip_direction'] if flip_direction == 'horizontal': mask = mask[:, :, :, ::-1] elif flip_direction == 'vertical': mask = mask[:, :, ::-1, :] elif flip_direction == 'diagonal': mask = mask[:, :, :, ::-1] mask = mask[:, :, ::-1, :] else: raise ValueError( f"Invalid flipping direction '{flip_direction}'") recovered_masks.append(mask[None, :] * weight) merged_masks = torch.cat(recovered_masks, 0).mean(dim=0) if weights is not None: merged_masks = merged_masks * len(weights) / sum(weights) return merged_masks
8,469
37.5
79
py
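One subtlety in `merge_aug_masks` above: each view's mask is scaled by its weight before the per-view mean, so the result is renormalized by `len(weights) / sum(weights)` to keep equal weights a no-op. A small numeric sketch of that arithmetic, assuming toy tensors:

```python
import torch

# Two augmented views of the same (1, C, H, W) mask probabilities.
aug_masks = [torch.full((1, 1, 2, 2), 0.8), torch.full((1, 1, 2, 2), 0.4)]
weights = [2.0, 1.0]

# Weight, average over views, then renormalize so that equal weights
# reduce to a plain mean (as in merge_aug_masks above).
recovered = [m * w for m, w in zip(aug_masks, weights)]
merged = torch.cat(recovered, 0).mean(dim=0)
merged = merged * len(weights) / sum(weights)
print(float(merged.mean()))  # (0.8*2 + 0.4*1) / 2 * (2/3) = 0.6667
```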
ERD
ERD-main/mmdet/models/utils/gaussian_target.py
# Copyright (c) OpenMMLab. All rights reserved. from math import sqrt import torch import torch.nn.functional as F def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): """Generate 2D gaussian kernel. Args: radius (int): Radius of gaussian kernel. sigma (int): Sigma of gaussian function. Default: 1. dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. device (str): Device of gaussian tensor. Default: 'cpu'. Returns: h (Tensor): Gaussian kernel with a ``(2 * radius + 1) * (2 * radius + 1)`` shape. """ x = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(1, -1) y = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() h[h < torch.finfo(h.dtype).eps * h.max()] = 0 return h def gen_gaussian_target(heatmap, center, radius, k=1): """Generate 2D gaussian heatmap. Args: heatmap (Tensor): Input heatmap, the gaussian kernel will cover it and maintain the max value. center (list[int]): Coord of gaussian kernel's center. radius (int): Radius of gaussian kernel. k (int): Coefficient of gaussian kernel. Default: 1. Returns: out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. """ diameter = 2 * radius + 1 gaussian_kernel = gaussian2D( radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) x, y = center height, width = heatmap.shape[:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian_kernel[radius - top:radius + bottom, radius - left:radius + right] out_heatmap = heatmap torch.max( masked_heatmap, masked_gaussian * k, out=out_heatmap[y - top:y + bottom, x - left:x + right]) return out_heatmap def gaussian_radius(det_size, min_overlap): r"""Generate 2D gaussian radius. This function is modified from the `official github repo <https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/ utils.py#L65>`_. Given ``min_overlap``, radius could be computed by a quadratic equation according to Vieta's formulas. There are 3 cases for computing gaussian radius, details are as follows: - Explanation of figure: ``lt`` and ``br`` indicate the left-top and bottom-right corner of ground truth box. ``x`` indicates the generated corner at the limited position when ``radius=r``. - Case1: one corner is inside the gt box and the other is outside. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x----------+--+ | | | | | | | | height | | overlap | | | | | | | | | | v +--+---------br--+ - | | | +----------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} \\ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case2: both corners are inside the gt box. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x-------+ | | | | | | |overlap| | height | | | | | +-------x--+ | | | v +----------+-br - To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} \\ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case3: both corners are outside the gt box. ..
code:: text |< width >| x--+----------------+ | | | +-lt-------------+ | - | | | | ^ | | | | | | overlap | | height | | | | | | | | v | +------------br--+ - | | | +----------------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} Args: det_size (list[int]): Shape of object. min_overlap (float): Min IoU with ground truth for boxes generated by keypoints inside the gaussian kernel. Returns: radius (int): Radius of gaussian kernel. """ height, width = det_size a1 = 1 b1 = (height + width) c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = sqrt(b1**2 - 4 * a1 * c1) r1 = (b1 - sq1) / (2 * a1) a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = sqrt(b2**2 - 4 * a2 * c2) r2 = (b2 - sq2) / (2 * a2) a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = sqrt(b3**2 - 4 * a3 * c3) r3 = (b3 + sq3) / (2 * a3) return min(r1, r2, r3) def get_local_maximum(heat, kernel=3): """Extract local maximum pixel with given kernel. Args: heat (Tensor): Target heatmap. kernel (int): Kernel size of max pooling. Default: 3. Returns: heat (Tensor): A heatmap where local maximum pixels maintain its own value and other positions are 0. """ pad = (kernel - 1) // 2 hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad) keep = (hmax == heat).float() return heat * keep def get_topk_from_heatmap(scores, k=20): """Get top k positions from heatmap. Args: scores (Tensor): Target heatmap with shape [batch, num_classes, height, width]. k (int): Target number. Default: 20. Returns: tuple[torch.Tensor]: Scores, indexes, categories and coords of topk keypoint. Containing following Tensors: - topk_scores (Tensor): Max scores of each topk keypoint. - topk_inds (Tensor): Indexes of each topk keypoint. - topk_clses (Tensor): Categories of each topk keypoint. - topk_ys (Tensor): Y-coord of each topk keypoint. - topk_xs (Tensor): X-coord of each topk keypoint. """ batch, _, height, width = scores.size() topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k) topk_clses = topk_inds // (height * width) topk_inds = topk_inds % (height * width) topk_ys = topk_inds // width topk_xs = (topk_inds % width).int().float() return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs def gather_feat(feat, ind, mask=None): """Gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. mask (Tensor | None): Mask of feature map. Default: None. Returns: feat (Tensor): Gathered feature. """ dim = feat.size(2) ind = ind.unsqueeze(2).repeat(1, 1, dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def transpose_and_gather_feat(feat, ind): """Transpose and gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. Returns: feat (Tensor): Transposed and gathered feature. """ feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, feat.size(3)) feat = gather_feat(feat, ind) return feat
8,393
30.204461
79
py
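A usage sketch for the helpers above, wiring `gaussian_radius` into `gen_gaussian_target` the way CenterNet-style heads do; the import path is assumed from this repo's layout:

```python
import torch

# Import path assumed from this repo's layout.
from mmdet.models.utils.gaussian_target import (gaussian_radius,
                                                gen_gaussian_target)

heatmap = torch.zeros(128, 128)
box_h, box_w = 24, 40
# Radius such that a box whose corner is jittered inside the kernel still
# has IoU >= min_overlap with the GT box.
radius = max(0, int(gaussian_radius((box_h, box_w), min_overlap=0.3)))
heatmap = gen_gaussian_target(heatmap, [64, 64], radius)  # center (x, y)
print(float(heatmap.max()))  # 1.0 at the peak (64, 64)
```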
ERD
ERD-main/mmdet/models/utils/point_sample.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops import point_sample from torch import Tensor def get_uncertainty(mask_preds: Tensor, labels: Tensor) -> Tensor: """Estimate uncertainty based on pred logits. We estimate uncertainty as L1 distance between 0.0 and the logits prediction in 'mask_preds' for the foreground class in `classes`. Args: mask_preds (Tensor): mask prediction logits, shape (num_rois, num_classes, mask_height, mask_width). labels (Tensor): Either predicted or ground truth label for each predicted mask, of length num_rois. Returns: scores (Tensor): Uncertainty scores with the most uncertain locations having the highest uncertainty score, shape (num_rois, 1, mask_height, mask_width) """ if mask_preds.shape[1] == 1: gt_class_logits = mask_preds.clone() else: inds = torch.arange(mask_preds.shape[0], device=mask_preds.device) gt_class_logits = mask_preds[inds, labels].unsqueeze(1) return -torch.abs(gt_class_logits) def get_uncertain_point_coords_with_randomness( mask_preds: Tensor, labels: Tensor, num_points: int, oversample_ratio: float, importance_sample_ratio: float) -> Tensor: """Get ``num_points`` most uncertain points with random points during train. Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using 'get_uncertainty()' function that takes point's logit prediction as input. Args: mask_preds (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. labels (Tensor): The ground truth class for each instance. num_points (int): The number of points to sample. oversample_ratio (float): Oversampling parameter. importance_sample_ratio (float): Ratio of points that are sampled via importance sampling. Returns: point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains the coordinates of sampled points. """ assert oversample_ratio >= 1 assert 0 <= importance_sample_ratio <= 1 batch_size = mask_preds.shape[0] num_sampled = int(num_points * oversample_ratio) point_coords = torch.rand( batch_size, num_sampled, 2, device=mask_preds.device) point_logits = point_sample(mask_preds, point_coords) # It is crucial to calculate uncertainty based on the sampled # prediction value for the points. Calculating uncertainties of the # coarse predictions first and sampling them for points leads to # incorrect results. To illustrate this: assume uncertainty func( # logits)=-abs(logits), a sampled point between two coarse # predictions with -1 and 1 logits has 0 logits, and therefore 0 # uncertainty value. However, if we calculate uncertainties for the # coarse predictions first, both will have -1 uncertainty, # and sampled point will get -1 uncertainty. point_uncertainties = get_uncertainty(point_logits, labels) num_uncertain_points = int(importance_sample_ratio * num_points) num_random_points = num_points - num_uncertain_points idx = torch.topk( point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] shift = num_sampled * torch.arange( batch_size, dtype=torch.long, device=mask_preds.device) idx += shift[:, None] point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( batch_size, num_uncertain_points, 2) if num_random_points > 0: rand_roi_coords = torch.rand( batch_size, num_random_points, 2, device=mask_preds.device) point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) return point_coords
3,910
42.94382
75
py
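A usage sketch for `get_uncertain_point_coords_with_randomness` above with PointRend-style defaults (3x oversampling, 75% importance-sampled); it relies on mmcv's `point_sample` as imported in the module, and the import path below is assumed from this repo's layout:

```python
import torch

# Import path assumed; requires mmcv for point_sample.
from mmdet.models.utils.point_sample import \
    get_uncertain_point_coords_with_randomness

mask_preds = torch.randn(4, 1, 28, 28)  # class-agnostic logits for 4 RoIs
labels = torch.zeros(4, dtype=torch.long)  # unused when num_classes == 1
coords = get_uncertain_point_coords_with_randomness(
    mask_preds, labels, num_points=196,
    oversample_ratio=3.0, importance_sample_ratio=0.75)
print(coords.shape)  # torch.Size([4, 196, 2]), coordinates in [0, 1]^2
```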
ERD
ERD-main/mmdet/models/utils/misc.py
# Copyright (c) OpenMMLab. All rights reserved. from functools import partial from typing import List, Sequence, Tuple, Union import numpy as np import torch from mmengine.structures import InstanceData from mmengine.utils import digit_version from six.moves import map, zip from torch import Tensor from torch.autograd import Function from torch.nn import functional as F from mmdet.structures import SampleList from mmdet.structures.bbox import BaseBoxes, get_box_type, stack_boxes from mmdet.structures.mask import BitmapMasks, PolygonMasks from mmdet.utils import OptInstanceList class SigmoidGeometricMean(Function): """Forward and backward function of geometric mean of two sigmoid functions. This implementation with analytical gradient function substitutes the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The original implementation produces NaN gradients during backpropagation if both x and y are very small values. """ @staticmethod def forward(ctx, x, y): x_sigmoid = x.sigmoid() y_sigmoid = y.sigmoid() z = (x_sigmoid * y_sigmoid).sqrt() ctx.save_for_backward(x_sigmoid, y_sigmoid, z) return z @staticmethod def backward(ctx, grad_output): x_sigmoid, y_sigmoid, z = ctx.saved_tensors grad_x = grad_output * z * (1 - x_sigmoid) / 2 grad_y = grad_output * z * (1 - y_sigmoid) / 2 return grad_x, grad_y sigmoid_geometric_mean = SigmoidGeometricMean.apply def interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` to the shape of the `target`. The `source` must be a Tensor, but the `target` can be a Tensor or a np.ndarray with the shape (..., target_h, target_w). Args: source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or (N, C, H, W). target (Tensor | np.ndarray): The interpolation target with the shape (..., target_h, target_w). mode (str): Algorithm used for interpolation. The options are the same as those in F.interpolate(). Default: ``'bilinear'``. align_corners (bool): The same as the argument in F.interpolate(). Returns: Tensor: The interpolated source Tensor. """ assert len(target.shape) >= 2 def _interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` (4D) to the shape of the `target`.""" target_h, target_w = target.shape[-2:] source_h, source_w = source.shape[-2:] if target_h != source_h or target_w != source_w: source = F.interpolate( source, size=(target_h, target_w), mode=mode, align_corners=align_corners) return source if len(source.shape) == 3: source = source[:, None, :, :] source = _interpolate_as(source, target, mode, align_corners) return source[:, 0, :, :] else: return _interpolate_as(source, target, mode, align_corners) def unpack_gt_instances(batch_data_samples: SampleList) -> tuple: """Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based on ``batch_data_samples`` Args: batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple: - batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. - batch_gt_instances_ignore (list[:obj:`InstanceData`]): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. - batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. 
""" batch_gt_instances = [] batch_gt_instances_ignore = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) if 'ignored_instances' in data_sample: batch_gt_instances_ignore.append(data_sample.ignored_instances) else: batch_gt_instances_ignore.append(None) return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas def empty_instances(batch_img_metas: List[dict], device: torch.device, task_type: str, instance_results: OptInstanceList = None, mask_thr_binary: Union[int, float] = 0, box_type: Union[str, type] = 'hbox', use_box_type: bool = False, num_classes: int = 80, score_per_cls: bool = False) -> List[InstanceData]: """Handle predicted instances when RoI is empty. Note: If ``instance_results`` is not None, it will be modified in place internally, and then return ``instance_results`` Args: batch_img_metas (list[dict]): List of image information. device (torch.device): Device of tensor. task_type (str): Expected returned task type. it currently supports bbox and mask. instance_results (list[:obj:`InstanceData`]): List of instance results. mask_thr_binary (int, float): mask binarization threshold. Defaults to 0. box_type (str or type): The empty box type. Defaults to `hbox`. use_box_type (bool): Whether to warp boxes with the box type. Defaults to False. num_classes (int): num_classes of bbox_head. Defaults to 80. score_per_cls (bool): Whether to generate classwise score for the empty instance. ``score_per_cls`` will be True when the model needs to produce raw results without nms. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image """ assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \ f' but got {task_type}' if instance_results is not None: assert len(instance_results) == len(batch_img_metas) results_list = [] for img_id in range(len(batch_img_metas)): if instance_results is not None: results = instance_results[img_id] assert isinstance(results, InstanceData) else: results = InstanceData() if task_type == 'bbox': _, box_type = get_box_type(box_type) bboxes = torch.zeros(0, box_type.box_dim, device=device) if use_box_type: bboxes = box_type(bboxes, clone=False) results.bboxes = bboxes score_shape = (0, num_classes + 1) if score_per_cls else (0, ) results.scores = torch.zeros(score_shape, device=device) results.labels = torch.zeros((0, ), device=device, dtype=torch.long) else: # TODO: Handle the case where rescale is false img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2] # the type of `im_mask` will be torch.bool or torch.uint8, # where uint8 if for visualization and debugging. im_mask = torch.zeros( 0, img_h, img_w, device=device, dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8) results.masks = im_mask results_list.append(results) return results_list def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. 
Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple lists, each list contains \ a kind of returned results by the function """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) def unmap(data, count, inds, fill=0): """Unmap a subset of items (data) back to the original set of items (of size count)""" if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret def mask2ndarray(mask): """Convert Mask to ndarray. Args: mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or torch.Tensor or np.ndarray): The mask to be converted. Returns: np.ndarray: Ndarray mask of shape (n, h, w) that has been converted """ if isinstance(mask, (BitmapMasks, PolygonMasks)): mask = mask.to_ndarray() elif isinstance(mask, torch.Tensor): mask = mask.detach().cpu().numpy() elif not isinstance(mask, np.ndarray): raise TypeError(f'Unsupported {type(mask)} data type') return mask def flip_tensor(src_tensor, flip_direction): """Flip tensor based on flip_direction. Args: src_tensor (Tensor): input feature map, shape (B, C, H, W). flip_direction (str): The flipping direction. Options are 'horizontal', 'vertical', 'diagonal'. Returns: out_tensor (Tensor): Flipped tensor. """ assert src_tensor.ndim == 4 valid_directions = ['horizontal', 'vertical', 'diagonal'] assert flip_direction in valid_directions if flip_direction == 'horizontal': out_tensor = torch.flip(src_tensor, [3]) elif flip_direction == 'vertical': out_tensor = torch.flip(src_tensor, [2]) else: out_tensor = torch.flip(src_tensor, [2, 3]) return out_tensor def select_single_mlvl(mlvl_tensors, batch_id, detach=True): """Extract a multi-scale single image tensor from a multi-scale batch tensor based on batch index. Note: The default value of detach is True, because the proposal gradient needs to be detached during the training of the two-stage model. E.g. Cascade Mask R-CNN. Args: mlvl_tensors (list[Tensor]): Batch tensor for all scale levels, each is a 4D-tensor. batch_id (int): Batch index. detach (bool): Whether detach gradient. Default True. Returns: list[Tensor]: Multi-scale single image tensor. """ assert isinstance(mlvl_tensors, (list, tuple)) num_levels = len(mlvl_tensors) if detach: mlvl_tensor_list = [ mlvl_tensors[i][batch_id].detach() for i in range(num_levels) ] else: mlvl_tensor_list = [ mlvl_tensors[i][batch_id] for i in range(num_levels) ] return mlvl_tensor_list def filter_scores_and_topk(scores, score_thr, topk, results=None): """Filter results using score threshold and topk candidates. Args: scores (Tensor): The scores, shape (num_bboxes, K). score_thr (float): The score filter threshold. topk (int): The number of topk candidates. results (dict or list or Tensor, Optional): The results to which the filtering rule is to be applied. The shape of each item is (num_bboxes, N). Returns: tuple: Filtered results - scores (Tensor): The scores after being filtered, \ shape (num_bboxes_filtered, ). - labels (Tensor): The class labels, shape \ (num_bboxes_filtered, ). - anchor_idxs (Tensor): The anchor indexes, shape \ (num_bboxes_filtered, ). - filtered_results (dict or list or Tensor, Optional): \ The filtered results. The shape of each item is \ (num_bboxes_filtered, N). 
""" valid_mask = scores > score_thr scores = scores[valid_mask] valid_idxs = torch.nonzero(valid_mask) num_topk = min(topk, valid_idxs.size(0)) # torch.sort is actually faster than .topk (at least on GPUs) scores, idxs = scores.sort(descending=True) scores = scores[:num_topk] topk_idxs = valid_idxs[idxs[:num_topk]] keep_idxs, labels = topk_idxs.unbind(dim=1) filtered_results = None if results is not None: if isinstance(results, dict): filtered_results = {k: v[keep_idxs] for k, v in results.items()} elif isinstance(results, list): filtered_results = [result[keep_idxs] for result in results] elif isinstance(results, torch.Tensor): filtered_results = results[keep_idxs] else: raise NotImplementedError(f'Only supports dict or list or Tensor, ' f'but get {type(results)}.') return scores, labels, keep_idxs, filtered_results def center_of_mass(mask, esp=1e-6): """Calculate the centroid coordinates of the mask. Args: mask (Tensor): The mask to be calculated, shape (h, w). esp (float): Avoid dividing by zero. Default: 1e-6. Returns: tuple[Tensor]: the coordinates of the center point of the mask. - center_h (Tensor): the center point of the height. - center_w (Tensor): the center point of the width. """ h, w = mask.shape grid_h = torch.arange(h, device=mask.device)[:, None] grid_w = torch.arange(w, device=mask.device) normalizer = mask.sum().float().clamp(min=esp) center_h = (mask * grid_h).sum() / normalizer center_w = (mask * grid_w).sum() / normalizer return center_h, center_w def generate_coordinate(featmap_sizes, device='cuda'): """Generate the coordinate. Args: featmap_sizes (tuple): The feature to be calculated, of shape (N, C, W, H). device (str): The device where the feature will be put on. Returns: coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H). """ x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) y, x = torch.meshgrid(y_range, x_range) y = y.expand([featmap_sizes[0], 1, -1, -1]) x = x.expand([featmap_sizes[0], 1, -1, -1]) coord_feat = torch.cat([x, y], 1) return coord_feat def levels_to_images(mlvl_tensor: List[torch.Tensor]) -> List[torch.Tensor]: """Concat multi-level feature maps by image. [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] Convert the shape of each element in mlvl_tensor from (N, C, H, W) to (N, H*W , C), then split the element to N elements with shape (H*W, C), and concat elements in same image of all level along first dimension. Args: mlvl_tensor (list[Tensor]): list of Tensor which collect from corresponding level. Each element is of shape (N, C, H, W) Returns: list[Tensor]: A list that contains N tensors and each tensor is of shape (num_elements, C) """ batch_size = mlvl_tensor[0].size(0) batch_list = [[] for _ in range(batch_size)] channels = mlvl_tensor[0].size(1) for t in mlvl_tensor: t = t.permute(0, 2, 3, 1) t = t.view(batch_size, -1, channels).contiguous() for img in range(batch_size): batch_list[img].append(t[img]) return [torch.cat(item, 0) for item in batch_list] def images_to_levels(target, num_levels): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] 
""" target = stack_boxes(target, 0) level_targets = [] start = 0 for n in num_levels: end = start + n # level_targets.append(target[:, start:end].squeeze(0)) level_targets.append(target[:, start:end]) start = end return level_targets def samplelist_boxtype2tensor(batch_data_samples: SampleList) -> SampleList: for data_samples in batch_data_samples: if 'gt_instances' in data_samples: bboxes = data_samples.gt_instances.get('bboxes', None) if isinstance(bboxes, BaseBoxes): data_samples.gt_instances.bboxes = bboxes.tensor if 'pred_instances' in data_samples: bboxes = data_samples.pred_instances.get('bboxes', None) if isinstance(bboxes, BaseBoxes): data_samples.pred_instances.bboxes = bboxes.tensor if 'ignored_instances' in data_samples: bboxes = data_samples.ignored_instances.get('bboxes', None) if isinstance(bboxes, BaseBoxes): data_samples.ignored_instances.bboxes = bboxes.tensor _torch_version_div_indexing = ( 'parrots' not in torch.__version__ and digit_version(torch.__version__) >= digit_version('1.8')) def floordiv(dividend, divisor, rounding_mode='trunc'): if _torch_version_div_indexing: return torch.div(dividend, divisor, rounding_mode=rounding_mode) else: return dividend // divisor def _filter_gt_instances_by_score(batch_data_samples: SampleList, score_thr: float) -> SampleList: """Filter ground truth (GT) instances by score. Args: batch_data_samples (SampleList): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. score_thr (float): The score filter threshold. Returns: SampleList: The Data Samples filtered by score. """ for data_samples in batch_data_samples: assert 'scores' in data_samples.gt_instances, \ 'there does not exit scores in instances' if data_samples.gt_instances.bboxes.shape[0] > 0: data_samples.gt_instances = data_samples.gt_instances[ data_samples.gt_instances.scores > score_thr] return batch_data_samples def _filter_gt_instances_by_size(batch_data_samples: SampleList, wh_thr: tuple) -> SampleList: """Filter ground truth (GT) instances by size. Args: batch_data_samples (SampleList): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. wh_thr (tuple): Minimum width and height of bbox. Returns: SampleList: The Data Samples filtered by score. """ for data_samples in batch_data_samples: bboxes = data_samples.gt_instances.bboxes if bboxes.shape[0] > 0: w = bboxes[:, 2] - bboxes[:, 0] h = bboxes[:, 3] - bboxes[:, 1] data_samples.gt_instances = data_samples.gt_instances[ (w > wh_thr[0]) & (h > wh_thr[1])] return batch_data_samples def filter_gt_instances(batch_data_samples: SampleList, score_thr: float = None, wh_thr: tuple = None): """Filter ground truth (GT) instances by score and/or size. Args: batch_data_samples (SampleList): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. score_thr (float): The score filter threshold. wh_thr (tuple): Minimum width and height of bbox. Returns: SampleList: The Data Samples filtered by score and/or size. """ if score_thr is not None: batch_data_samples = _filter_gt_instances_by_score( batch_data_samples, score_thr) if wh_thr is not None: batch_data_samples = _filter_gt_instances_by_size( batch_data_samples, wh_thr) return batch_data_samples def rename_loss_dict(prefix: str, losses: dict) -> dict: """Rename the key names in loss dict by adding a prefix. Args: prefix (str): The prefix for loss components. losses (dict): A dictionary of loss components. 
Returns: dict: A dictionary of loss components with prefix. """ return {prefix + k: v for k, v in losses.items()} def reweight_loss_dict(losses: dict, weight: float) -> dict: """Reweight losses in the dict by weight. Args: losses (dict): A dictionary of loss components. weight (float): Weight for loss components. Returns: dict: A dictionary of weighted loss components. """ for name, loss in losses.items(): if 'loss' in name: if isinstance(loss, Sequence): losses[name] = [item * weight for item in loss] else: losses[name] = loss * weight return losses def relative_coordinate_maps( locations: Tensor, centers: Tensor, strides: Tensor, size_of_interest: int, feat_sizes: Tuple[int], ) -> Tensor: """Generate the relative coordinate maps with feat_stride. Args: locations (Tensor): The prior location of mask feature map. It has shape (num_priors, 2). centers (Tensor): The prior points of an object in all feature pyramids. It has shape (num_pos, 2) strides (Tensor): The prior strides of an object in all feature pyramids. It has shape (num_pos, 1) size_of_interest (int): The size of the region used in rel coord. feat_sizes (Tuple[int]): The feature size H and W, which has 2 dims. Returns: rel_coord_feat (Tensor): The coordinate feature of shape (num_pos, 2, H, W). """ H, W = feat_sizes rel_coordinates = centers.reshape(-1, 1, 2) - locations.reshape(1, -1, 2) rel_coordinates = rel_coordinates.permute(0, 2, 1).float() rel_coordinates = rel_coordinates / ( strides[:, None, None] * size_of_interest) return rel_coordinates.reshape(-1, 2, H, W) def aligned_bilinear(tensor: Tensor, factor: int) -> Tensor: """Aligned bilinear upsampling, used in the original implementation of CondInst: https://github.com/aim-uofa/AdelaiDet/blob/\ c0b2092ce72442b0f40972f7c6dda8bb52c46d16/adet/utils/comm.py#L23 """ assert tensor.dim() == 4 assert factor >= 1 assert int(factor) == factor if factor == 1: return tensor h, w = tensor.size()[2:] tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode='replicate') oh = factor * h + 1 ow = factor * w + 1 tensor = F.interpolate( tensor, size=(oh, ow), mode='bilinear', align_corners=True) tensor = F.pad( tensor, pad=(factor // 2, 0, factor // 2, 0), mode='replicate') return tensor[:, :, :oh - 1, :ow - 1] def unfold_wo_center(x, kernel_size: int, dilation: int) -> Tensor: """unfold_wo_center, used in the original implementation of BoxInst: https://github.com/aim-uofa/AdelaiDet/blob/\ 4a3a1f7372c35b48ebf5f6adc59f135a0fa28d60/\ adet/modeling/condinst/condinst.py#L53 """ assert x.dim() == 4 assert kernel_size % 2 == 1 # using SAME padding padding = (kernel_size + (dilation - 1) * (kernel_size - 1)) // 2 unfolded_x = F.unfold( x, kernel_size=kernel_size, padding=padding, dilation=dilation) unfolded_x = unfolded_x.reshape( x.size(0), x.size(1), -1, x.size(2), x.size(3)) # remove the center pixels size = kernel_size**2 unfolded_x = torch.cat( (unfolded_x[:, :, :size // 2], unfolded_x[:, :, size // 2 + 1:]), dim=2) return unfolded_x
23,814
35.470138
79
py
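A usage sketch for `multi_apply` above, the pattern dense heads use to run a per-level function across FPN levels and regroup the per-output lists (import path assumed from this repo's layout):

```python
import torch

from mmdet.models.utils.misc import multi_apply  # import path assumed

def forward_single(feat, scale):
    # One output pair per input; multi_apply regroups them per output.
    return feat * scale, feat.sum()

feats = [torch.ones(2, 2), torch.ones(3, 3)]
scaled, sums = multi_apply(forward_single, feats, [2.0, 3.0])
print(len(scaled), sums)  # 2 [tensor(4.), tensor(9.)]
```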
ERD
ERD-main/mmdet/models/utils/panoptic_gt_processing.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch from torch import Tensor def preprocess_panoptic_gt(gt_labels: Tensor, gt_masks: Tensor, gt_semantic_seg: Tensor, num_things: int, num_stuff: int) -> Tuple[Tensor, Tensor]: """Preprocess the ground truth for an image. Args: gt_labels (Tensor): Ground truth labels of each bbox, with shape (num_gts, ). gt_masks (BitmapMasks): Ground truth masks of each instance of an image, shape (num_gts, h, w). gt_semantic_seg (Tensor | None): Ground truth of semantic segmentation with the shape (1, h, w). [0, num_thing_class - 1] means things, [num_thing_class, num_class-1] means stuff, 255 means VOID. It's None when training instance segmentation. Returns: tuple[Tensor, Tensor]: a tuple containing the following targets. - labels (Tensor): Ground truth class indices for an image, with shape (n, ), where n is the sum of the number of stuff types and the number of instances in an image. - masks (Tensor): Ground truth mask for an image, with shape (n, h, w). Contains stuff and things when training panoptic segmentation, and things only when training instance segmentation. """ num_classes = num_things + num_stuff things_masks = gt_masks.to_tensor( dtype=torch.bool, device=gt_labels.device) if gt_semantic_seg is None: masks = things_masks.long() return gt_labels, masks things_labels = gt_labels gt_semantic_seg = gt_semantic_seg.squeeze(0) semantic_labels = torch.unique( gt_semantic_seg, sorted=False, return_inverse=False, return_counts=False) stuff_masks_list = [] stuff_labels_list = [] for label in semantic_labels: if label < num_things or label >= num_classes: continue stuff_mask = gt_semantic_seg == label stuff_masks_list.append(stuff_mask) stuff_labels_list.append(label) if len(stuff_masks_list) > 0: stuff_masks = torch.stack(stuff_masks_list, dim=0) stuff_labels = torch.stack(stuff_labels_list, dim=0) labels = torch.cat([things_labels, stuff_labels], dim=0) masks = torch.cat([things_masks, stuff_masks], dim=0) else: labels = things_labels masks = things_masks masks = masks.long() return labels, masks
2,575
35.28169
74
py
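A minimal sketch (toy labels, not the function itself) of the stuff-mask extraction loop in `preprocess_panoptic_gt` above: each semantic label in `[num_things, num_classes)` yields one binary mask, while thing pixels and the 255 VOID label are skipped:

```python
import torch

num_things, num_stuff = 2, 2  # toy setup: labels 0-1 things, 2-3 stuff
num_classes = num_things + num_stuff
gt_semantic_seg = torch.tensor([[0, 2, 2],
                                [3, 3, 255]])

stuff_labels, stuff_masks = [], []
for label in torch.unique(gt_semantic_seg):
    if label < num_things or label >= num_classes:
        continue  # skip thing pixels and the 255 VOID label
    stuff_labels.append(int(label))
    stuff_masks.append(gt_semantic_seg == label)

print(stuff_labels)                     # [2, 3]
print(torch.stack(stuff_masks).long())  # one binary (h, w) mask per label
```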
ERD
ERD-main/mmdet/models/task_modules/assigners/assign_result.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from torch import Tensor from mmdet.utils import util_mixins class AssignResult(util_mixins.NiceRepr): """Stores assignments between predicted and truth boxes. Attributes: num_gts (int): the number of truth boxes considered when computing this assignment gt_inds (Tensor): for each predicted box indicates the 1-based index of the assigned truth box. 0 means unassigned and -1 means ignore. max_overlaps (Tensor): the iou between the predicted box and its assigned truth box. labels (Tensor): If specified, for each predicted box indicates the category label of the assigned truth box. Example: >>> # An assign result between 4 predicted boxes and 9 true boxes >>> # where only two boxes were assigned. >>> num_gts = 9 >>> max_overlaps = torch.LongTensor([0, .5, .9, 0]) >>> gt_inds = torch.LongTensor([-1, 1, 2, 0]) >>> labels = torch.LongTensor([0, 3, 4, 0]) >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels) >>> print(str(self)) # xdoctest: +IGNORE_WANT <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,), labels.shape=(4,))> >>> # Force addition of gt labels (when adding gt as proposals) >>> new_labels = torch.LongTensor([3, 4, 5]) >>> self.add_gt_(new_labels) >>> print(str(self)) # xdoctest: +IGNORE_WANT <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,), labels.shape=(7,))> """ def __init__(self, num_gts: int, gt_inds: Tensor, max_overlaps: Tensor, labels: Tensor) -> None: self.num_gts = num_gts self.gt_inds = gt_inds self.max_overlaps = max_overlaps self.labels = labels # Interface for possible user-defined properties self._extra_properties = {} @property def num_preds(self): """int: the number of predictions in this assignment""" return len(self.gt_inds) def set_extra_property(self, key, value): """Set user-defined new property.""" assert key not in self.info self._extra_properties[key] = value def get_extra_property(self, key): """Get user-defined property.""" return self._extra_properties.get(key, None) @property def info(self): """dict: a dictionary of info about the object""" basic_info = { 'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels, } basic_info.update(self._extra_properties) return basic_info def __nice__(self): """str: a "nice" summary string describing this assign result""" parts = [] parts.append(f'num_gts={self.num_gts!r}') if self.gt_inds is None: parts.append(f'gt_inds={self.gt_inds!r}') else: parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}') if self.max_overlaps is None: parts.append(f'max_overlaps={self.max_overlaps!r}') else: parts.append('max_overlaps.shape=' f'{tuple(self.max_overlaps.shape)!r}') if self.labels is None: parts.append(f'labels={self.labels!r}') else: parts.append(f'labels.shape={tuple(self.labels.shape)!r}') return ', '.join(parts) @classmethod def random(cls, **kwargs): """Create random AssignResult for tests or debugging. Args: num_preds: number of predicted boxes num_gts: number of true boxes p_ignore (float): probability of a predicted box assigned to an ignored truth p_assigned (float): probability of a predicted box not being assigned p_use_label (float | bool): with labels or not rng (None | int | numpy.random.RandomState): seed or state Returns: :obj:`AssignResult`: Randomly generated assign results. 
Example: >>> from mmdet.models.task_modules.assigners.assign_result import * # NOQA >>> self = AssignResult.random() >>> print(self.info) """ from ..samplers.sampling_result import ensure_rng rng = ensure_rng(kwargs.get('rng', None)) num_gts = kwargs.get('num_gts', None) num_preds = kwargs.get('num_preds', None) p_ignore = kwargs.get('p_ignore', 0.3) p_assigned = kwargs.get('p_assigned', 0.7) num_classes = kwargs.get('num_classes', 3) if num_gts is None: num_gts = rng.randint(0, 8) if num_preds is None: num_preds = rng.randint(0, 16) if num_gts == 0: max_overlaps = torch.zeros(num_preds, dtype=torch.float32) gt_inds = torch.zeros(num_preds, dtype=torch.int64) labels = torch.zeros(num_preds, dtype=torch.int64) else: import numpy as np # Create an overlap for each predicted box max_overlaps = torch.from_numpy(rng.rand(num_preds)) # Construct gt_inds for each predicted box is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned) # maximum number of assignments constraints n_assigned = min(num_preds, min(num_gts, is_assigned.sum())) assigned_idxs = np.where(is_assigned)[0] rng.shuffle(assigned_idxs) assigned_idxs = assigned_idxs[0:n_assigned] assigned_idxs.sort() is_assigned[:] = 0 is_assigned[assigned_idxs] = True is_ignore = torch.from_numpy( rng.rand(num_preds) < p_ignore) & is_assigned gt_inds = torch.zeros(num_preds, dtype=torch.int64) true_idxs = np.arange(num_gts) rng.shuffle(true_idxs) true_idxs = torch.from_numpy(true_idxs) gt_inds[is_assigned] = true_idxs[:n_assigned].long() gt_inds = torch.from_numpy( rng.randint(1, num_gts + 1, size=num_preds)) gt_inds[is_ignore] = -1 gt_inds[~is_assigned] = 0 max_overlaps[~is_assigned] = 0 if num_classes == 0: labels = torch.zeros(num_preds, dtype=torch.int64) else: labels = torch.from_numpy( # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class rng.randint(0, num_classes, size=num_preds)) labels[~is_assigned] = 0 self = cls(num_gts, gt_inds, max_overlaps, labels) return self def add_gt_(self, gt_labels): """Add ground truth as assigned results. Args: gt_labels (torch.Tensor): Labels of gt boxes """ self_inds = torch.arange( 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) self.gt_inds = torch.cat([self_inds, self.gt_inds]) self.max_overlaps = torch.cat( [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]) self.labels = torch.cat([gt_labels, self.labels])
7,470
36.542714
87
py
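A usage sketch for `AssignResult` above: `random()` builds a synthetic assignment for tests, and `add_gt_` prepends the GT boxes as their own predictions, the trick used when GTs are added as proposals (import path assumed from this repo's layout):

```python
import torch

# Import path assumed from this repo's layout.
from mmdet.models.task_modules.assigners import AssignResult

result = AssignResult.random(num_preds=6, num_gts=3, rng=0)
print(result)  # <AssignResult(num_gts=3, gt_inds.shape=(6,), ...)>

# Appending GTs as proposals: gt i becomes a prediction assigned to itself,
# with max_overlap fixed to 1.
result.add_gt_(torch.LongTensor([0, 1, 2]))
print(result.num_preds)  # 9
```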
ERD
ERD-main/mmdet/models/task_modules/assigners/dynamic_soft_label_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple import torch import torch.nn.functional as F from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import BaseBoxes from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner INF = 100000000 EPS = 1.0e-7 def center_of_mass(masks: Tensor, eps: float = 1e-7) -> Tensor: """Compute the masks center of mass. Args: masks: Mask tensor, has shape (num_masks, H, W). eps: a small number to avoid normalizer to be zero. Defaults to 1e-7. Returns: Tensor: The masks center of mass. Has shape (num_masks, 2). """ n, h, w = masks.shape grid_h = torch.arange(h, device=masks.device)[:, None] grid_w = torch.arange(w, device=masks.device) normalizer = masks.sum(dim=(1, 2)).float().clamp(min=eps) center_y = (masks * grid_h).sum(dim=(1, 2)) / normalizer center_x = (masks * grid_w).sum(dim=(1, 2)) / normalizer center = torch.cat([center_x[:, None], center_y[:, None]], dim=1) return center @TASK_UTILS.register_module() class DynamicSoftLabelAssigner(BaseAssigner): """Computes matching between predictions and ground truth with dynamic soft label assignment. Args: soft_center_radius (float): Radius of the soft center prior. Defaults to 3.0. topk (int): Select top-k predictions to calculate dynamic k best matches for each gt. Defaults to 13. iou_weight (float): The scale factor of iou cost. Defaults to 3.0. iou_calculator (ConfigType): Config of overlaps Calculator. Defaults to dict(type='BboxOverlaps2D'). """ def __init__( self, soft_center_radius: float = 3.0, topk: int = 13, iou_weight: float = 3.0, iou_calculator: ConfigType = dict(type='BboxOverlaps2D') ) -> None: self.soft_center_radius = soft_center_radius self.topk = topk self.iou_weight = iou_weight self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to priors. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: obj:`AssignResult`: The assigned result. 
""" gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels num_gt = gt_bboxes.size(0) decoded_bboxes = pred_instances.bboxes pred_scores = pred_instances.scores priors = pred_instances.priors num_bboxes = decoded_bboxes.size(0) # assign 0 by default assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), 0, dtype=torch.long) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) prior_center = priors[:, :2] if isinstance(gt_bboxes, BaseBoxes): is_in_gts = gt_bboxes.find_inside_points(prior_center) else: # Tensor boxes will be treated as horizontal boxes by defaults lt_ = prior_center[:, None] - gt_bboxes[:, :2] rb_ = gt_bboxes[:, 2:] - prior_center[:, None] deltas = torch.cat([lt_, rb_], dim=-1) is_in_gts = deltas.min(dim=-1).values > 0 valid_mask = is_in_gts.sum(dim=1) > 0 valid_decoded_bbox = decoded_bboxes[valid_mask] valid_pred_scores = pred_scores[valid_mask] num_valid = valid_decoded_bbox.size(0) if num_valid == 0: # No ground truth or boxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) if hasattr(gt_instances, 'masks'): gt_center = center_of_mass(gt_instances.masks, eps=EPS) elif isinstance(gt_bboxes, BaseBoxes): gt_center = gt_bboxes.centers else: # Tensor boxes will be treated as horizontal boxes by defaults gt_center = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2.0 valid_prior = priors[valid_mask] strides = valid_prior[:, 2] distance = (valid_prior[:, None, :2] - gt_center[None, :, :] ).pow(2).sum(-1).sqrt() / strides[:, None] soft_center_prior = torch.pow(10, distance - self.soft_center_radius) pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes) iou_cost = -torch.log(pairwise_ious + EPS) * self.iou_weight gt_onehot_label = ( F.one_hot(gt_labels.to(torch.int64), pred_scores.shape[-1]).float().unsqueeze(0).repeat( num_valid, 1, 1)) valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1) soft_label = gt_onehot_label * pairwise_ious[..., None] scale_factor = soft_label - valid_pred_scores.sigmoid() soft_cls_cost = F.binary_cross_entropy_with_logits( valid_pred_scores, soft_label, reduction='none') * scale_factor.abs().pow(2.0) soft_cls_cost = soft_cls_cost.sum(dim=-1) cost_matrix = soft_cls_cost + iou_cost + soft_center_prior matched_pred_ious, matched_gt_inds = self.dynamic_k_matching( cost_matrix, pairwise_ious, num_gt, valid_mask) # convert to AssignResult format assigned_gt_inds[valid_mask] = matched_gt_inds + 1 assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), -INF, dtype=torch.float32) max_overlaps[valid_mask] = matched_pred_ious return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor, num_gt: int, valid_mask: Tensor) -> Tuple[Tensor, Tensor]: """Use IoU and matching cost to calculate the dynamic top-k positive targets. Same as SimOTA. Args: cost (Tensor): Cost matrix. 
pairwise_ious (Tensor): Pairwise iou matrix. num_gt (int): Number of gt. valid_mask (Tensor): Mask for valid bboxes. Returns: tuple: matched ious and gt indexes. """ matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) # select candidate topk ious for dynamic-k calculation candidate_topk = min(self.topk, pairwise_ious.size(0)) topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) # calculate dynamic k for each gt dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) for gt_idx in range(num_gt): _, pos_idx = torch.topk( cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) matching_matrix[:, gt_idx][pos_idx] = 1 del topk_ious, dynamic_ks, pos_idx prior_match_gt_mask = matching_matrix.sum(1) > 1 if prior_match_gt_mask.sum() > 0: cost_min, cost_argmin = torch.min( cost[prior_match_gt_mask, :], dim=1) matching_matrix[prior_match_gt_mask, :] *= 0 matching_matrix[prior_match_gt_mask, cost_argmin] = 1 # get foreground mask inside box and center prior fg_mask_inboxes = matching_matrix.sum(1) > 0 valid_mask[valid_mask.clone()] = fg_mask_inboxes matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) matched_pred_ious = (matching_matrix * pairwise_ious).sum(1)[fg_mask_inboxes] return matched_pred_ious, matched_gt_inds
9,847
42.192982
79
py
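The dynamic-k rule in dynamic_k_matching above is small enough to run standalone: each gt's k is the clamped integer sum of its top-k candidate IoUs. A minimal sketch with fabricated IoU values (the tensor entries are illustrative only, not from the repo):

import torch

# Toy (num_priors=6, num_gt=2) IoU matrix; values are made up.
pairwise_ious = torch.tensor([[0.1, 0.7],
                              [0.6, 0.2],
                              [0.5, 0.4],
                              [0.3, 0.3],
                              [0.8, 0.1],
                              [0.2, 0.6]])

topk = 3
candidate_topk = min(topk, pairwise_ious.size(0))
topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
# Each gt's dynamic k is the (clamped) integer sum of its top-k IoUs.
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
print(dynamic_ks)  # tensor([1, 1], dtype=torch.int32) for these values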
ERD
ERD-main/mmdet/models/task_modules/assigners/sim_ota_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple import torch import torch.nn.functional as F from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner INF = 100000.0 EPS = 1.0e-7 @TASK_UTILS.register_module() class SimOTAAssigner(BaseAssigner): """Computes matching between predictions and ground truth. Args: center_radius (float): Ground truth center size to judge whether a prior is in the center. Defaults to 2.5. candidate_topk (int): The candidate top-k which is used to get top-k ious to calculate dynamic-k. Defaults to 10. iou_weight (float): The scale factor for regression iou cost. Defaults to 3.0. cls_weight (float): The scale factor for classification cost. Defaults to 1.0. iou_calculator (ConfigType): Config of overlaps Calculator. Defaults to dict(type='BboxOverlaps2D'). """ def __init__(self, center_radius: float = 2.5, candidate_topk: int = 10, iou_weight: float = 3.0, cls_weight: float = 1.0, iou_calculator: ConfigType = dict(type='BboxOverlaps2D')): self.center_radius = center_radius self.candidate_topk = candidate_topk self.iou_weight = iou_weight self.cls_weight = cls_weight self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to priors using SimOTA. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: obj:`AssignResult`: The assigned result. 
""" gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels num_gt = gt_bboxes.size(0) decoded_bboxes = pred_instances.bboxes pred_scores = pred_instances.scores priors = pred_instances.priors num_bboxes = decoded_bboxes.size(0) # assign 0 by default assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), 0, dtype=torch.long) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( priors, gt_bboxes) valid_decoded_bbox = decoded_bboxes[valid_mask] valid_pred_scores = pred_scores[valid_mask] num_valid = valid_decoded_bbox.size(0) if num_valid == 0: # No valid bboxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes) iou_cost = -torch.log(pairwise_ious + EPS) gt_onehot_label = ( F.one_hot(gt_labels.to(torch.int64), pred_scores.shape[-1]).float().unsqueeze(0).repeat( num_valid, 1, 1)) valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1) # disable AMP autocast and calculate BCE with FP32 to avoid overflow with torch.cuda.amp.autocast(enabled=False): cls_cost = ( F.binary_cross_entropy( valid_pred_scores.to(dtype=torch.float32), gt_onehot_label, reduction='none', ).sum(-1).to(dtype=valid_pred_scores.dtype)) cost_matrix = ( cls_cost * self.cls_weight + iou_cost * self.iou_weight + (~is_in_boxes_and_center) * INF) matched_pred_ious, matched_gt_inds = \ self.dynamic_k_matching( cost_matrix, pairwise_ious, num_gt, valid_mask) # convert to AssignResult format assigned_gt_inds[valid_mask] = matched_gt_inds + 1 assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), -INF, dtype=torch.float32) max_overlaps[valid_mask] = matched_pred_ious return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) def get_in_gt_and_in_center_info( self, priors: Tensor, gt_bboxes: Tensor) -> Tuple[Tensor, Tensor]: """Get the information of which prior is in gt bboxes and gt center priors.""" num_gt = gt_bboxes.size(0) repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt) repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt) repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt) repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt) # is prior centers in gt bboxes, shape: [n_prior, n_gt] l_ = repeated_x - gt_bboxes[:, 0] t_ = repeated_y - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - repeated_x b_ = gt_bboxes[:, 3] - repeated_y deltas = torch.stack([l_, t_, r_, b_], dim=1) is_in_gts = deltas.min(dim=1).values > 0 is_in_gts_all = is_in_gts.sum(dim=1) > 0 # is prior centers in gt centers gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 ct_box_l = gt_cxs - self.center_radius * repeated_stride_x ct_box_t = gt_cys - self.center_radius * repeated_stride_y ct_box_r = gt_cxs + self.center_radius * repeated_stride_x ct_box_b = gt_cys + self.center_radius * repeated_stride_y cl_ = repeated_x - ct_box_l ct_ = 
repeated_y - ct_box_t cr_ = ct_box_r - repeated_x cb_ = ct_box_b - repeated_y ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1) is_in_cts = ct_deltas.min(dim=1).values > 0 is_in_cts_all = is_in_cts.sum(dim=1) > 0 # in boxes or in centers, shape: [num_priors] is_in_gts_or_centers = is_in_gts_all | is_in_cts_all # both in boxes and centers, shape: [num_fg, num_gt] is_in_boxes_and_centers = ( is_in_gts[is_in_gts_or_centers, :] & is_in_cts[is_in_gts_or_centers, :]) return is_in_gts_or_centers, is_in_boxes_and_centers def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor, num_gt: int, valid_mask: Tensor) -> Tuple[Tensor, Tensor]: """Use IoU and matching cost to calculate the dynamic top-k positive targets.""" matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) # select candidate topk ious for dynamic-k calculation candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) # calculate dynamic k for each gt dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) for gt_idx in range(num_gt): _, pos_idx = torch.topk( cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) matching_matrix[:, gt_idx][pos_idx] = 1 del topk_ious, dynamic_ks, pos_idx prior_match_gt_mask = matching_matrix.sum(1) > 1 if prior_match_gt_mask.sum() > 0: cost_min, cost_argmin = torch.min( cost[prior_match_gt_mask, :], dim=1) matching_matrix[prior_match_gt_mask, :] *= 0 matching_matrix[prior_match_gt_mask, cost_argmin] = 1 # get foreground mask inside box and center prior fg_mask_inboxes = matching_matrix.sum(1) > 0 valid_mask[valid_mask.clone()] = fg_mask_inboxes matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) matched_pred_ious = (matching_matrix * pairwise_ious).sum(1)[fg_mask_inboxes] return matched_pred_ious, matched_gt_inds
9,943
43.392857
79
py
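A hypothetical end-to-end call of the SimOTAAssigner above. It assumes an environment where mmdet's registries are importable (so the BboxOverlaps2D calculator can be built), and all tensor values are fabricated. Note that the scores should already be probabilities, since the classification cost uses F.binary_cross_entropy rather than the logits variant:

import torch
from mmengine.structures import InstanceData

assigner = SimOTAAssigner(center_radius=2.5, candidate_topk=10)

pred_instances = InstanceData()
# priors are (cx, cy, stride_x, stride_y) point priors
pred_instances.priors = torch.tensor([[4., 4., 8., 8.],
                                      [12., 4., 8., 8.],
                                      [4., 12., 8., 8.],
                                      [12., 12., 8., 8.]])
pred_instances.bboxes = torch.tensor([[0., 0., 8., 8.],
                                      [8., 0., 16., 8.],
                                      [0., 8., 8., 16.],
                                      [8., 8., 16., 16.]])
pred_instances.scores = torch.rand(4, 3)  # probabilities in [0, 1]

gt_instances = InstanceData()
gt_instances.bboxes = torch.tensor([[2., 2., 14., 14.]])
gt_instances.labels = torch.tensor([0])

result = assigner.assign(pred_instances, gt_instances)
print(result.gt_inds)  # 0 = background, i + 1 = assigned to gt i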
ERD
ERD-main/mmdet/models/task_modules/assigners/atss_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import List, Optional import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner def bbox_center_distance(bboxes: Tensor, priors: Tensor) -> Tensor: """Compute the center distance between bboxes and priors. Args: bboxes (Tensor): Shape (n, 4) for bboxes, "xyxy" format. priors (Tensor): Shape (n, 4) for priors, "xyxy" format. Returns: Tensor: Center distances between bboxes and priors. """ bbox_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 bbox_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 bbox_points = torch.stack((bbox_cx, bbox_cy), dim=1) priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0 priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0 priors_points = torch.stack((priors_cx, priors_cy), dim=1) distances = (priors_points[:, None, :] - bbox_points[None, :, :]).pow(2).sum(-1).sqrt() return distances @TASK_UTILS.register_module() class ATSSAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each prior. Each prior will be assigned `0` or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt If ``alpha`` is not None, it means that the dynamic cost ATSSAssigner is adopted, which is currently only used in the DDOD. Args: topk (int): number of priors selected in each level alpha (float, optional): param of cost rate for each proposal only in DDOD. Defaults to None. iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou calculator. Defaults to ``dict(type='BboxOverlaps2D')`` ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. Defaults to -1. """ def __init__(self, topk: int, alpha: Optional[float] = None, iou_calculator: ConfigType = dict(type='BboxOverlaps2D'), ignore_iof_thr: float = -1) -> None: self.topk = topk self.alpha = alpha self.iou_calculator = TASK_UTILS.build(iou_calculator) self.ignore_iof_thr = ignore_iof_thr # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py def assign( self, pred_instances: InstanceData, num_level_priors: List[int], gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None ) -> AssignResult: """Assign gt to priors. The assignment is done in following steps 1. compute iou between all priors (priors of all pyramid levels) and gts 2. compute center distance between all priors and gts 3. on each pyramid level, for each gt, select k priors whose centers are closest to the gt center, so we select k*l priors in total as candidates for each gt 4. get the corresponding iou for these candidates, and compute the mean and std, set mean + std as the iou threshold 5. select the candidates whose iou is greater than or equal to the threshold as positives 6. limit the positive samples' centers to be inside the gt If ``alpha`` is not None, and ``cls_scores`` and ``bbox_preds`` are not None, the overlaps calculation in the first step will also include dynamic cost, which is currently only used in the DDOD. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors, points, or bboxes predicted by the model, shape (n, 4). 
num_level_priors (List): Number of bboxes in each level gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. """ gt_bboxes = gt_instances.bboxes priors = pred_instances.priors gt_labels = gt_instances.labels if gt_instances_ignore is not None: gt_bboxes_ignore = gt_instances_ignore.bboxes else: gt_bboxes_ignore = None INF = 100000000 priors = priors[:, :4] num_gt, num_priors = gt_bboxes.size(0), priors.size(0) message = 'Invalid alpha parameter because cls_scores or ' \ 'bbox_preds are None. If you want to use the ' \ 'cost-based ATSSAssigner, please set cls_scores, ' \ 'bbox_preds and self.alpha at the same time. ' # compute iou between all bbox and gt if self.alpha is None: # ATSSAssigner overlaps = self.iou_calculator(priors, gt_bboxes) if ('scores' in pred_instances or 'bboxes' in pred_instances): warnings.warn(message) else: # Dynamic cost ATSSAssigner in DDOD assert ('scores' in pred_instances and 'bboxes' in pred_instances), message cls_scores = pred_instances.scores bbox_preds = pred_instances.bboxes # compute cls cost for bbox and GT cls_cost = torch.sigmoid(cls_scores[:, gt_labels]) # compute iou between all bbox and gt overlaps = self.iou_calculator(bbox_preds, gt_bboxes) # make sure that we are in element-wise multiplication assert cls_cost.shape == overlaps.shape # overlaps is actually a cost matrix overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha # assign 0 by default assigned_gt_inds = overlaps.new_full((num_priors, ), 0, dtype=torch.long) if num_gt == 0 or num_priors == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_priors, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 assigned_labels = overlaps.new_full((num_priors, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) # compute center distance between all bbox and gt distances = bbox_center_distance(gt_bboxes, priors) if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and priors.numel() > 0): ignore_overlaps = self.iou_calculator( priors, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr distances[ignore_idxs, :] = INF assigned_gt_inds[ignore_idxs] = -1 # Selecting candidates based on the center distance candidate_idxs = [] start_idx = 0 for level, priors_per_level in enumerate(num_level_priors): # on each pyramid level, for each gt, # select k bboxes whose centers are closest to the gt center end_idx = start_idx + priors_per_level distances_per_level = distances[start_idx:end_idx, :] selectable_k = min(self.topk, priors_per_level) _, topk_idxs_per_level = distances_per_level.topk( selectable_k, dim=0, largest=False) candidate_idxs.append(topk_idxs_per_level + start_idx) start_idx = end_idx candidate_idxs = torch.cat(candidate_idxs, dim=0) # get the corresponding iou for these candidates, and compute the # mean and std, set mean + std as the iou threshold candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] overlaps_mean_per_gt = candidate_overlaps.mean(0) overlaps_std_per_gt = candidate_overlaps.std(0) 
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] # limit the positive sample's center in gt for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_priors priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0 priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0 ep_priors_cx = priors_cx.view(1, -1).expand( num_gt, num_priors).contiguous().view(-1) ep_priors_cy = priors_cy.view(1, -1).expand( num_gt, num_priors).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # prior center and gt side l_ = ep_priors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_priors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - ep_priors_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_priors_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest IoU will be selected. overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 assigned_labels = assigned_gt_inds.new_full((num_priors, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
11,003
42.152941
87
py
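The adaptive-threshold step (step 4 in the assign docstring above) is easy to see on toy numbers; a sketch with fabricated candidate IoUs:

import torch

# (num_candidates, num_gt) IoUs of the distance-selected candidates.
candidate_overlaps = torch.tensor([[0.10, 0.55],
                                   [0.30, 0.45],
                                   [0.50, 0.40]])
# Per-gt adaptive threshold: mean + std of its candidate IoUs.
overlaps_thr_per_gt = (candidate_overlaps.mean(0) +
                       candidate_overlaps.std(0))
is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
print(overlaps_thr_per_gt)  # ~tensor([0.5000, 0.5430])
print(is_pos)               # positives before the inside-gt check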
ERD
ERD-main/mmdet/models/task_modules/assigners/multi_instance_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from .assign_result import AssignResult from .max_iou_assigner import MaxIoUAssigner @TASK_UTILS.register_module() class MultiInstanceAssigner(MaxIoUAssigner): """Assign a corresponding gt bbox or background to each proposal bbox. If we need to use a proposal box to generate multiple predicted boxes, `MultiInstanceAssigner` can assign multiple gts to each proposal box. Args: num_instance (int): How many bboxes are predicted by each proposal box. """ def __init__(self, num_instance: int = 2, **kwargs): super().__init__(**kwargs) self.num_instance = num_instance def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to bboxes. This method assigns gt bboxes to every bbox (proposal/anchor); each bbox is assigned a set of gts, and the number of gts in this set is defined by `self.num_instance`. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. 
""" gt_bboxes = gt_instances.bboxes priors = pred_instances.priors # Set the FG label to 1 and add ignored annotations gt_labels = gt_instances.labels + 1 if gt_instances_ignore is not None: gt_bboxes_ignore = gt_instances_ignore.bboxes if hasattr(gt_instances_ignore, 'labels'): gt_labels_ignore = gt_instances_ignore.labels else: gt_labels_ignore = torch.ones_like(gt_bboxes_ignore)[:, 0] * -1 else: gt_bboxes_ignore = None gt_labels_ignore = None assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( gt_bboxes.shape[0] > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = priors.device priors = priors.cpu() gt_bboxes = gt_bboxes.cpu() gt_labels = gt_labels.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() gt_labels_ignore = gt_labels_ignore.cpu() if gt_bboxes_ignore is not None: all_bboxes = torch.cat([gt_bboxes, gt_bboxes_ignore], dim=0) all_labels = torch.cat([gt_labels, gt_labels_ignore], dim=0) else: all_bboxes = gt_bboxes all_labels = gt_labels all_priors = torch.cat([priors, all_bboxes], dim=0) overlaps_normal = self.iou_calculator( all_priors, all_bboxes, mode='iou') overlaps_ignore = self.iou_calculator( all_priors, all_bboxes, mode='iof') gt_ignore_mask = all_labels.eq(-1).repeat(all_priors.shape[0], 1) overlaps_normal = overlaps_normal * ~gt_ignore_mask overlaps_ignore = overlaps_ignore * gt_ignore_mask overlaps_normal, overlaps_normal_indices = overlaps_normal.sort( descending=True, dim=1) overlaps_ignore, overlaps_ignore_indices = overlaps_ignore.sort( descending=True, dim=1) # select the roi with the higher score max_overlaps_normal = overlaps_normal[:, :self.num_instance].flatten() gt_assignment_normal = overlaps_normal_indices[:, :self. num_instance].flatten() max_overlaps_ignore = overlaps_ignore[:, :self.num_instance].flatten() gt_assignment_ignore = overlaps_ignore_indices[:, :self. num_instance].flatten() # ignore or not ignore_assign_mask = (max_overlaps_normal < self.pos_iou_thr) * ( max_overlaps_ignore > max_overlaps_normal) overlaps = (max_overlaps_normal * ~ignore_assign_mask) + ( max_overlaps_ignore * ignore_assign_mask) gt_assignment = (gt_assignment_normal * ~ignore_assign_mask) + ( gt_assignment_ignore * ignore_assign_mask) assigned_labels = all_labels[gt_assignment] fg_mask = (overlaps >= self.pos_iou_thr) * (assigned_labels != -1) bg_mask = (overlaps < self.neg_iou_thr) * (overlaps >= 0) assigned_labels[fg_mask] = 1 assigned_labels[bg_mask] = 0 overlaps = overlaps.reshape(-1, self.num_instance) gt_assignment = gt_assignment.reshape(-1, self.num_instance) assigned_labels = assigned_labels.reshape(-1, self.num_instance) assign_result = AssignResult( num_gts=all_bboxes.size(0), gt_inds=gt_assignment, max_overlaps=overlaps, labels=assigned_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result
6,188
42.893617
79
py
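The distinguishing move of MultiInstanceAssigner, keeping the top num_instance gts per proposal rather than a single argmax, shown standalone with fabricated IoUs (the real method additionally folds in ignored gts):

import torch

num_instance = 2
overlaps = torch.tensor([[0.9, 0.6, 0.1],
                         [0.2, 0.8, 0.7]])  # (num_proposals, num_gt)
sorted_ious, sorted_inds = overlaps.sort(descending=True, dim=1)
top_ious = sorted_ious[:, :num_instance]     # best IoUs per proposal
top_gt_inds = sorted_inds[:, :num_instance]  # their gt indices
print(top_ious)     # tensor([[0.9000, 0.6000], [0.8000, 0.7000]])
print(top_gt_inds)  # tensor([[0, 1], [1, 2]])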
ERD
ERD-main/mmdet/models/task_modules/assigners/center_region_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner def scale_boxes(bboxes: Tensor, scale: float) -> Tensor: """Expand an array of boxes by a given scale. Args: bboxes (Tensor): Shape (m, 4) scale (float): The scale factor of bboxes Returns: Tensor: Shape (m, 4). Scaled bboxes """ assert bboxes.size(1) == 4 w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5 h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5 x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5 y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5 w_half *= scale h_half *= scale boxes_scaled = torch.zeros_like(bboxes) boxes_scaled[:, 0] = x_c - w_half boxes_scaled[:, 2] = x_c + w_half boxes_scaled[:, 1] = y_c - h_half boxes_scaled[:, 3] = y_c + h_half return boxes_scaled def is_located_in(points: Tensor, bboxes: Tensor) -> Tensor: """Are points located in bboxes. Args: points (Tensor): Points, shape: (m, 2). bboxes (Tensor): Bounding boxes, shape: (n, 4). Return: Tensor: Flags indicating if points are located in bboxes, shape: (m, n). """ assert points.size(1) == 2 assert bboxes.size(1) == 4 return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \ (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \ (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \ (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0)) def bboxes_area(bboxes: Tensor) -> Tensor: """Compute the area of an array of bboxes. Args: bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4) Returns: Tensor: Area of the bboxes. Shape: (m, ) """ assert bboxes.size(1) == 4 w = (bboxes[:, 2] - bboxes[:, 0]) h = (bboxes[:, 3] - bboxes[:, 1]) areas = w * h return areas @TASK_UTILS.register_module() class CenterRegionAssigner(BaseAssigner): """Assign pixels at the center region of a bbox as positive. Each proposal will be assigned `-1` or a semi-positive integer indicating the ground truth index. - -1: negative samples - semi-positive numbers: positive sample, index (0-based) of assigned gt Args: pos_scale (float): Threshold within which pixels are labelled as positive. neg_scale (float): Threshold above which pixels are labelled as negative. min_pos_iof (float): Minimum iof of a pixel with a gt to be labelled as positive. Default: 1e-2 ignore_gt_scale (float): Threshold within which the pixels are ignored when the gt is labelled as shadowed. Default: 0.5 foreground_dominate (bool): If True, the bbox will be assigned as positive when a gt's kernel region overlaps with another's shadowed (ignored) region, otherwise it is set as ignored. Default to False. iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps Calculator. """ def __init__( self, pos_scale: float, neg_scale: float, min_pos_iof: float = 1e-2, ignore_gt_scale: float = 0.5, foreground_dominate: bool = False, iou_calculator: ConfigType = dict(type='BboxOverlaps2D') ) -> None: self.pos_scale = pos_scale self.neg_scale = neg_scale self.min_pos_iof = min_pos_iof self.ignore_gt_scale = ignore_gt_scale self.foreground_dominate = foreground_dominate self.iou_calculator = TASK_UTILS.build(iou_calculator) def get_gt_priorities(self, gt_bboxes: Tensor) -> Tensor: """Get gt priorities according to their areas. Smaller gts have higher priority. Args: gt_bboxes (Tensor): Ground truth boxes, shape (k, 4). 
Returns: Tensor: The priority of gts so that gts with larger priority are more likely to be assigned. Shape (k, ) """ gt_areas = bboxes_area(gt_bboxes) # Rank all gt bbox areas. Smaller objects have larger priority _, sort_idx = gt_areas.sort(descending=True) sort_idx = sort_idx.argsort() return sort_idx def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to bboxes. This method assigns gts to every prior (proposal/anchor); each prior will be assigned with -1, or a semi-positive number. -1 means negative sample, semi-positive number is the index (0-based) of assigned gt. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assigned result. Note that shadowed_labels of shape (N, 2) is also added as an `assign_result` attribute. `shadowed_labels` is a tensor composed of N pairs of [anchor_ind, class_label], where N is the number of anchors that lie in the outer region of a gt, anchor_ind is the shadowed anchor index and class_label is the shadowed class label. Example: >>> from mmengine.structures import InstanceData >>> self = CenterRegionAssigner(0.2, 0.2) >>> pred_instances = InstanceData() >>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10], ... [10, 10, 20, 20]]) >>> gt_instances = InstanceData() >>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 10]]) >>> gt_instances.labels = torch.Tensor([0]) >>> assign_result = self.assign(pred_instances, gt_instances) >>> expected_gt_inds = torch.LongTensor([1, 0]) >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) """ # There are in total 5 steps in the pixel assignment # 1. Find core (the center region, say inner 0.2) # and shadow (the relatively outer part, say inner 0.2-0.5) # regions of every gt. # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in # the image. # 3.1. For overlapping objects, the prior bboxes in gt_core are # assigned the object with the smallest area # 4. Assign prior bboxes with class label according to its gt id. # 4.1. Assign -1 to prior bboxes lying in shadowed gts # 4.2. Assign positive prior boxes with the corresponding label # 5. Find pixels lying in the shadow of an object and assign them with # background label, but set the loss weight of its corresponding # gt to zero. # TODO not extract bboxes in assign. gt_bboxes = gt_instances.bboxes priors = pred_instances.priors gt_labels = gt_instances.labels assert priors.size(1) == 4, 'priors must have size of 4' # 1. Find core positive and shadow region of every gt gt_core = scale_boxes(gt_bboxes, self.pos_scale) gt_shadow = scale_boxes(gt_bboxes, self.neg_scale) # 2. 
Find prior bboxes that lie in gt_core and gt_shadow regions prior_centers = (priors[:, 2:4] + priors[:, 0:2]) / 2 # The center points lie within the gt boxes is_prior_in_gt = is_located_in(prior_centers, gt_bboxes) # Only calculate prior and gt_core IoF. This enables small prior bboxes # to match large gts prior_and_gt_core_overlaps = self.iou_calculator( priors, gt_core, mode='iof') # The center point of effective priors should be within the gt box is_prior_in_gt_core = is_prior_in_gt & ( prior_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k) is_prior_in_gt_shadow = ( self.iou_calculator(priors, gt_shadow, mode='iof') > self.min_pos_iof) # Rule out center effective positive pixels is_prior_in_gt_shadow &= (~is_prior_in_gt_core) num_gts, num_priors = gt_bboxes.size(0), priors.size(0) if num_gts == 0 or num_priors == 0: # If no gts exist, assign all pixels to negative assigned_gt_ids = \ is_prior_in_gt_core.new_zeros((num_priors,), dtype=torch.long) pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2)) else: # Step 3: assign a one-hot gt id to each pixel, and smaller objects # have high priority to assign the pixel. sort_idx = self.get_gt_priorities(gt_bboxes) assigned_gt_ids, pixels_in_gt_shadow = \ self.assign_one_hot_gt_indices(is_prior_in_gt_core, is_prior_in_gt_shadow, gt_priority=sort_idx) if (gt_instances_ignore is not None and gt_instances_ignore.bboxes.numel() > 0): # No ground truth or boxes, return empty assignment gt_bboxes_ignore = gt_instances_ignore.bboxes gt_bboxes_ignore = scale_boxes( gt_bboxes_ignore, scale=self.ignore_gt_scale) is_prior_in_ignored_gts = is_located_in(prior_centers, gt_bboxes_ignore) is_prior_in_ignored_gts = is_prior_in_ignored_gts.any(dim=1) assigned_gt_ids[is_prior_in_ignored_gts] = -1 # 4. Assign prior bboxes with class label according to its gt id. # Default assigned label is the background (-1) assigned_labels = assigned_gt_ids.new_full((num_priors, ), -1) pos_inds = torch.nonzero(assigned_gt_ids > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds] - 1] # 5. Find pixels lying in the shadow of an object shadowed_pixel_labels = pixels_in_gt_shadow.clone() if pixels_in_gt_shadow.numel() > 0: pixel_idx, gt_idx =\ pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1] assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \ 'Some pixels are dually assigned to ignore and gt!' shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1] override = ( assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1]) if self.foreground_dominate: # When a pixel is both positive and shadowed, set it as pos shadowed_pixel_labels = shadowed_pixel_labels[~override] else: # When a pixel is both pos and shadowed, set it as shadowed assigned_labels[pixel_idx[override]] = -1 assigned_gt_ids[pixel_idx[override]] = 0 assign_result = AssignResult( num_gts, assigned_gt_ids, None, labels=assigned_labels) # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2) assign_result.set_extra_property('shadowed_labels', shadowed_pixel_labels) return assign_result def assign_one_hot_gt_indices( self, is_prior_in_gt_core: Tensor, is_prior_in_gt_shadow: Tensor, gt_priority: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: """Assign only one gt index to each prior box. Gts with large gt_priority are more likely to be assigned. Args: is_prior_in_gt_core (Tensor): Bool tensor indicating the prior center is in the core area of a gt (e.g. 0-0.2). Shape: (num_prior, num_gt). 
is_prior_in_gt_shadow (Tensor): Bool tensor indicating the prior center is in the shadowed area of a gt (e.g. 0.2-0.5). Shape: (num_prior, num_gt). gt_priority (Tensor): Priorities of gts. The gt with a higher priority is more likely to be assigned to the bbox when the bbox matches with multiple gts. Shape: (num_gt, ). Returns: tuple: Returns (assigned_gt_inds, shadowed_gt_inds). - assigned_gt_inds: The assigned gt index of each prior bbox \ (i.e. index from 1 to num_gts). Shape: (num_prior, ). - shadowed_gt_inds: shadowed gt indices. It is a tensor of \ shape (num_ignore, 2) with first column being the shadowed prior \ bbox indices and the second column the shadowed gt \ indices (1-based). """ num_bboxes, num_gts = is_prior_in_gt_core.shape if gt_priority is None: gt_priority = torch.arange( num_gts, device=is_prior_in_gt_core.device) assert gt_priority.size(0) == num_gts # The bigger the gt_priority, the more preferable the gt is to be # assigned. The assigned inds are by default 0 (background) assigned_gt_inds = is_prior_in_gt_core.new_zeros((num_bboxes, ), dtype=torch.long) # Shadowed bboxes are assigned to be background. But the corresponding # label is ignored during loss calculation, which is done through # shadowed_gt_inds shadowed_gt_inds = torch.nonzero(is_prior_in_gt_shadow, as_tuple=False) if is_prior_in_gt_core.sum() == 0: # No gt match shadowed_gt_inds[:, 1] += 1 # 1-based. For consistency issue return assigned_gt_inds, shadowed_gt_inds # The priority of each prior box and gt pair. If one prior box is # matched to multiple gts, only the pair with the highest priority # is saved pair_priority = is_prior_in_gt_core.new_full((num_bboxes, num_gts), -1, dtype=torch.long) # Each bbox could match with multiple gts. # The following code deals with this situation # Matched bboxes (to any gt). Shape: (num_pos_anchor, ) inds_of_match = torch.any(is_prior_in_gt_core, dim=1) # The matched gt index of each positive bbox. Length >= num_pos_anchor # , since one bbox could match multiple gts matched_bbox_gt_inds = torch.nonzero( is_prior_in_gt_core, as_tuple=False)[:, 1] # Assign priority to each bbox-gt pair. pair_priority[is_prior_in_gt_core] = gt_priority[matched_bbox_gt_inds] _, argmax_priority = pair_priority[inds_of_match].max(dim=1) assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based # Zero-out the assigned anchor box to filter the shadowed gt indices is_prior_in_gt_core[inds_of_match, argmax_priority] = 0 # Concat the shadowed indices due to overlapping with that outside of # effective scale. shape: (total_num_ignore, 2) shadowed_gt_inds = torch.cat( (shadowed_gt_inds, torch.nonzero(is_prior_in_gt_core, as_tuple=False)), dim=0) # Change `is_prior_in_gt_core` back to keep arguments intact. is_prior_in_gt_core[inds_of_match, argmax_priority] = 1 # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds` if shadowed_gt_inds.numel() > 0: shadowed_gt_inds[:, 1] += 1 return assigned_gt_inds, shadowed_gt_inds
16,747
44.634877
79
py
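scale_boxes from the row above drives the core/shadow split in step 1 of assign; a toy box makes the geometry concrete (box coordinates fabricated):

import torch

bbox = torch.tensor([[0., 0., 10., 10.]])
core = scale_boxes(bbox, 0.2)    # central region -> positives
shadow = scale_boxes(bbox, 0.5)  # wider region -> shadowed labels
print(core)    # tensor([[4.0000, 4.0000, 6.0000, 6.0000]])
print(shadow)  # tensor([[2.5000, 2.5000, 7.5000, 7.5000]])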
ERD
ERD-main/mmdet/models/task_modules/assigners/region_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from ..prior_generators import anchor_inside_flags from .assign_result import AssignResult from .base_assigner import BaseAssigner def calc_region( bbox: Tensor, ratio: float, stride: int, featmap_size: Optional[Tuple[int, int]] = None) -> Tuple[Tensor]: """Calculate region of the box defined by the ratio, the ratio is from the center of the box to every edge.""" # project bbox on the feature f_bbox = bbox / stride x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2]) y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3]) x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2]) y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3]) if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1]) y1 = y1.clamp(min=0, max=featmap_size[0]) x2 = x2.clamp(min=0, max=featmap_size[1]) y2 = y2.clamp(min=0, max=featmap_size[0]) return (x1, y1, x2, y2) def anchor_ctr_inside_region_flags(anchors: Tensor, stride: int, region: Tuple[Tensor]) -> Tensor: """Get flags indicating whether anchor centers are inside regions.""" x1, y1, x2, y2 = region f_anchors = anchors / stride x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5 y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5 flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2) return flags @TASK_UTILS.register_module() class RegionAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposal will be assigned `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: center_ratio (float): ratio of the region in the center of the bbox to define the positive sample. ignore_ratio (float): ratio of the region to define ignore samples. """ def __init__(self, center_ratio: float = 0.2, ignore_ratio: float = 0.5) -> None: self.center_ratio = center_ratio self.ignore_ratio = ignore_ratio def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: dict, featmap_sizes: List[Tuple[int, int]], num_level_anchors: List[int], anchor_scale: int, anchor_strides: List[int], gt_instances_ignore: Optional[InstanceData] = None, allowed_border: int = 0) -> AssignResult: """Assign gt to anchors. This method assigns a gt bbox to every bbox (proposal/anchor); each bbox will be assigned -1, 0, or a positive number. -1 means don't care, 0 means negative sample, a positive number is the index (1-based) of the assigned gt. The assignment is done in following steps, and the order matters. 1. Assign every anchor to 0 (negative) 2. (For each gt bbox) Compute ignore flags based on ignore_region then assign -1 to anchors w.r.t. ignore flags 3. (For each gt bbox) Compute pos flags based on center_region then assign gt_bboxes to anchors w.r.t. pos flags 4. (For each gt bbox) Compute ignore flags based on adjacent anchor level then assign -1 to anchors w.r.t. ignore flags 5. Assign anchors outside of the image to -1 Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). 
The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). img_meta (dict): Meta info of image. featmap_sizes (list[tuple[int, int]]): Feature map size each level. num_level_anchors (list[int]): The number of anchors in each level. anchor_scale (int): Scale of the anchor. anchor_strides (list[int]): Stride of the anchor. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. allowed_border (int, optional): The border to allow the valid anchor. Defaults to 0. Returns: :obj:`AssignResult`: The assign result. """ if gt_instances_ignore is not None: raise NotImplementedError num_gts = len(gt_instances) num_bboxes = len(pred_instances) gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels flat_anchors = pred_instances.priors flat_valid_flags = pred_instances.valid_flags mlvl_anchors = torch.split(flat_anchors, num_level_anchors) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = gt_bboxes.new_zeros((num_bboxes, )) assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ), dtype=torch.long) assigned_labels = gt_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts=num_gts, gt_inds=assigned_gt_inds, max_overlaps=max_overlaps, labels=assigned_labels) num_lvls = len(mlvl_anchors) r1 = (1 - self.center_ratio) / 2 r2 = (1 - self.ignore_ratio) / 2 scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() # 1. assign 0 (negative) by default mlvl_assigned_gt_inds = [] mlvl_ignore_flags = [] for lvl in range(num_lvls): assigned_gt_inds = gt_bboxes.new_full((num_level_anchors[lvl], ), 0, dtype=torch.long) ignore_flags = torch.zeros_like(assigned_gt_inds) mlvl_assigned_gt_inds.append(assigned_gt_inds) mlvl_ignore_flags.append(ignore_flags) for gt_id in range(num_gts): lvl = target_lvls[gt_id].item() featmap_size = featmap_sizes[lvl] stride = anchor_strides[lvl] anchors = mlvl_anchors[lvl] gt_bbox = gt_bboxes[gt_id, :4] # Compute regions ignore_region = calc_region(gt_bbox, r2, stride, featmap_size) ctr_region = calc_region(gt_bbox, r1, stride, featmap_size) # 2. Assign -1 to ignore flags ignore_flags = anchor_ctr_inside_region_flags( anchors, stride, ignore_region) mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 # 3. Assign gt_bboxes to pos flags pos_flags = anchor_ctr_inside_region_flags(anchors, stride, ctr_region) mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1 # 4. 
Assign -1 to ignore adjacent lvl if lvl > 0: d_lvl = lvl - 1 d_anchors = mlvl_anchors[d_lvl] d_featmap_size = featmap_sizes[d_lvl] d_stride = anchor_strides[d_lvl] d_ignore_region = calc_region(gt_bbox, r2, d_stride, d_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( d_anchors, d_stride, d_ignore_region) mlvl_ignore_flags[d_lvl][ignore_flags] = 1 if lvl < num_lvls - 1: u_lvl = lvl + 1 u_anchors = mlvl_anchors[u_lvl] u_featmap_size = featmap_sizes[u_lvl] u_stride = anchor_strides[u_lvl] u_ignore_region = calc_region(gt_bbox, r2, u_stride, u_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( u_anchors, u_stride, u_ignore_region) mlvl_ignore_flags[u_lvl][ignore_flags] = 1 # 4. (cont.) Assign -1 to ignore adjacent lvl for lvl in range(num_lvls): ignore_flags = mlvl_ignore_flags[lvl] mlvl_assigned_gt_inds[lvl][ignore_flags == 1] = -1 # 5. Assign -1 to anchor outside of image flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds) assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] == flat_valid_flags.shape[0]) inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags, img_meta['img_shape'], allowed_border) outside_flags = ~inside_flags flat_assigned_gt_inds[outside_flags] = -1 assigned_labels = torch.zeros_like(flat_assigned_gt_inds) pos_flags = flat_assigned_gt_inds > 0 assigned_labels[pos_flags] = gt_labels[flat_assigned_gt_inds[pos_flags] - 1] return AssignResult( num_gts=num_gts, gt_inds=flat_assigned_gt_inds, max_overlaps=None, labels=assigned_labels)
10,474
42.645833
79
py
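The level-selection rule inside assign above maps each gt to the pyramid level whose base anchor size is nearest in log2 space; pulled out standalone with illustrative numbers:

import torch

anchor_scale, anchor_strides, num_lvls = 8, [4, 8, 16, 32, 64], 5
gt_bboxes = torch.tensor([[0., 0., 32., 32.],
                          [0., 0., 300., 200.]])
scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
                   (gt_bboxes[:, 3] - gt_bboxes[:, 1]))
min_anchor_size = scale.new_full((1, ),
                                 float(anchor_scale * anchor_strides[0]))
target_lvls = torch.floor(
    torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
print(target_lvls)  # tensor([0, 3]): small gt -> level 0, large gt -> level 3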
ERD
ERD-main/mmdet/models/task_modules/assigners/grid_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple, Union import torch from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner @TASK_UTILS.register_module() class GridAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposal will be assigned `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple[float, float]): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). Defaults to 0. gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps Calculator. """ def __init__( self, pos_iou_thr: float, neg_iou_thr: Union[float, Tuple[float, float]], min_pos_iou: float = .0, gt_max_assign_all: bool = True, iou_calculator: ConfigType = dict(type='BboxOverlaps2D') ) -> None: self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to bboxes. The process is very much like the max iou assigner, except that positive samples are constrained within the cell that the gt boxes fall in. This method assigns a gt bbox to every bbox (proposal/anchor); each bbox will be assigned -1, 0, or a positive number. -1 means don't care, 0 means negative sample, a positive number is the index (1-based) of the assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to -1 2. assign proposals whose iou with all gts <= neg_iou_thr to 0 3. for each bbox within a cell, if the iou with its nearest gt > pos_iou_thr and the center of that gt falls inside the cell, assign it to that bbox 4. for each gt bbox, assign its nearest proposals within the cell the gt bbox falls in to itself. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. 
""" gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels priors = pred_instances.priors responsible_flags = pred_instances.responsible_flags num_gts, num_priors = gt_bboxes.size(0), priors.size(0) # compute iou between all gt and priors overlaps = self.iou_calculator(gt_bboxes, priors) # 1. assign -1 by default assigned_gt_inds = overlaps.new_full((num_priors, ), -1, dtype=torch.long) if num_gts == 0 or num_priors == 0: # No ground truth or priors, return empty assignment max_overlaps = overlaps.new_zeros((num_priors, )) if num_gts == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 assigned_labels = overlaps.new_full((num_priors, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) # 2. assign negative: below # for each anchor, which gt best overlaps with it # for each anchor, the max iou of all gts # shape of max_overlaps == argmax_overlaps == num_priors max_overlaps, argmax_overlaps = overlaps.max(dim=0) if isinstance(self.neg_iou_thr, float): assigned_gt_inds[(max_overlaps >= 0) & (max_overlaps <= self.neg_iou_thr)] = 0 elif isinstance(self.neg_iou_thr, (tuple, list)): assert len(self.neg_iou_thr) == 2 assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0]) & (max_overlaps <= self.neg_iou_thr[1])] = 0 # 3. assign positive: falls into responsible cell and above # positive IOU threshold, the order matters. # the prior condition of comparison is to filter out all # unrelated anchors, i.e. not responsible_flags overlaps[:, ~responsible_flags.type(torch.bool)] = -1. # calculate max_overlaps again, but this time we only consider IOUs # for anchors responsible for prediction max_overlaps, argmax_overlaps = overlaps.max(dim=0) # for each gt, which anchor best overlaps with it # for each gt, the max iou of all proposals # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) pos_inds = (max_overlaps > self.pos_iou_thr) & responsible_flags.type( torch.bool) assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 # 4. assign positive to max overlapped anchors within responsible cell for i in range(num_gts): if gt_max_overlaps[i] > self.min_pos_iou: if self.gt_max_assign_all: max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \ responsible_flags.type(torch.bool) assigned_gt_inds[max_iou_inds] = i + 1 elif responsible_flags[gt_argmax_overlaps[i]]: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 # assign labels of positive anchors assigned_labels = assigned_gt_inds.new_full((num_priors, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
7,815
42.910112
79
py
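A hypothetical GridAssigner call (again assuming an mmdet environment so the IoU calculator can be built). responsible_flags would normally come from the YOLO head; here it is hand-set to mark which prior owns the cell the gt center falls in, and all values are fabricated:

import torch
from mmengine.structures import InstanceData

assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)

pred_instances = InstanceData()
pred_instances.priors = torch.tensor([[0., 0., 10., 10.],
                                      [10., 10., 20., 20.]])
pred_instances.responsible_flags = torch.tensor([True, False])

gt_instances = InstanceData()
gt_instances.bboxes = torch.tensor([[1., 1., 9., 9.]])
gt_instances.labels = torch.tensor([0])

result = assigner.assign(pred_instances, gt_instances)
print(result.gt_inds)  # tensor([1, 0]): only the responsible prior matches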
ERD
ERD-main/mmdet/models/task_modules/assigners/match_cost.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import abstractmethod from typing import Optional, Union import torch import torch.nn.functional as F from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import bbox_overlaps, bbox_xyxy_to_cxcywh class BaseMatchCost: """Base match cost class. Args: weight (Union[float, int]): Cost weight. Defaults to 1. """ def __init__(self, weight: Union[float, int] = 1.) -> None: self.weight = weight @abstractmethod def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). img_meta (dict, optional): Image information. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pass @TASK_UTILS.register_module() class BBoxL1Cost(BaseMatchCost): """BBoxL1Cost. Note: ``bboxes`` in ``InstanceData`` passed in is of format 'xyxy' and its coordinates are unnormalized. Args: box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN. Defaults to 'xyxy'. weight (Union[float, int]): Cost weight. Defaults to 1. Examples: >>> from mmdet.models.task_modules.assigners. ... match_costs.match_cost import BBoxL1Cost >>> import torch >>> self = BBoxL1Cost() >>> bbox_pred = torch.rand(1, 4) >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(bbox_pred, gt_bboxes, factor) tensor([[1.6172, 1.6422]]) """ def __init__(self, box_format: str = 'xyxy', weight: Union[float, int] = 1.) -> None: super().__init__(weight=weight) assert box_format in ['xyxy', 'xywh'] self.box_format = box_format def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): ``bboxes`` inside is predicted boxes with unnormalized coordinate (x, y, x, y). gt_instances (:obj:`InstanceData`): ``bboxes`` inside is gt bboxes with unnormalized coordinate (x, y, x, y). img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_bboxes = pred_instances.bboxes gt_bboxes = gt_instances.bboxes # convert box format if self.box_format == 'xywh': gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) pred_bboxes = bbox_xyxy_to_cxcywh(pred_bboxes) # normalized img_h, img_w = img_meta['img_shape'] factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) gt_bboxes = gt_bboxes / factor pred_bboxes = pred_bboxes / factor bbox_cost = torch.cdist(pred_bboxes, gt_bboxes, p=1) return bbox_cost * self.weight @TASK_UTILS.register_module() class IoUCost(BaseMatchCost): """IoUCost. Note: ``bboxes`` in ``InstanceData`` passed in is of format 'xyxy' and its coordinates are unnormalized. Args: iou_mode (str): iou mode such as 'iou', 'giou'. Defaults to 'giou'. weight (Union[float, int]): Cost weight. Defaults to 1. 
Examples: >>> from mmdet.models.task_modules.assigners. ... match_costs.match_cost import IoUCost >>> import torch >>> self = IoUCost() >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> self(bboxes, gt_bboxes) tensor([[-0.1250, 0.1667], [ 0.1667, -0.5000]]) """ def __init__(self, iou_mode: str = 'giou', weight: Union[float, int] = 1.): super().__init__(weight=weight) self.iou_mode = iou_mode def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs): """Compute match cost. Args: pred_instances (:obj:`InstanceData`): ``bboxes`` inside is predicted boxes with unnormalized coordinate (x, y, x, y). gt_instances (:obj:`InstanceData`): ``bboxes`` inside is gt bboxes with unnormalized coordinate (x, y, x, y). img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_bboxes = pred_instances.bboxes gt_bboxes = gt_instances.bboxes overlaps = bbox_overlaps( pred_bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False) # The 1 is a constant that doesn't change the matching, so omitted. iou_cost = -overlaps return iou_cost * self.weight @TASK_UTILS.register_module() class ClassificationCost(BaseMatchCost): """ClsSoftmaxCost. Args: weight (Union[float, int]): Cost weight. Defaults to 1. Examples: >>> from mmdet.models.task_modules.assigners. ... match_costs.match_cost import ClassificationCost >>> import torch >>> self = ClassificationCost() >>> cls_pred = torch.rand(4, 3) >>> gt_labels = torch.tensor([0, 1, 2]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(cls_pred, gt_labels) tensor([[-0.3430, -0.3525, -0.3045], [-0.3077, -0.2931, -0.3992], [-0.3664, -0.3455, -0.2881], [-0.3343, -0.2701, -0.3956]]) """ def __init__(self, weight: Union[float, int] = 1) -> None: super().__init__(weight=weight) def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): ``scores`` inside is predicted classification logits, of shape (num_queries, num_class). gt_instances (:obj:`InstanceData`): ``labels`` inside should have shape (num_gt, ). img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_scores = pred_instances.scores gt_labels = gt_instances.labels pred_scores = pred_scores.softmax(-1) cls_cost = -pred_scores[:, gt_labels] return cls_cost * self.weight @TASK_UTILS.register_module() class FocalLossCost(BaseMatchCost): """FocalLossCost. Args: alpha (Union[float, int]): focal_loss alpha. Defaults to 0.25. gamma (Union[float, int]): focal_loss gamma. Defaults to 2. eps (float): Defaults to 1e-12. binary_input (bool): Whether the input is binary. Currently, binary_input = True is for masks input, binary_input = False is for label input. Defaults to False. weight (Union[float, int]): Cost weight. Defaults to 1. """ def __init__(self, alpha: Union[float, int] = 0.25, gamma: Union[float, int] = 2, eps: float = 1e-12, binary_input: bool = False, weight: Union[float, int] = 1.) -> None: super().__init__(weight=weight) self.alpha = alpha self.gamma = gamma self.eps = eps self.binary_input = binary_input def _focal_loss_cost(self, cls_pred: Tensor, gt_labels: Tensor) -> Tensor: """ Args: cls_pred (Tensor): Predicted classification logits, shape (num_queries, num_class). 
gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight """ cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] return cls_cost * self.weight def _mask_focal_loss_cost(self, cls_pred, gt_labels) -> Tensor: """ Args: cls_pred (Tensor): Predicted classification logits. in shape (num_queries, d1, ..., dn), dtype=torch.float32. gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn), dtype=torch.long. Labels should be binary. Returns: Tensor: Focal cost matrix with weight in shape\ (num_queries, num_gt). """ cls_pred = cls_pred.flatten(1) gt_labels = gt_labels.flatten(1).float() n = cls_pred.shape[1] cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) return cls_cost / n * self.weight def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): Predicted instances which must contain ``scores`` or ``masks``. gt_instances (:obj:`InstanceData`): Ground truth which must contain ``labels`` or ``mask``. img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ if self.binary_input: pred_masks = pred_instances.masks gt_masks = gt_instances.masks return self._mask_focal_loss_cost(pred_masks, gt_masks) else: pred_scores = pred_instances.scores gt_labels = gt_instances.labels return self._focal_loss_cost(pred_scores, gt_labels) @TASK_UTILS.register_module() class DiceCost(BaseMatchCost): """Cost of mask assignments based on dice losses. Args: pred_act (bool): Whether to apply sigmoid to mask_pred. Defaults to False. eps (float): Defaults to 1e-3. naive_dice (bool): If True, use the naive dice loss in which the power of the number in the denominator is the first power. If False, use the second power that is adopted by K-Net and SOLO. Defaults to True. weight (Union[float, int]): Cost weight. Defaults to 1. """ def __init__(self, pred_act: bool = False, eps: float = 1e-3, naive_dice: bool = True, weight: Union[float, int] = 1.) -> None: super().__init__(weight=weight) self.pred_act = pred_act self.eps = eps self.naive_dice = naive_dice def _binary_mask_dice_loss(self, mask_preds: Tensor, gt_masks: Tensor) -> Tensor: """ Args: mask_preds (Tensor): Mask prediction in shape (num_queries, *). gt_masks (Tensor): Ground truth in shape (num_gt, *) store 0 or 1, 0 for negative class and 1 for positive class. Returns: Tensor: Dice cost matrix in shape (num_queries, num_gt). 
""" mask_preds = mask_preds.flatten(1) gt_masks = gt_masks.flatten(1).float() numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks) if self.naive_dice: denominator = mask_preds.sum(-1)[:, None] + \ gt_masks.sum(-1)[None, :] else: denominator = mask_preds.pow(2).sum(1)[:, None] + \ gt_masks.pow(2).sum(1)[None, :] loss = 1 - (numerator + self.eps) / (denominator + self.eps) return loss def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): Predicted instances which must contain ``masks``. gt_instances (:obj:`InstanceData`): Ground truth which must contain ``mask``. img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_masks = pred_instances.masks gt_masks = gt_instances.masks if self.pred_act: pred_masks = pred_masks.sigmoid() dice_cost = self._binary_mask_dice_loss(pred_masks, gt_masks) return dice_cost * self.weight @TASK_UTILS.register_module() class CrossEntropyLossCost(BaseMatchCost): """CrossEntropyLossCost. Args: use_sigmoid (bool): Whether the prediction uses sigmoid of softmax. Defaults to True. weight (Union[float, int]): Cost weight. Defaults to 1. """ def __init__(self, use_sigmoid: bool = True, weight: Union[float, int] = 1.) -> None: super().__init__(weight=weight) self.use_sigmoid = use_sigmoid def _binary_cross_entropy(self, cls_pred: Tensor, gt_labels: Tensor) -> Tensor: """ Args: cls_pred (Tensor): The prediction with shape (num_queries, 1, *) or (num_queries, *). gt_labels (Tensor): The learning label of prediction with shape (num_gt, *). Returns: Tensor: Cross entropy cost matrix in shape (num_queries, num_gt). """ cls_pred = cls_pred.flatten(1).float() gt_labels = gt_labels.flatten(1).float() n = cls_pred.shape[1] pos = F.binary_cross_entropy_with_logits( cls_pred, torch.ones_like(cls_pred), reduction='none') neg = F.binary_cross_entropy_with_logits( cls_pred, torch.zeros_like(cls_pred), reduction='none') cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \ torch.einsum('nc,mc->nm', neg, 1 - gt_labels) cls_cost = cls_cost / n return cls_cost def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): Predicted instances which must contain ``scores`` or ``masks``. gt_instances (:obj:`InstanceData`): Ground truth which must contain ``labels`` or ``masks``. img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_masks = pred_instances.masks gt_masks = gt_instances.masks if self.use_sigmoid: cls_cost = self._binary_cross_entropy(pred_masks, gt_masks) else: raise NotImplementedError return cls_cost * self.weight
16,863
35.344828
79
py
ERD
ERD-main/mmdet/models/task_modules/assigners/hungarian_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union

import torch
from mmengine import ConfigDict
from mmengine.structures import InstanceData
from scipy.optimize import linear_sum_assignment
from torch import Tensor

from mmdet.registry import TASK_UTILS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@TASK_UTILS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are a weighted sum of several components.
    For DETR the costs are a weighted sum of the classification cost, the
    regression L1 cost and the regression IoU cost. The targets don't include
    the no_object, so generally there are more predictions than targets. After
    the one-to-one matching, the un-matched are treated as backgrounds. Thus
    each query prediction will be assigned with `0` or a positive integer
    indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        match_costs (:obj:`ConfigDict` or dict or \
            List[Union[:obj:`ConfigDict`, dict]]): Match cost configs.
    """

    def __init__(
        self, match_costs: Union[List[Union[dict, ConfigDict]], dict,
                                 ConfigDict]
    ) -> None:

        if isinstance(match_costs, dict):
            match_costs = [match_costs]
        elif isinstance(match_costs, list):
            assert len(match_costs) > 0, \
                'match_costs must not be an empty list.'

        self.match_costs = [
            TASK_UTILS.build(match_cost) for match_cost in match_costs
        ]

    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               img_meta: Optional[dict] = None,
               **kwargs) -> AssignResult:
        """Computes one-to-one matching based on the weighted costs.

        This method assigns each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and a positive number is the index (1-based)
        of the assigned gt.

        The assignment is done in the following steps, the order matters.

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            pred_instances (:obj:`InstanceData`): Instances of model
                predictions. It includes ``priors``, and the priors can
                be anchors or points, or the bboxes predicted by the
                previous stage, has shape (n, 4). The bboxes predicted by
                the current model or stage will be named ``bboxes``,
                ``labels``, and ``scores``, the same as the ``InstanceData``
                in other places. It may include ``masks``, with shape
                (n, h, w) or (n, l).
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It usually includes ``bboxes``, with shape
                (k, 4), ``labels``, with shape (k, ) and ``masks``, with
                shape (k, h, w) or (k, l).
            img_meta (dict): Image information.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert isinstance(gt_instances.labels, Tensor)
        num_gts, num_preds = len(gt_instances), len(pred_instances)
        gt_labels = gt_instances.labels
        device = gt_labels.device

        # 1. assign -1 by default
        assigned_gt_inds = torch.full((num_preds, ),
                                      -1,
                                      dtype=torch.long,
                                      device=device)
        assigned_labels = torch.full((num_preds, ),
                                     -1,
                                     dtype=torch.long,
                                     device=device)

        if num_gts == 0 or num_preds == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gts=num_gts,
                gt_inds=assigned_gt_inds,
                max_overlaps=None,
                labels=assigned_labels)

        # 2. compute weighted cost
        cost_list = []
        for match_cost in self.match_costs:
            cost = match_cost(
                pred_instances=pred_instances,
                gt_instances=gt_instances,
                img_meta=img_meta)
            cost_list.append(cost)
        cost = torch.stack(cost_list).sum(dim=0)

        # 3. do Hungarian matching on CPU using linear_sum_assignment
        cost = cost.detach().cpu()
        if linear_sum_assignment is None:
            raise ImportError('Please run "pip install scipy" '
                              'to install scipy first.')

        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(device)

        # 4. assign backgrounds and foregrounds
        # assign all indices to backgrounds first
        assigned_gt_inds[:] = 0
        # assign foregrounds based on matching results
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(
            num_gts=num_gts,
            gt_inds=assigned_gt_inds,
            max_overlaps=None,
            labels=assigned_labels)
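

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): a
# hypothetical toy matching run. It assumes mmdet's registries are already
# populated (e.g. by importing mmdet or calling
# mmdet.utils.register_all_modules()), so that the 'ClassificationCost' and
# 'IoUCost' entries from match_cost.py can be built; all values are
# illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    assigner = HungarianAssigner(match_costs=[
        dict(type='ClassificationCost', weight=1.),
        dict(type='IoUCost', iou_mode='giou', weight=2.),
    ])
    xy = torch.rand(4, 2) * 5
    wh = torch.rand(4, 2) * 4 + 1
    pred_instances = InstanceData(
        scores=torch.randn(4, 3),  # logits for 3 classes
        bboxes=torch.cat([xy, xy + wh], dim=1))  # valid (x1, y1, x2, y2)
    gt_instances = InstanceData(
        bboxes=torch.FloatTensor([[0, 0, 5, 5], [2, 2, 8, 8]]),
        labels=torch.tensor([1, 2]))
    result = assigner.assign(pred_instances, gt_instances,
                             img_meta=dict(img_shape=(10, 10)))
    print(result.gt_inds)  # 0 = background, i > 0 = matched to gt i (1-based)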
6,075
40.616438
79
py
ERD
ERD-main/mmdet/models/task_modules/assigners/task_aligned_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch
from mmengine.structures import InstanceData

from mmdet.registry import TASK_UTILS
from mmdet.utils import ConfigType
from .assign_result import AssignResult
from .base_assigner import BaseAssigner

INF = 100000000


@TASK_UTILS.register_module()
class TaskAlignedAssigner(BaseAssigner):
    """Task aligned assigner used in the paper:
    `TOOD: Task-aligned One-stage Object Detection.
    <https://arxiv.org/abs/2108.07755>`_.

    Assign a corresponding gt bbox or background to each predicted bbox.
    Each bbox will be assigned with `0` or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        topk (int): number of bbox selected in each level
        iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou
            calculator. Defaults to ``dict(type='BboxOverlaps2D')``
    """

    def __init__(self,
                 topk: int,
                 iou_calculator: ConfigType = dict(type='BboxOverlaps2D')):
        assert topk >= 1
        self.topk = topk
        self.iou_calculator = TASK_UTILS.build(iou_calculator)

    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               gt_instances_ignore: Optional[InstanceData] = None,
               alpha: int = 1,
               beta: int = 6) -> AssignResult:
        """Assign gt to bboxes.

        The assignment is done in the following steps

        1. compute alignment metric between all bbox (bbox of all pyramid
           levels) and gt
        2. select top-k bbox as candidates for each gt
        3. limit the positive sample's center in gt (because the anchor-free
           detector can only predict positive distance)

        Args:
            pred_instances (:obj:`InstanceData`): Instances of model
                predictions. It includes ``priors``, and the priors can
                be anchors, points, or bboxes predicted by the model,
                shape (n, 4).
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It usually includes ``bboxes`` and ``labels``
                attributes.
            gt_instances_ignore (:obj:`InstanceData`, optional): Instances
                to be ignored during training. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.
            alpha (int): Hyper-parameter related to alignment_metrics.
                Defaults to 1.
            beta (int): Hyper-parameter related to alignment_metrics.
                Defaults to 6.

        Returns:
            :obj:`AssignResult`: The assign result.
""" priors = pred_instances.priors decode_bboxes = pred_instances.bboxes pred_scores = pred_instances.scores gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels priors = priors[:, :4] num_gt, num_bboxes = gt_bboxes.size(0), priors.size(0) # compute alignment metric between all bbox and gt overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach() bbox_scores = pred_scores[:, gt_labels].detach() # assign 0 by default assigned_gt_inds = priors.new_full((num_bboxes, ), 0, dtype=torch.long) assign_metrics = priors.new_zeros((num_bboxes, )) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = priors.new_zeros((num_bboxes, )) if num_gt == 0: # No gt boxes, assign everything to background assigned_gt_inds[:] = 0 assigned_labels = priors.new_full((num_bboxes, ), -1, dtype=torch.long) assign_result = AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) assign_result.assign_metrics = assign_metrics return assign_result # select top-k bboxes as candidates for each gt alignment_metrics = bbox_scores**alpha * overlaps**beta topk = min(self.topk, alignment_metrics.size(0)) _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True) candidate_metrics = alignment_metrics[candidate_idxs, torch.arange(num_gt)] is_pos = candidate_metrics > 0 # limit the positive sample's center in gt priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0 priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0 for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_bboxes ep_priors_cx = priors_cx.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) ep_priors_cy = priors_cy.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # bbox center and gt side l_ = ep_priors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_priors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - ep_priors_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_priors_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest iou will be selected. overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 assign_metrics[max_overlaps != -INF] = alignment_metrics[ max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]] assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] assign_result = AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) assign_result.assign_metrics = assign_metrics return assign_result
6,966
42.81761
79
py
ERD
ERD-main/mmdet/models/task_modules/assigners/uniform_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch
from mmengine.structures import InstanceData

from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_xyxy_to_cxcywh
from mmdet.utils import ConfigType
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@TASK_UTILS.register_module()
class UniformAssigner(BaseAssigner):
    """Uniform Matching between the priors and gt boxes, which can achieve
    balance in positive priors; gt_bboxes_ignore is not considered for now.

    Args:
        pos_ignore_thr (float): the threshold to ignore positive priors
        neg_ignore_thr (float): the threshold to ignore negative priors
        match_times (int): Number of positive priors for each gt box.
            Defaults to 4.
        iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou
            calculator. Defaults to ``dict(type='BboxOverlaps2D')``
    """

    def __init__(self,
                 pos_ignore_thr: float,
                 neg_ignore_thr: float,
                 match_times: int = 4,
                 iou_calculator: ConfigType = dict(type='BboxOverlaps2D')):
        self.match_times = match_times
        self.pos_ignore_thr = pos_ignore_thr
        self.neg_ignore_thr = neg_ignore_thr
        self.iou_calculator = TASK_UTILS.build(iou_calculator)

    def assign(
        self,
        pred_instances: InstanceData,
        gt_instances: InstanceData,
        gt_instances_ignore: Optional[InstanceData] = None
    ) -> AssignResult:
        """Assign gt to priors.

        The assignment is done in the following steps

        1. assign 0 (background) by default
        2. compute the L1 cost between boxes. Note that we use both the
           priors and the predicted boxes
        3. compute the ignore indexes using gt_bboxes and the predicted
           boxes
        4. compute the ignore indexes of positive samples using the priors
           and the predicted boxes

        Args:
            pred_instances (:obj:`InstanceData`): Instances of model
                predictions. It includes ``priors``, and the priors can
                be anchors, points, or bboxes predicted by the model,
                shape (n, 4).
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It usually includes ``bboxes`` and ``labels``
                attributes.
            gt_instances_ignore (:obj:`InstanceData`, optional): Instances
                to be ignored during training. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            :obj:`AssignResult`: The assign result.
        """

        gt_bboxes = gt_instances.bboxes
        gt_labels = gt_instances.labels
        priors = pred_instances.priors
        bbox_pred = pred_instances.decoder_priors

        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)

        # 1. assign 0 (background) by default
        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
                                              0,
                                              dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)

        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            assign_result = AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)
            assign_result.set_extra_property(
                'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
            assign_result.set_extra_property('pos_predicted_boxes',
                                             bbox_pred.new_empty((0, 4)))
            assign_result.set_extra_property('target_boxes',
                                             bbox_pred.new_empty((0, 4)))
            return assign_result

        # 2. Compute the L1 cost between boxes
        # Note that we use both the priors and the predicted boxes
        cost_bbox = torch.cdist(
            bbox_xyxy_to_cxcywh(bbox_pred),
            bbox_xyxy_to_cxcywh(gt_bboxes),
            p=1)
        cost_bbox_priors = torch.cdist(
            bbox_xyxy_to_cxcywh(priors), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)

        # We found that topk function has different results in cpu and
        # cuda mode. In order to ensure consistency with the source code,
        # we also use cpu mode.
        # TODO: Check whether the performance of cpu and cuda are the same.
        C = cost_bbox.cpu()
        C1 = cost_bbox_priors.cpu()

        # self.match_times x n
        index = torch.topk(
            C,
            k=self.match_times,
            dim=0,
            largest=False)[1]

        # self.match_times x n
        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
        # (self.match_times*2) x n
        indexes = torch.cat((index, index1),
                            dim=1).reshape(-1).to(bbox_pred.device)

        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
        anchor_overlaps = self.iou_calculator(priors, gt_bboxes)
        pred_max_overlaps, _ = pred_overlaps.max(dim=1)
        anchor_max_overlaps, _ = anchor_overlaps.max(dim=0)

        # 3. Compute the ignore indexes using gt_bboxes and the predicted
        # boxes
        ignore_idx = pred_max_overlaps > self.neg_ignore_thr
        assigned_gt_inds[ignore_idx] = -1

        # 4. Compute the ignore indexes of positive samples using the priors
        # and the predicted boxes
        pos_gt_index = torch.arange(
            0, C1.size(1),
            device=bbox_pred.device).repeat(self.match_times * 2)
        pos_ious = anchor_overlaps[indexes, pos_gt_index]
        pos_ignore_idx = pos_ious < self.pos_ignore_thr

        pos_gt_index_with_ignore = pos_gt_index + 1
        pos_gt_index_with_ignore[pos_ignore_idx] = -1
        assigned_gt_inds[indexes] = pos_gt_index_with_ignore

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None

        assign_result = AssignResult(
            num_gts,
            assigned_gt_inds,
            anchor_max_overlaps,
            labels=assigned_labels)
        assign_result.set_extra_property('pos_idx', ~pos_ignore_idx)
        assign_result.set_extra_property('pos_predicted_boxes',
                                         bbox_pred[indexes])
        assign_result.set_extra_property('target_boxes',
                                         gt_bboxes[pos_gt_index])
        return assign_result
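

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): a toy
# run of YOLOF-style uniform matching. ``decoder_priors`` holds the decoded
# predictions and ``priors`` the anchors; the 0.15/0.7 thresholds and all
# boxes are illustrative values, and the 'BboxOverlaps2D' registry entry is
# assumed to be available.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    assigner = UniformAssigner(pos_ignore_thr=0.15, neg_ignore_thr=0.7)
    xy = torch.rand(6, 2) * 8
    wh = torch.rand(6, 2) * 4 + 2
    boxes = torch.cat([xy, xy + wh], dim=1)
    pred_instances = InstanceData(priors=boxes, decoder_priors=boxes)
    gt_instances = InstanceData(
        bboxes=torch.tensor([[0., 0., 6., 6.], [4., 4., 12., 12.]]),
        labels=torch.tensor([0, 1]))
    result = assigner.assign(pred_instances, gt_instances)
    # match_times (default 4) predictions are drawn per gt from each cost;
    # positives whose anchor IoU < pos_ignore_thr are marked ignore (-1).
    print(result.gt_inds)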
7,092
39.764368
77
py
ERD
ERD-main/mmdet/models/task_modules/assigners/point_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch
from mmengine.structures import InstanceData

from mmdet.registry import TASK_UTILS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@TASK_UTILS.register_module()
class PointAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each point.

    Each point will be assigned with `0`, or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt
    """

    def __init__(self, scale: int = 4, pos_num: int = 3) -> None:
        self.scale = scale
        self.pos_num = pos_num

    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               gt_instances_ignore: Optional[InstanceData] = None,
               **kwargs) -> AssignResult:
        """Assign gt to points.

        This method assigns a gt bbox to every point, and each point will
        be assigned with 0 or a positive number. 0 means negative sample
        (background), and a positive number is the index (1-based) of the
        assigned gt.

        The assignment is done in the following steps, the order matters.

        1. assign every point to the background (0)
        2. a point is assigned to some gt bbox if
           (i) the point is within the k closest points to the gt bbox and
           (ii) the distance between this point and the gt is smaller than
           that to other gt bboxes

        Args:
            pred_instances (:obj:`InstanceData`): Instances of model
                predictions. It includes ``priors``, and the priors can
                be anchors or points, or the bboxes predicted by the
                previous stage, has shape (n, 4). The bboxes predicted by
                the current model or stage will be named ``bboxes``,
                ``labels``, and ``scores``, the same as the ``InstanceData``
                in other places.
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It usually includes ``bboxes``, with shape
                (k, 4), and ``labels``, with shape (k, ).
            gt_instances_ignore (:obj:`InstanceData`, optional): Instances
                to be ignored during training. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        gt_bboxes = gt_instances.bboxes
        gt_labels = gt_instances.labels
        # points to be assigned, shape (n, 3) while the last
        # dimension stands for (x, y, stride).
        points = pred_instances.priors

        num_points = points.shape[0]
        num_gts = gt_bboxes.shape[0]

        if num_gts == 0 or num_points == 0:
            # If no truth assign everything to the background
            assigned_gt_inds = points.new_full((num_points, ),
                                               0,
                                               dtype=torch.long)
            assigned_labels = points.new_full((num_points, ),
                                              -1,
                                              dtype=torch.long)
            return AssignResult(
                num_gts=num_gts,
                gt_inds=assigned_gt_inds,
                max_overlaps=None,
                labels=assigned_labels)

        points_xy = points[:, :2]
        points_stride = points[:, 2]
        points_lvl = torch.log2(
            points_stride).int()  # [3...,4...,5...,6...,7...]
        lvl_min, lvl_max = points_lvl.min(), points_lvl.max()

        # assign gt box
        gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
        gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
        scale = self.scale
        gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
                          torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
        gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)

        # stores the assigned gt index of each point
        assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)
        # stores the assigned gt dist (to this point) of each point
        assigned_gt_dist = points.new_full((num_points, ), float('inf'))
        points_range = torch.arange(points.shape[0])

        for idx in range(num_gts):
            gt_lvl = gt_bboxes_lvl[idx]
            # get the index of points in this level
            lvl_idx = gt_lvl == points_lvl
            points_index = points_range[lvl_idx]
            # get the points in this level
            lvl_points = points_xy[lvl_idx, :]
            # get the center point of gt
            gt_point = gt_bboxes_xy[[idx], :]
            # get width and height of gt
            gt_wh = gt_bboxes_wh[[idx], :]
            # compute the distance between gt center and
            # all points in this level
            points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)
            # find the nearest k points to gt center in this level
            min_dist, min_dist_index = torch.topk(
                points_gt_dist, self.pos_num, largest=False)
            # the index of nearest k points to gt center in this level
            min_dist_points_index = points_index[min_dist_index]
            # The less_than_recorded_index stores the index
            # of min_dist that is less than the assigned_gt_dist. Where
            # assigned_gt_dist stores the dist from previous assigned gt
            # (if exist) to each point.
            less_than_recorded_index = min_dist < assigned_gt_dist[
                min_dist_points_index]
            # The min_dist_points_index stores the index of points satisfy:
            # (1) it is k nearest to current gt center in this level.
            # (2) it is closer to current gt center than other gt center.
            min_dist_points_index = min_dist_points_index[
                less_than_recorded_index]
            # assign the result
            assigned_gt_inds[min_dist_points_index] = idx + 1
            assigned_gt_dist[min_dist_points_index] = min_dist[
                less_than_recorded_index]

        assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)
        pos_inds = torch.nonzero(
            assigned_gt_inds > 0, as_tuple=False).squeeze()
        if pos_inds.numel() > 0:
            assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -
                                                  1]

        return AssignResult(
            num_gts=num_gts,
            gt_inds=assigned_gt_inds,
            max_overlaps=None,
            labels=assigned_labels)
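

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): points
# carry (x, y, stride); a gt is only matched against points from the pyramid
# level whose stride fits its size. All coordinates are illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    assigner = PointAssigner(scale=4, pos_num=1)
    # A 2x2 grid of stride-8 points plus one stride-16 point.
    pred_instances = InstanceData(priors=torch.tensor([
        [4., 4., 8.], [12., 4., 8.], [4., 12., 8.], [12., 12., 8.],
        [8., 8., 16.],
    ]))
    gt_instances = InstanceData(
        bboxes=torch.tensor([[0., 0., 30., 34.]]),
        labels=torch.tensor([1]))
    result = assigner.assign(pred_instances, gt_instances)
    print(result.gt_inds)  # 1 marks the point assigned to the (1-based) gt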
6,884
43.134615
79
py
ERD
ERD-main/mmdet/models/task_modules/assigners/approx_max_iou_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union

import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData

from mmdet.registry import TASK_UTILS
from .assign_result import AssignResult
from .max_iou_assigner import MaxIoUAssigner


@TASK_UTILS.register_module()
class ApproxMaxIoUAssigner(MaxIoUAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with an integer indicating the ground
    truth index. (semi-positive index: gt label (0-based), -1: background)

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt).
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow low quality matches. This
            is usually allowed for RPN and single stage detectors, but not
            allowed in the second stage.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will assign
            on CPU device. Negative values mean not assign on CPU.
        iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps
            Calculator.
    """

    def __init__(
        self,
        pos_iou_thr: float,
        neg_iou_thr: Union[float, tuple],
        min_pos_iou: float = .0,
        gt_max_assign_all: bool = True,
        ignore_iof_thr: float = -1,
        ignore_wrt_candidates: bool = True,
        match_low_quality: bool = True,
        gpu_assign_thr: int = -1,
        iou_calculator: Union[ConfigDict, dict] = dict(type='BboxOverlaps2D')
    ) -> None:
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        self.gpu_assign_thr = gpu_assign_thr
        self.match_low_quality = match_low_quality
        self.iou_calculator = TASK_UTILS.build(iou_calculator)

    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               gt_instances_ignore: Optional[InstanceData] = None,
               **kwargs) -> AssignResult:
        """Assign gt to approxs.

        This method assigns a gt bbox to each group of approxs (bboxes).
        Each group of approxs is represented by a base approx (bbox) and
        will be assigned with -1, or a semi-positive number.
        background_label (-1) means negative sample, semi-positive number
        is the index (0-based) of assigned gt.

        The assignment is done in the following steps, the order matters.

        1. assign every bbox to background_label (-1)
        2. use the max IoU of each group of approxs to assign
        3. assign proposals whose iou with all gts < neg_iou_thr to background
        4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that bbox
        5. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself

        Args:
            pred_instances (:obj:`InstanceData`): Instances of model
                predictions.
                It includes ``priors``, and the priors can
                be anchors or points, or the bboxes predicted by the
                previous stage, has shape (n, 4). ``approxs`` means the
                group of approxs aligned with ``priors``, has shape
                (n, num_approxs, 4).
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It usually includes ``bboxes``, with shape
                (k, 4), and ``labels``, with shape (k, ).
            gt_instances_ignore (:obj:`InstanceData`, optional): Instances
                to be ignored during training. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        squares = pred_instances.priors
        approxs = pred_instances.approxs
        gt_bboxes = gt_instances.bboxes
        gt_labels = gt_instances.labels
        gt_bboxes_ignore = None if gt_instances_ignore is None else \
            gt_instances_ignore.get('bboxes', None)
        approxs_per_octave = approxs.size(1)

        num_squares = squares.size(0)
        num_gts = gt_bboxes.size(0)

        if num_squares == 0 or num_gts == 0:
            # No predictions and/or truth, return empty assignment
            overlaps = approxs.new(num_gts, num_squares)
            assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
            return assign_result

        # re-organize anchors by approxs_per_octave x num_squares
        approxs = torch.transpose(approxs, 0, 1).contiguous().view(-1, 4)
        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
            num_gts > self.gpu_assign_thr) else False
        # compute overlap and assign gt on CPU when number of GT is large
        if assign_on_cpu:
            device = approxs.device
            approxs = approxs.cpu()
            gt_bboxes = gt_bboxes.cpu()
            if gt_bboxes_ignore is not None:
                gt_bboxes_ignore = gt_bboxes_ignore.cpu()
            if gt_labels is not None:
                gt_labels = gt_labels.cpu()
        all_overlaps = self.iou_calculator(approxs, gt_bboxes)

        overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
                                        num_gts).max(dim=0)
        overlaps = torch.transpose(overlaps, 0, 1)

        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):
            if self.ignore_wrt_candidates:
                ignore_overlaps = self.iou_calculator(
                    squares, gt_bboxes_ignore, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            else:
                ignore_overlaps = self.iou_calculator(
                    gt_bboxes_ignore, squares, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1

        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
        if assign_on_cpu:
            assign_result.gt_inds = assign_result.gt_inds.to(device)
            assign_result.max_overlaps = assign_result.max_overlaps.to(device)
            if assign_result.labels is not None:
                assign_result.labels = assign_result.labels.to(device)
        return assign_result
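

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): a toy
# group-wise assignment. Each square prior carries a small group of approx
# anchors; the best IoU within a group drives the assignment. The boxes and
# thresholds are illustrative, and the 'BboxOverlaps2D' registry entry is
# assumed to be available.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.4)
    squares = torch.tensor([[0., 0., 8., 8.], [8., 8., 16., 16.]])
    # Two approxs per square: the square itself and a 2x-wide variant.
    wide = squares + torch.tensor([0., 0., 8., 0.])
    pred_instances = InstanceData(
        priors=squares, approxs=torch.stack([squares, wide], dim=1))
    gt_instances = InstanceData(
        bboxes=torch.tensor([[0., 0., 16., 8.]]),
        labels=torch.tensor([0]))
    result = assigner.assign(pred_instances, gt_instances)
    print(result.gt_inds)  # tensor([1, 0]): group 1 matches via its wide approx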
7,510
45.079755
79
py
ERD
ERD-main/mmdet/models/task_modules/assigners/iou2d_calculator.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps, get_box_tensor


def cast_tensor_type(x, scale=1., dtype=None):
    if dtype == 'fp16':
        # scale is for preventing overflows
        x = (x / scale).half()
    return x


@TASK_UTILS.register_module()
class BboxOverlaps2D:
    """2D Overlaps (e.g. IoUs, GIoUs) Calculator."""

    def __init__(self, scale=1., dtype=None):
        self.scale = scale
        self.dtype = dtype

    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
        """Calculate IoU between 2D bboxes.

        Args:
            bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)
                in <x1, y1, x2, y2> format, or shape (m, 5) in
                <x1, y1, x2, y2, score> format.
            bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (n, 4)
                in <x1, y1, x2, y2> format, shape (n, 5) in
                <x1, y1, x2, y2, score> format, or be empty. If
                ``is_aligned`` is ``True``, then m and n must be equal.
            mode (str): "iou" (intersection over union), "iof" (intersection
                over foreground), or "giou" (generalized intersection over
                union).
            is_aligned (bool, optional): If True, then m and n must be equal.
                Defaults to False.

        Returns:
            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
        """
        bboxes1 = get_box_tensor(bboxes1)
        bboxes2 = get_box_tensor(bboxes2)
        assert bboxes1.size(-1) in [0, 4, 5]
        assert bboxes2.size(-1) in [0, 4, 5]
        if bboxes2.size(-1) == 5:
            bboxes2 = bboxes2[..., :4]
        if bboxes1.size(-1) == 5:
            bboxes1 = bboxes1[..., :4]

        if self.dtype == 'fp16':
            # change tensor type to save cpu and cuda memory and keep speed
            bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
            bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
            overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
            if not overlaps.is_cuda and overlaps.dtype == torch.float16:
                # resume cpu float32
                overlaps = overlaps.float()
            return overlaps

        return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)

    def __repr__(self):
        """str: a string describing the module"""
        repr_str = self.__class__.__name__ + f'(' \
            f'scale={self.scale}, dtype={self.dtype})'
        return repr_str
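

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): IoU and
# GIoU between two toy box sets; values are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    calculator = BboxOverlaps2D()
    bboxes1 = torch.FloatTensor([[0, 0, 10, 10], [5, 5, 15, 15]])
    bboxes2 = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20]])
    print(calculator(bboxes1, bboxes2, mode='iou'))   # (2, 2) IoU matrix
    print(calculator(bboxes1, bboxes2, mode='giou'))  # GIoU can be negative
    # The fp16 variant trades precision for memory:
    calculator_fp16 = BboxOverlaps2D(scale=512., dtype='fp16')
    print(calculator_fp16)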
2,616
36.927536
77
py
ERD
ERD-main/mmdet/models/task_modules/assigners/max_iou_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union

import torch
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.registry import TASK_UTILS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@TASK_UTILS.register_module()
class MaxIoUAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with `-1`, or a semi-positive integer
    indicating the ground truth index.

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt). `min_pos_iou` is set to avoid assigning bboxes that have
            extremely small iou with GT as positive samples. It brings about
            0.3 mAP improvements in 1x schedule but does not affect the
            performance of 3x schedule. More comparisons can be found in
            `PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_.
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow low quality matches. This
            is usually allowed for RPN and single stage detectors, but not
            allowed in the second stage. Details are demonstrated in Step 4.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will assign
            on CPU device. Negative values mean not assign on CPU.
        iou_calculator (dict): Config of overlaps Calculator.
    """

    def __init__(self,
                 pos_iou_thr: float,
                 neg_iou_thr: Union[float, tuple],
                 min_pos_iou: float = .0,
                 gt_max_assign_all: bool = True,
                 ignore_iof_thr: float = -1,
                 ignore_wrt_candidates: bool = True,
                 match_low_quality: bool = True,
                 gpu_assign_thr: float = -1,
                 iou_calculator: dict = dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        self.gpu_assign_thr = gpu_assign_thr
        self.match_low_quality = match_low_quality
        self.iou_calculator = TASK_UTILS.build(iou_calculator)

    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               gt_instances_ignore: Optional[InstanceData] = None,
               **kwargs) -> AssignResult:
        """Assign gt to bboxes.

        This method assigns a gt bbox to every bbox (proposal/anchor), and
        each bbox will be assigned with -1, or a semi-positive number. -1
        means negative sample, semi-positive number is the index (0-based)
        of assigned gt.

        The assignment is done in the following steps, the order matters.

        1. assign every bbox to the background
        2. assign proposals whose iou with all gts < neg_iou_thr to 0
        3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that bbox
        4. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself

        Args:
            pred_instances (:obj:`InstanceData`): Instances of model
                predictions.
                It includes ``priors``, and the priors can
                be anchors or points, or the bboxes predicted by the
                previous stage, has shape (n, 4). The bboxes predicted by
                the current model or stage will be named ``bboxes``,
                ``labels``, and ``scores``, the same as the ``InstanceData``
                in other places.
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It usually includes ``bboxes``, with shape
                (k, 4), and ``labels``, with shape (k, ).
            gt_instances_ignore (:obj:`InstanceData`, optional): Instances
                to be ignored during training. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            :obj:`AssignResult`: The assign result.

        Example:
            >>> from mmengine.structures import InstanceData
            >>> self = MaxIoUAssigner(0.5, 0.5)
            >>> pred_instances = InstanceData()
            >>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10],
            ...                                       [10, 10, 20, 20]])
            >>> gt_instances = InstanceData()
            >>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 9]])
            >>> gt_instances.labels = torch.Tensor([0])
            >>> assign_result = self.assign(pred_instances, gt_instances)
            >>> expected_gt_inds = torch.LongTensor([1, 0])
            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
        """
        gt_bboxes = gt_instances.bboxes
        priors = pred_instances.priors
        gt_labels = gt_instances.labels
        if gt_instances_ignore is not None:
            gt_bboxes_ignore = gt_instances_ignore.bboxes
        else:
            gt_bboxes_ignore = None

        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
            gt_bboxes.shape[0] > self.gpu_assign_thr) else False
        # compute overlap and assign gt on CPU when number of GT is large
        if assign_on_cpu:
            device = priors.device
            priors = priors.cpu()
            gt_bboxes = gt_bboxes.cpu()
            gt_labels = gt_labels.cpu()
            if gt_bboxes_ignore is not None:
                gt_bboxes_ignore = gt_bboxes_ignore.cpu()

        overlaps = self.iou_calculator(gt_bboxes, priors)

        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and priors.numel() > 0):
            if self.ignore_wrt_candidates:
                ignore_overlaps = self.iou_calculator(
                    priors, gt_bboxes_ignore, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            else:
                ignore_overlaps = self.iou_calculator(
                    gt_bboxes_ignore, priors, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1

        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
        if assign_on_cpu:
            assign_result.gt_inds = assign_result.gt_inds.to(device)
            assign_result.max_overlaps = assign_result.max_overlaps.to(device)
            if assign_result.labels is not None:
                assign_result.labels = assign_result.labels.to(device)
        return assign_result

    def assign_wrt_overlaps(self, overlaps: Tensor,
                            gt_labels: Tensor) -> AssignResult:
        """Assign w.r.t. the overlaps of priors with gts.

        Args:
            overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
                shape(k, n).
            gt_labels (Tensor): Labels of k gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
        # 1. assign -1 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)

        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            assigned_labels = overlaps.new_full((num_bboxes, ),
                                                -1,
                                                dtype=torch.long)
            if num_gts == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gts=num_gts,
                gt_inds=assigned_gt_inds,
                max_overlaps=max_overlaps,
                labels=assigned_labels)

        # for each anchor, which gt best overlaps with it
        # for each anchor, the max iou of all gts
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)
        # for each gt, which anchor best overlaps with it
        # for each gt, the max iou of all proposals
        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)

        # 2. assign negative: below
        # the negative inds are set to be 0
        if isinstance(self.neg_iou_thr, float):
            assigned_gt_inds[(max_overlaps >= 0)
                             & (max_overlaps < self.neg_iou_thr)] = 0
        elif isinstance(self.neg_iou_thr, tuple):
            assert len(self.neg_iou_thr) == 2
            assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
                             & (max_overlaps < self.neg_iou_thr[1])] = 0

        # 3. assign positive: above positive IoU threshold
        pos_inds = max_overlaps >= self.pos_iou_thr
        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1

        if self.match_low_quality:
            # Low-quality matching will overwrite the assigned_gt_inds
            # assigned in Step 3. Thus, the assigned gt might not be the
            # best one for prediction.
            # For example, if bbox A has 0.9 and 0.8 iou with GT bbox
            # 1 & 2, bbox 1 will be assigned as the best target for bbox A
            # in step 3. However, if GT bbox 2's gt_argmax_overlaps = A,
            # bbox A's assigned_gt_inds will be overwritten to be bbox 2.
            # This might be the reason that it is not used in ROI Heads.
            for i in range(num_gts):
                if gt_max_overlaps[i] >= self.min_pos_iou:
                    if self.gt_max_assign_all:
                        max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
                        assigned_gt_inds[max_iou_inds] = i + 1
                    else:
                        assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1

        assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
        pos_inds = torch.nonzero(
            assigned_gt_inds > 0, as_tuple=False).squeeze()
        if pos_inds.numel() > 0:
            assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -
                                                  1]

        return AssignResult(
            num_gts=num_gts,
            gt_inds=assigned_gt_inds,
            max_overlaps=max_overlaps,
            labels=assigned_labels)
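

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): with a
# tuple ``neg_iou_thr``, only overlaps inside [low, high) count as negatives;
# the rest stay ignored (-1). The overlap matrix is hand-written so the
# output is deterministic; the 'BboxOverlaps2D' registry entry is assumed to
# be available for constructing the assigner.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.7, neg_iou_thr=(0.1, 0.3), match_low_quality=False)
    # Overlaps of 1 gt with 4 priors, shape (k, n) = (1, 4).
    overlaps = torch.tensor([[0.05, 0.2, 0.5, 0.8]])
    result = assigner.assign_wrt_overlaps(overlaps,
                                          gt_labels=torch.tensor([3]))
    print(result.gt_inds)  # tensor([-1,  0, -1,  1]): ignore/neg/ignore/pos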
11,559
45.99187
79
py
ERD
ERD-main/mmdet/models/task_modules/prior_generators/point_generator.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union

import numpy as np
import torch
from torch import Tensor
from torch.nn.modules.utils import _pair

from mmdet.registry import TASK_UTILS

DeviceType = Union[str, torch.device]


@TASK_UTILS.register_module()
class PointGenerator:

    def _meshgrid(self,
                  x: Tensor,
                  y: Tensor,
                  row_major: bool = True) -> Tuple[Tensor, Tensor]:
        """Generate mesh grid of x and y.

        Args:
            x (torch.Tensor): Grids of x dimension.
            y (torch.Tensor): Grids of y dimension.
            row_major (bool): Whether to return y grids first.
                Defaults to True.

        Returns:
            tuple[torch.Tensor]: The mesh grids of x and y.
        """
        xx = x.repeat(len(y))
        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
        if row_major:
            return xx, yy
        else:
            return yy, xx

    def grid_points(self,
                    featmap_size: Tuple[int, int],
                    stride=16,
                    device: DeviceType = 'cuda') -> Tensor:
        """Generate grid points of a single level.

        Args:
            featmap_size (tuple[int, int]): Size of the feature maps.
            stride (int): The stride of corresponding feature map.
            device (str | torch.device): The device the tensor will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: grid point in a feature map.
        """
        feat_h, feat_w = featmap_size
        shift_x = torch.arange(0., feat_w, device=device) * stride
        shift_y = torch.arange(0., feat_h, device=device) * stride
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        stride = shift_x.new_full((shift_xx.shape[0], ), stride)
        shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
        all_points = shifts.to(device)
        return all_points

    def valid_flags(self,
                    featmap_size: Tuple[int, int],
                    valid_size: Tuple[int, int],
                    device: DeviceType = 'cuda') -> Tensor:
        """Generate valid flags of anchors in a feature map.

        Args:
            featmap_size (tuple[int, int]): Size of the feature map.
            valid_size (tuple[int, int]): The valid size of the feature map.
            device (str | torch.device): Device where the anchors will be
                put on.

        Return:
            torch.Tensor: Valid flags of anchors in a level.
        """
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        valid = valid_xx & valid_yy
        return valid


@TASK_UTILS.register_module()
class MlvlPointGenerator:
    """Standard points generator for multi-level (Mlvl) feature maps in 2D
    points-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        offset (float): The offset of points, the value is normalized with
            corresponding stride. Defaults to 0.5.
""" def __init__(self, strides: Union[List[int], List[Tuple[int, int]]], offset: float = 0.5) -> None: self.strides = [_pair(stride) for stride in strides] self.offset = offset @property def num_levels(self) -> int: """int: number of feature levels that the generator will be applied""" return len(self.strides) @property def num_base_priors(self) -> List[int]: """list[int]: The number of priors (points) at a point on the feature grid""" return [1 for _ in range(len(self.strides))] def _meshgrid(self, x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exporting # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def grid_priors(self, featmap_sizes: List[Tuple], dtype: torch.dtype = torch.float32, device: DeviceType = 'cuda', with_stride: bool = False) -> List[Tensor]: """Generate grid points of multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels, each size arrange as as (h, w). dtype (:obj:`dtype`): Dtype of priors. Defaults to torch.float32. device (str | torch.device): The device where the anchors will be put on. with_stride (bool): Whether to concatenate the stride to the last dimension of points. Return: list[torch.Tensor]: Points of multiple feature levels. The sizes of each tensor should be (N, 2) when with stride is ``False``, where N = width * height, width and height are the sizes of the corresponding feature level, and the last dimension 2 represent (coord_x, coord_y), otherwise the shape should be (N, 4), and the last dimension 4 represent (coord_x, coord_y, stride_w, stride_h). """ assert self.num_levels == len(featmap_sizes) multi_level_priors = [] for i in range(self.num_levels): priors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride) multi_level_priors.append(priors) return multi_level_priors def single_level_grid_priors(self, featmap_size: Tuple[int], level_idx: int, dtype: torch.dtype = torch.float32, device: DeviceType = 'cuda', with_stride: bool = False) -> Tensor: """Generate grid Points of a single level. Note: This function is usually called by method ``self.grid_priors``. Args: featmap_size (tuple[int]): Size of the feature maps, arrange as (h, w). level_idx (int): The index of corresponding feature map level. dtype (:obj:`dtype`): Dtype of priors. Defaults to torch.float32. device (str | torch.device): The device the tensor will be put on. Defaults to 'cuda'. with_stride (bool): Concatenate the stride to the last dimension of points. Return: Tensor: Points of single feature levels. The shape of tensor should be (N, 2) when with stride is ``False``, where N = width * height, width and height are the sizes of the corresponding feature level, and the last dimension 2 represent (coord_x, coord_y), otherwise the shape should be (N, 4), and the last dimension 4 represent (coord_x, coord_y, stride_w, stride_h). 
""" feat_h, feat_w = featmap_size stride_w, stride_h = self.strides[level_idx] shift_x = (torch.arange(0, feat_w, device=device) + self.offset) * stride_w # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly shift_x = shift_x.to(dtype) shift_y = (torch.arange(0, feat_h, device=device) + self.offset) * stride_h # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly shift_y = shift_y.to(dtype) shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) if not with_stride: shifts = torch.stack([shift_xx, shift_yy], dim=-1) else: # use `shape[0]` instead of `len(shift_xx)` for ONNX export stride_w = shift_xx.new_full((shift_xx.shape[0], ), stride_w).to(dtype) stride_h = shift_xx.new_full((shift_yy.shape[0], ), stride_h).to(dtype) shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1) all_points = shifts.to(device) return all_points def valid_flags(self, featmap_sizes: List[Tuple[int, int]], pad_shape: Tuple[int], device: DeviceType = 'cuda') -> List[Tensor]: """Generate valid flags of points of multiple feature levels. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels, each size arrange as as (h, w). pad_shape (tuple(int)): The padded shape of the image, arrange as (h, w). device (str | torch.device): The device where the anchors will be put on. Return: list(torch.Tensor): Valid flags of points of multiple levels. """ assert self.num_levels == len(featmap_sizes) multi_level_flags = [] for i in range(self.num_levels): point_stride = self.strides[i] feat_h, feat_w = featmap_sizes[i] h, w = pad_shape[:2] valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h) valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w) flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device) multi_level_flags.append(flags) return multi_level_flags def single_level_valid_flags(self, featmap_size: Tuple[int, int], valid_size: Tuple[int, int], device: DeviceType = 'cuda') -> Tensor: """Generate the valid flags of points of a single feature map. Args: featmap_size (tuple[int]): The size of feature maps, arrange as as (h, w). valid_size (tuple[int]): The valid size of the feature maps. The size arrange as as (h, w). device (str | torch.device): The device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each points in a single level \ feature map. """ feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy return valid def sparse_priors(self, prior_idxs: Tensor, featmap_size: Tuple[int], level_idx: int, dtype: torch.dtype = torch.float32, device: DeviceType = 'cuda') -> Tensor: """Generate sparse points according to the ``prior_idxs``. Args: prior_idxs (Tensor): The index of corresponding anchors in the feature map. featmap_size (tuple[int]): feature map size arrange as (w, h). level_idx (int): The level index of corresponding feature map. dtype (obj:`torch.dtype`): Date type of points. Defaults to ``torch.float32``. device (str | torch.device): The device where the points is located. Returns: Tensor: Anchor with shape (N, 2), N should be equal to the length of ``prior_idxs``. 
            And the last dimension 2 represents (coord_x, coord_y).
        """
        height, width = featmap_size
        x = (prior_idxs % width + self.offset) * self.strides[level_idx][0]
        y = ((prior_idxs // width) % height +
             self.offset) * self.strides[level_idx][1]
        priors = torch.stack([x, y], 1).to(dtype)

        priors = priors.to(device)
        return priors
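

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): priors
# for a hypothetical two-level FPN on CPU; sizes are illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    generator = MlvlPointGenerator(strides=[8, 16], offset=0.5)
    featmap_sizes = [(4, 4), (2, 2)]  # (h, w) per level
    priors = generator.grid_priors(featmap_sizes, device='cpu',
                                   with_stride=True)
    print([p.shape for p in priors])  # [torch.Size([16, 4]), torch.Size([4, 4])]
    print(priors[0][0])  # tensor([4., 4., 8., 8.]): (x, y, stride_w, stride_h)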
13,026
39.456522
79
py
ERD
ERD-main/mmdet/models/task_modules/prior_generators/anchor_generator.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
from mmengine.utils import is_tuple_of
from torch import Tensor
from torch.nn.modules.utils import _pair

from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes

DeviceType = Union[str, torch.device]


@TASK_UTILS.register_module()
class AnchorGenerator:
    """Standard anchor generator for 2D anchor-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        scales (list[int], Optional): Anchor scales for anchors
            in a single level. It cannot be set at the same time
            if `octave_base_scale` and `scales_per_octave` are set.
        base_sizes (list[int], Optional): The basic sizes
            of anchors in multiple levels.
            If None is given, strides will be used as base_sizes.
            (If strides are non square, the shortest stride is taken.)
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. By default it is True in V2.0
        octave_base_scale (int, Optional): The base scale of octave.
        scales_per_octave (int, Optional): Number of scales for each octave.
            `octave_base_scale` and `scales_per_octave` are usually used in
            retinanet and the `scales` should be None when they are set.
        centers (list[tuple[float]], Optional): The centers of the anchor
            relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of tuple
            of float is given, they will be used to shift the centers of
            anchors.
        center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0.
        use_box_type (bool): Whether to warp anchors with the box type data
            structure. Defaults to False.

    Examples:
        >>> from mmdet.models.task_modules.prior_generators \
        ...     import AnchorGenerator
        >>> self = AnchorGenerator([16], [1.], [1.], [9])
        >>> all_anchors = self.grid_priors([(2, 2)], device='cpu')
        >>> print(all_anchors)
        [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
                [11.5000, -4.5000, 20.5000, 4.5000],
                [-4.5000, 11.5000, 4.5000, 20.5000],
                [11.5000, 11.5000, 20.5000, 20.5000]])]
        >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
        >>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu')
        >>> print(all_anchors)
        [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
                [11.5000, -4.5000, 20.5000, 4.5000],
                [-4.5000, 11.5000, 4.5000, 20.5000],
                [11.5000, 11.5000, 20.5000, 20.5000]]), \
        tensor([[-9., -9., 9., 9.]])]
    """

    def __init__(self,
                 strides: Union[List[int], List[Tuple[int, int]]],
                 ratios: List[float],
                 scales: Optional[List[int]] = None,
                 base_sizes: Optional[List[int]] = None,
                 scale_major: bool = True,
                 octave_base_scale: Optional[int] = None,
                 scales_per_octave: Optional[int] = None,
                 centers: Optional[List[Tuple[float, float]]] = None,
                 center_offset: float = 0.,
                 use_box_type: bool = False) -> None:
        # check center and center_offset
        if center_offset != 0:
            assert centers is None, 'center cannot be set when ' \
                f'center_offset != 0, {centers} is given.'
        if not (0 <= center_offset <= 1):
            raise ValueError('center_offset should be in range [0, 1], '
                             f'{center_offset} is given.')
        if centers is not None:
            assert len(centers) == len(strides), \
                'The number of strides should be the same as centers, got ' \
                f'{strides} and {centers}'

        # calculate base sizes of anchors
        self.strides = [_pair(stride) for stride in strides]
        self.base_sizes = [min(stride) for stride in self.strides
                           ] if base_sizes is None else base_sizes
        assert len(self.base_sizes) == len(self.strides), \
            'The number of strides should be the same as base sizes, got ' \
            f'{self.strides} and {self.base_sizes}'

        # calculate scales of anchors
        assert ((octave_base_scale is not None
                 and scales_per_octave is not None) ^ (scales is not None)), \
            'scales and octave_base_scale with scales_per_octave cannot' \
            ' be set at the same time'
        if scales is not None:
            self.scales = torch.Tensor(scales)
        elif octave_base_scale is not None and scales_per_octave is not None:
            octave_scales = np.array(
                [2**(i / scales_per_octave) for i in range(scales_per_octave)])
            scales = octave_scales * octave_base_scale
            self.scales = torch.Tensor(scales)
        else:
            raise ValueError('Either scales or octave_base_scale with '
                             'scales_per_octave should be set')

        self.octave_base_scale = octave_base_scale
        self.scales_per_octave = scales_per_octave
        self.ratios = torch.Tensor(ratios)
        self.scale_major = scale_major
        self.centers = centers
        self.center_offset = center_offset
        self.base_anchors = self.gen_base_anchors()
        self.use_box_type = use_box_type

    @property
    def num_base_anchors(self) -> List[int]:
        """list[int]: total number of base anchors in a feature grid"""
        return self.num_base_priors

    @property
    def num_base_priors(self) -> List[int]:
        """list[int]: The number of priors (anchors) at a point
        on the feature grid"""
        return [base_anchors.size(0) for base_anchors in self.base_anchors]

    @property
    def num_levels(self) -> int:
        """int: number of feature levels that the generator will be applied"""
        return len(self.strides)

    def gen_base_anchors(self) -> List[Tensor]:
        """Generate base anchors.

        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_size in enumerate(self.base_sizes):
            center = None
            if self.centers is not None:
                center = self.centers[i]
            multi_level_base_anchors.append(
                self.gen_single_level_base_anchors(
                    base_size,
                    scales=self.scales,
                    ratios=self.ratios,
                    center=center))
        return multi_level_base_anchors

    def gen_single_level_base_anchors(self,
                                      base_size: Union[int, float],
                                      scales: Tensor,
                                      ratios: Tensor,
                                      center: Optional[Tuple[float]] = None) \
            -> Tensor:
        """Generate base anchors of a single level.

        Args:
            base_size (int | float): Basic size of an anchor.
            scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height
                and width of anchors in a single level.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.

        Returns:
            torch.Tensor: Anchors in a single-level feature map.
""" w = base_size h = base_size if center is None: x_center = self.center_offset * w y_center = self.center_offset * h else: x_center, y_center = center h_ratios = torch.sqrt(ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) else: ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) # use float anchor and the anchor's center is aligned with the # pixel center base_anchors = [ x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, y_center + 0.5 * hs ] base_anchors = torch.stack(base_anchors, dim=-1) return base_anchors def _meshgrid(self, x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor]: """Generate mesh grid of x and y. Args: x (torch.Tensor): Grids of x dimension. y (torch.Tensor): Grids of y dimension. row_major (bool): Whether to return y grids first. Defaults to True. Returns: tuple[torch.Tensor]: The mesh grids of x and y. """ # use shape instead of len to keep tracing while exporting to onnx xx = x.repeat(y.shape[0]) yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) if row_major: return xx, yy else: return yy, xx def grid_priors(self, featmap_sizes: List[Tuple], dtype: torch.dtype = torch.float32, device: DeviceType = 'cuda') -> List[Tensor]: """Generate grid anchors in multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels. dtype (:obj:`torch.dtype`): Dtype of priors. Defaults to torch.float32. device (str | torch.device): The device where the anchors will be put on. Return: list[torch.Tensor]: Anchors in multiple feature levels. \ The sizes of each tensor should be [N, 4], where \ N = width * height * num_base_anchors, width and height \ are the sizes of the corresponding feature level, \ num_base_anchors is the number of anchors for that level. """ assert self.num_levels == len(featmap_sizes) multi_level_anchors = [] for i in range(self.num_levels): anchors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device) multi_level_anchors.append(anchors) return multi_level_anchors def single_level_grid_priors(self, featmap_size: Tuple[int, int], level_idx: int, dtype: torch.dtype = torch.float32, device: DeviceType = 'cuda') -> Tensor: """Generate grid anchors of a single level. Note: This function is usually called by method ``self.grid_priors``. Args: featmap_size (tuple[int, int]): Size of the feature maps. level_idx (int): The index of corresponding feature map level. dtype (obj:`torch.dtype`): Date type of points.Defaults to ``torch.float32``. device (str | torch.device): The device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps. """ base_anchors = self.base_anchors[level_idx].to(device).to(dtype) feat_h, feat_w = featmap_size stride_w, stride_h = self.strides[level_idx] # First create Range with the default dtype, than convert to # target `dtype` for onnx exporting. 
shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) # first feat_w elements correspond to the first row of shifts # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get # shifted anchors (K, A, 4), reshape to (K*A, 4) all_anchors = base_anchors[None, :, :] + shifts[:, None, :] all_anchors = all_anchors.view(-1, 4) # first A rows correspond to A anchors of (0, 0) in feature map, # then (0, 1), (0, 2), ... if self.use_box_type: all_anchors = HorizontalBoxes(all_anchors) return all_anchors def sparse_priors(self, prior_idxs: Tensor, featmap_size: Tuple[int, int], level_idx: int, dtype: torch.dtype = torch.float32, device: DeviceType = 'cuda') -> Tensor: """Generate sparse anchors according to the ``prior_idxs``. Args: prior_idxs (Tensor): The index of corresponding anchors in the feature map. featmap_size (tuple[int, int]): feature map size arrange as (h, w). level_idx (int): The level index of corresponding feature map. dtype (obj:`torch.dtype`): Date type of points.Defaults to ``torch.float32``. device (str | torch.device): The device where the points is located. Returns: Tensor: Anchor with shape (N, 4), N should be equal to the length of ``prior_idxs``. """ height, width = featmap_size num_base_anchors = self.num_base_anchors[level_idx] base_anchor_id = prior_idxs % num_base_anchors x = (prior_idxs // num_base_anchors) % width * self.strides[level_idx][0] y = (prior_idxs // width // num_base_anchors) % height * self.strides[level_idx][1] priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \ self.base_anchors[level_idx][base_anchor_id, :].to(device) return priors def grid_anchors(self, featmap_sizes: List[Tuple], device: DeviceType = 'cuda') -> List[Tensor]: """Generate grid anchors in multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels. device (str | torch.device): Device where the anchors will be put on. Return: list[torch.Tensor]: Anchors in multiple feature levels. \ The sizes of each tensor should be [N, 4], where \ N = width * height * num_base_anchors, width and height \ are the sizes of the corresponding feature level, \ num_base_anchors is the number of anchors for that level. """ warnings.warn('``grid_anchors`` would be deprecated soon. ' 'Please use ``grid_priors`` ') assert self.num_levels == len(featmap_sizes) multi_level_anchors = [] for i in range(self.num_levels): anchors = self.single_level_grid_anchors( self.base_anchors[i].to(device), featmap_sizes[i], self.strides[i], device=device) multi_level_anchors.append(anchors) return multi_level_anchors def single_level_grid_anchors(self, base_anchors: Tensor, featmap_size: Tuple[int, int], stride: Tuple[int, int] = (16, 16), device: DeviceType = 'cuda') -> Tensor: """Generate grid anchors of a single level. Note: This function is usually called by method ``self.grid_anchors``. Args: base_anchors (torch.Tensor): The base anchors of a feature grid. featmap_size (tuple[int]): Size of the feature maps. stride (tuple[int, int]): Stride of the feature map in order (w, h). Defaults to (16, 16). device (str | torch.device): Device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps. """ warnings.warn( '``single_level_grid_anchors`` would be deprecated soon. 
' 'Please use ``single_level_grid_priors`` ') # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly feat_h, feat_w = featmap_size shift_x = torch.arange(0, feat_w, device=device) * stride[0] shift_y = torch.arange(0, feat_h, device=device) * stride[1] shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) shifts = shifts.type_as(base_anchors) # first feat_w elements correspond to the first row of shifts # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get # shifted anchors (K, A, 4), reshape to (K*A, 4) all_anchors = base_anchors[None, :, :] + shifts[:, None, :] all_anchors = all_anchors.view(-1, 4) # first A rows correspond to A anchors of (0, 0) in feature map, # then (0, 1), (0, 2), ... return all_anchors def valid_flags(self, featmap_sizes: List[Tuple[int, int]], pad_shape: Tuple, device: DeviceType = 'cuda') -> List[Tensor]: """Generate valid flags of anchors in multiple feature levels. Args: featmap_sizes (list(tuple[int, int])): List of feature map sizes in multiple feature levels. pad_shape (tuple): The padded shape of the image. device (str | torch.device): Device where the anchors will be put on. Return: list(torch.Tensor): Valid flags of anchors in multiple levels. """ assert self.num_levels == len(featmap_sizes) multi_level_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] feat_h, feat_w = featmap_sizes[i] h, w = pad_shape[:2] valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w) flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), self.num_base_anchors[i], device=device) multi_level_flags.append(flags) return multi_level_flags def single_level_valid_flags(self, featmap_size: Tuple[int, int], valid_size: Tuple[int, int], num_base_anchors: int, device: DeviceType = 'cuda') -> Tensor: """Generate the valid flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps, arrange as (h, w). valid_size (tuple[int]): The valid size of the feature maps. num_base_anchors (int): The number of base anchors. device (str | torch.device): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. 
""" feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy valid = valid[:, None].expand(valid.size(0), num_base_anchors).contiguous().view(-1) return valid def __repr__(self) -> str: """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}octave_base_scale=' repr_str += f'{self.octave_base_scale},\n' repr_str += f'{indent_str}scales_per_octave=' repr_str += f'{self.scales_per_octave},\n' repr_str += f'{indent_str}num_levels={self.num_levels}\n' repr_str += f'{indent_str}centers={self.centers},\n' repr_str += f'{indent_str}center_offset={self.center_offset})' return repr_str @TASK_UTILS.register_module() class SSDAnchorGenerator(AnchorGenerator): """Anchor generator for SSD. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. min_sizes (list[float]): The list of minimum anchor sizes on each level. max_sizes (list[float]): The list of maximum anchor sizes on each level. basesize_ratio_range (tuple(float)): Ratio range of anchors. Being used when not setting min_sizes and max_sizes. input_size (int): Size of feature map, 300 for SSD300, 512 for SSD512. Being used when not setting min_sizes and max_sizes. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. It is always set to be False in SSD. use_box_type (bool): Whether to warp anchors with the box type data structure. Defaults to False. """ def __init__(self, strides: Union[List[int], List[Tuple[int, int]]], ratios: List[float], min_sizes: Optional[List[float]] = None, max_sizes: Optional[List[float]] = None, basesize_ratio_range: Tuple[float] = (0.15, 0.9), input_size: int = 300, scale_major: bool = True, use_box_type: bool = False) -> None: assert len(strides) == len(ratios) assert not (min_sizes is None) ^ (max_sizes is None) self.strides = [_pair(stride) for stride in strides] self.centers = [(stride[0] / 2., stride[1] / 2.) 
for stride in self.strides] if min_sizes is None and max_sizes is None: # use hard code to generate SSD anchors self.input_size = input_size assert is_tuple_of(basesize_ratio_range, float) self.basesize_ratio_range = basesize_ratio_range # calculate anchor ratios and sizes min_ratio, max_ratio = basesize_ratio_range min_ratio = int(min_ratio * 100) max_ratio = int(max_ratio * 100) step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) min_sizes = [] max_sizes = [] for ratio in range(int(min_ratio), int(max_ratio) + 1, step): min_sizes.append(int(self.input_size * ratio / 100)) max_sizes.append(int(self.input_size * (ratio + step) / 100)) if self.input_size == 300: if basesize_ratio_range[0] == 0.15: # SSD300 COCO min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) elif basesize_ratio_range[0] == 0.2: # SSD300 VOC min_sizes.insert(0, int(self.input_size * 10 / 100)) max_sizes.insert(0, int(self.input_size * 20 / 100)) else: raise ValueError( 'basesize_ratio_range[0] should be either 0.15' 'or 0.2 when input_size is 300, got ' f'{basesize_ratio_range[0]}.') elif self.input_size == 512: if basesize_ratio_range[0] == 0.1: # SSD512 COCO min_sizes.insert(0, int(self.input_size * 4 / 100)) max_sizes.insert(0, int(self.input_size * 10 / 100)) elif basesize_ratio_range[0] == 0.15: # SSD512 VOC min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) else: raise ValueError( 'When not setting min_sizes and max_sizes,' 'basesize_ratio_range[0] should be either 0.1' 'or 0.15 when input_size is 512, got' f' {basesize_ratio_range[0]}.') else: raise ValueError( 'Only support 300 or 512 in SSDAnchorGenerator when ' 'not setting min_sizes and max_sizes, ' f'got {self.input_size}.') assert len(min_sizes) == len(max_sizes) == len(strides) anchor_ratios = [] anchor_scales = [] for k in range(len(self.strides)): scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] anchor_ratio = [1.] for r in ratios[k]: anchor_ratio += [1 / r, r] # 4 or 6 ratio anchor_ratios.append(torch.Tensor(anchor_ratio)) anchor_scales.append(torch.Tensor(scales)) self.base_sizes = min_sizes self.scales = anchor_scales self.ratios = anchor_ratios self.scale_major = scale_major self.center_offset = 0 self.base_anchors = self.gen_base_anchors() self.use_box_type = use_box_type def gen_base_anchors(self) -> List[Tensor]: """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. 
""" multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): base_anchors = self.gen_single_level_base_anchors( base_size, scales=self.scales[i], ratios=self.ratios[i], center=self.centers[i]) indices = list(range(len(self.ratios[i]))) indices.insert(1, len(indices)) base_anchors = torch.index_select(base_anchors, 0, torch.LongTensor(indices)) multi_level_base_anchors.append(base_anchors) return multi_level_base_anchors def __repr__(self) -> str: """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}input_size={self.input_size},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}num_levels={self.num_levels},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}basesize_ratio_range=' repr_str += f'{self.basesize_ratio_range})' return repr_str @TASK_UTILS.register_module() class LegacyAnchorGenerator(AnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. Note: Difference to the V2.0 anchor generator: 1. The center offset of V1.x anchors are set to be 0.5 rather than 0. 2. The width/height are minused by 1 when calculating the anchors' \ centers and corners to meet the V1.x coordinate system. 3. The anchors' corners are quantized. Args: strides (list[int] | list[tuple[int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. scales (list[int] | None): Anchor scales for anchors in a single level. It cannot be set at the same time if `octave_base_scale` and `scales_per_octave` are set. base_sizes (list[int]): The basic sizes of anchors in multiple levels. If None is given, strides will be used to generate base_sizes. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. By default it is True in V2.0 octave_base_scale (int): The base scale of octave. scales_per_octave (int): Number of scales for each octave. `octave_base_scale` and `scales_per_octave` are usually used in retinanet and the `scales` should be None when they are set. centers (list[tuple[float, float]] | None): The centers of the anchor relative to the feature grid center in multiple feature levels. By default it is set to be None and not used. It a list of float is given, this list will be used to shift the centers of anchors. center_offset (float): The offset of center in proportion to anchors' width and height. By default it is 0.5 in V2.0 but it should be 0.5 in v1.x models. use_box_type (bool): Whether to warp anchors with the box type data structure. Defaults to False. Examples: >>> from mmdet.models.task_modules. ... prior_generators import LegacyAnchorGenerator >>> self = LegacyAnchorGenerator( >>> [16], [1.], [1.], [9], center_offset=0.5) >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu') >>> print(all_anchors) [tensor([[ 0., 0., 8., 8.], [16., 0., 24., 8.], [ 0., 16., 8., 24.], [16., 16., 24., 24.]])] """ def gen_single_level_base_anchors(self, base_size: Union[int, float], scales: Tensor, ratios: Tensor, center: Optional[Tuple[float]] = None) \ -> Tensor: """Generate base anchors of a single level. 
Note: The width/height of anchors are minused by 1 when calculating \ the centers and corners to meet the V1.x coordinate system. Args: base_size (int | float): Basic size of an anchor. scales (torch.Tensor): Scales of the anchor. ratios (torch.Tensor): The ratio between the height. and width of anchors in a single level. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature map. """ w = base_size h = base_size if center is None: x_center = self.center_offset * (w - 1) y_center = self.center_offset * (h - 1) else: x_center, y_center = center h_ratios = torch.sqrt(ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) else: ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) # use float anchor and the anchor's center is aligned with the # pixel center base_anchors = [ x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1), x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1) ] base_anchors = torch.stack(base_anchors, dim=-1).round() return base_anchors @TASK_UTILS.register_module() class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator` can be found in `LegacyAnchorGenerator`. """ def __init__(self, strides: Union[List[int], List[Tuple[int, int]]], ratios: List[float], basesize_ratio_range: Tuple[float], input_size: int = 300, scale_major: bool = True, use_box_type: bool = False) -> None: super(LegacySSDAnchorGenerator, self).__init__( strides=strides, ratios=ratios, basesize_ratio_range=basesize_ratio_range, input_size=input_size, scale_major=scale_major, use_box_type=use_box_type) self.centers = [((stride - 1) / 2., (stride - 1) / 2.) for stride in strides] self.base_anchors = self.gen_base_anchors() @TASK_UTILS.register_module() class YOLOAnchorGenerator(AnchorGenerator): """Anchor generator for YOLO. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. base_sizes (list[list[tuple[int, int]]]): The basic sizes of anchors in multiple levels. """ def __init__(self, strides: Union[List[int], List[Tuple[int, int]]], base_sizes: List[List[Tuple[int, int]]], use_box_type: bool = False) -> None: self.strides = [_pair(stride) for stride in strides] self.centers = [(stride[0] / 2., stride[1] / 2.) for stride in self.strides] self.base_sizes = [] num_anchor_per_level = len(base_sizes[0]) for base_sizes_per_level in base_sizes: assert num_anchor_per_level == len(base_sizes_per_level) self.base_sizes.append( [_pair(base_size) for base_size in base_sizes_per_level]) self.base_anchors = self.gen_base_anchors() self.use_box_type = use_box_type @property def num_levels(self) -> int: """int: number of feature levels that the generator will be applied""" return len(self.base_sizes) def gen_base_anchors(self) -> List[Tensor]: """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. 
""" multi_level_base_anchors = [] for i, base_sizes_per_level in enumerate(self.base_sizes): center = None if self.centers is not None: center = self.centers[i] multi_level_base_anchors.append( self.gen_single_level_base_anchors(base_sizes_per_level, center)) return multi_level_base_anchors def gen_single_level_base_anchors(self, base_sizes_per_level: List[Tuple[int]], center: Optional[Tuple[float]] = None) \ -> Tensor: """Generate base anchors of a single level. Args: base_sizes_per_level (list[tuple[int]]): Basic sizes of anchors. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature maps. """ x_center, y_center = center base_anchors = [] for base_size in base_sizes_per_level: w, h = base_size # use float anchor and the anchor's center is aligned with the # pixel center base_anchor = torch.Tensor([ x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, y_center + 0.5 * h ]) base_anchors.append(base_anchor) base_anchors = torch.stack(base_anchors, dim=0) return base_anchors
ERD
ERD-main/mmdet/models/task_modules/prior_generators/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple

import torch
from torch import Tensor

from mmdet.structures.bbox import BaseBoxes


def anchor_inside_flags(flat_anchors: Tensor,
                        valid_flags: Tensor,
                        img_shape: Tuple[int],
                        allowed_border: int = 0) -> Tensor:
    """Check whether the anchors are inside the border.

    Args:
        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
        valid_flags (torch.Tensor): Existing valid flags of the anchors.
        img_shape (tuple(int)): Shape of current image.
        allowed_border (int): The border to allow the valid anchor.
            Defaults to 0.

    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a
        valid range.
    """
    img_h, img_w = img_shape[:2]
    if allowed_border >= 0:
        if isinstance(flat_anchors, BaseBoxes):
            inside_flags = valid_flags & \
                flat_anchors.is_inside(
                    [img_h, img_w],
                    all_inside=True,
                    allowed_border=allowed_border)
        else:
            inside_flags = valid_flags & \
                (flat_anchors[:, 0] >= -allowed_border) & \
                (flat_anchors[:, 1] >= -allowed_border) & \
                (flat_anchors[:, 2] < img_w + allowed_border) & \
                (flat_anchors[:, 3] < img_h + allowed_border)
    else:
        inside_flags = valid_flags
    return inside_flags


def calc_region(bbox: Tensor,
                ratio: float,
                featmap_size: Optional[Tuple] = None) -> Tuple[int]:
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and
    w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
        ratio (float): Ratio of the output region.
        featmap_size (tuple, Optional): Feature map size in
            (height, width) order used for clipping the boundary.
            Defaults to None.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1])
        y1 = y1.clamp(min=0, max=featmap_size[0])
        x2 = x2.clamp(min=0, max=featmap_size[1])
        y2 = y2.clamp(min=0, max=featmap_size[0])
    return (x1, y1, x2, y2)
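
# A quick numeric check of the `anchor_inside_flags` semantics above: with
# allowed_border=0 an anchor is kept only if it lies fully inside the padded
# image. The toy anchors below are hypothetical, not taken from any dataset.
import torch

anchors = torch.tensor([[0., 0., 10., 10.],       # fully inside
                        [-2., 0., 10., 10.],      # crosses the left edge
                        [90., 90., 105., 105.]])  # crosses bottom-right
valid_flags = torch.ones(3, dtype=torch.bool)
img_h, img_w, allowed_border = 100, 100, 0
inside = valid_flags & \
    (anchors[:, 0] >= -allowed_border) & \
    (anchors[:, 1] >= -allowed_border) & \
    (anchors[:, 2] < img_w + allowed_border) & \
    (anchors[:, 3] < img_h + allowed_border)
print(inside)  # tensor([ True, False, False])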
ERD
ERD-main/mmdet/models/task_modules/samplers/multi_instance_sampling_result.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import Tensor

from ..assigners import AssignResult
from .sampling_result import SamplingResult


class MultiInstanceSamplingResult(SamplingResult):
    """Bbox sampling result.

    A further encapsulation of SamplingResult. Three attributes,
    neg_assigned_gt_inds, neg_gt_labels, and neg_gt_bboxes, have been added
    on top of SamplingResult.

    Args:
        pos_inds (Tensor): Indices of positive samples.
        neg_inds (Tensor): Indices of negative samples.
        priors (Tensor): The priors can be anchors or points,
            or the bboxes predicted by the previous stage.
        gt_and_ignore_bboxes (Tensor): Ground truth and ignore bboxes.
        assign_result (:obj:`AssignResult`): Assigning results.
        gt_flags (Tensor): The ground truth flags.
        avg_factor_with_neg (bool): If True, ``avg_factor`` equals the total
            number of priors; otherwise, it is the number of positive
            priors. Defaults to True.
    """

    def __init__(self,
                 pos_inds: Tensor,
                 neg_inds: Tensor,
                 priors: Tensor,
                 gt_and_ignore_bboxes: Tensor,
                 assign_result: AssignResult,
                 gt_flags: Tensor,
                 avg_factor_with_neg: bool = True) -> None:
        self.neg_assigned_gt_inds = assign_result.gt_inds[neg_inds]
        self.neg_gt_labels = assign_result.labels[neg_inds]

        if gt_and_ignore_bboxes.numel() == 0:
            self.neg_gt_bboxes = torch.empty_like(gt_and_ignore_bboxes).view(
                -1, 4)
        else:
            if len(gt_and_ignore_bboxes.shape) < 2:
                gt_and_ignore_bboxes = gt_and_ignore_bboxes.view(-1, 4)
            self.neg_gt_bboxes = gt_and_ignore_bboxes[
                self.neg_assigned_gt_inds.long(), :]

        # Offset by 1 to cancel the minus-1 operation in
        # `SamplingResult.__init__()`.
        assign_result.gt_inds += 1
        super().__init__(
            pos_inds=pos_inds,
            neg_inds=neg_inds,
            priors=priors,
            gt_bboxes=gt_and_ignore_bboxes,
            assign_result=assign_result,
            gt_flags=gt_flags,
            avg_factor_with_neg=avg_factor_with_neg)
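
# Why `assign_result.gt_inds += 1` above: the parent
# `SamplingResult.__init__` subtracts 1 to turn 1-based gt indices
# (0 = negative) into 0-based ones, so pre-incrementing keeps the original
# indices intact for this multi-instance variant. A toy round trip with
# hypothetical indices:
import torch

gt_inds = torch.tensor([2, 0, 1])  # 1-based assignment, 0 means negative
pre_incremented = gt_inds + 1      # what this subclass does
recovered = pre_incremented - 1    # what the parent then computes
assert torch.equal(recovered, gt_inds)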
ERD
ERD-main/mmdet/models/task_modules/samplers/instance_balanced_pos_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch

from mmdet.registry import TASK_UTILS
from .random_sampler import RandomSampler


@TASK_UTILS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
    """Instance balanced sampler that samples an equal number of positive
    samples for each instance."""

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Sample positive boxes.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of
                boxes.
            num_expected (int): The number of expected positive samples.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            return pos_inds
        else:
            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
            num_gts = len(unique_gt_inds)
            num_per_gt = int(round(num_expected / float(num_gts)) + 1)
            sampled_inds = []
            for i in unique_gt_inds:
                inds = torch.nonzero(
                    assign_result.gt_inds == i.item(), as_tuple=False)
                if inds.numel() != 0:
                    inds = inds.squeeze(1)
                else:
                    continue
                if len(inds) > num_per_gt:
                    inds = self.random_choice(inds, num_per_gt)
                sampled_inds.append(inds)
            sampled_inds = torch.cat(sampled_inds)
            if len(sampled_inds) < num_expected:
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(
                    list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                extra_inds = torch.from_numpy(extra_inds).to(
                    assign_result.gt_inds.device).long()
                sampled_inds = torch.cat([sampled_inds, extra_inds])
            elif len(sampled_inds) > num_expected:
                sampled_inds = self.random_choice(sampled_inds, num_expected)
            return sampled_inds
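
# The per-instance quota used above is `round(num_expected / num_gts) + 1`,
# slightly over-sampling each instance; the surplus is trimmed afterwards
# (or a shortfall is topped up from the remaining positives). A sketch of
# just the quota arithmetic with hypothetical numbers:
num_expected, num_gts = 64, 5
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
print(num_per_gt)  # 14 -> up to 5 x 14 = 70 candidates before trimming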
ERD
ERD-main/mmdet/models/task_modules/samplers/mask_sampling_result.py
# Copyright (c) OpenMMLab. All rights reserved. """copy from https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" import torch from torch import Tensor from ..assigners import AssignResult from .sampling_result import SamplingResult class MaskSamplingResult(SamplingResult): """Mask sampling result.""" def __init__(self, pos_inds: Tensor, neg_inds: Tensor, masks: Tensor, gt_masks: Tensor, assign_result: AssignResult, gt_flags: Tensor, avg_factor_with_neg: bool = True) -> None: self.pos_inds = pos_inds self.neg_inds = neg_inds self.num_pos = max(pos_inds.numel(), 1) self.num_neg = max(neg_inds.numel(), 1) self.avg_factor = self.num_pos + self.num_neg \ if avg_factor_with_neg else self.num_pos self.pos_masks = masks[pos_inds] self.neg_masks = masks[neg_inds] self.pos_is_gt = gt_flags[pos_inds] self.num_gts = gt_masks.shape[0] self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 if gt_masks.numel() == 0: # hack for index error case assert self.pos_assigned_gt_inds.numel() == 0 self.pos_gt_masks = torch.empty_like(gt_masks) else: self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :] @property def masks(self) -> Tensor: """torch.Tensor: concatenated positive and negative masks.""" return torch.cat([self.pos_masks, self.neg_masks]) def __nice__(self) -> str: data = self.info.copy() data['pos_masks'] = data.pop('pos_masks').shape data['neg_masks'] = data.pop('neg_masks').shape parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] body = ' ' + ',\n '.join(parts) return '{\n' + body + '\n}' @property def info(self) -> dict: """Returns a dictionary of info about the object.""" return { 'pos_inds': self.pos_inds, 'neg_inds': self.neg_inds, 'pos_masks': self.pos_masks, 'neg_masks': self.neg_masks, 'pos_is_gt': self.pos_is_gt, 'num_gts': self.num_gts, 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, }
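
# Positive/negative mask selection above is plain integer indexing along
# the first axis; a toy shape check with hypothetical sizes:
import torch

masks = torch.rand(6, 1, 8, 8)  # 6 predicted masks
pos_inds = torch.tensor([0, 2])
neg_inds = torch.tensor([1, 3, 4, 5])
print(masks[pos_inds].shape, masks[neg_inds].shape)
# torch.Size([2, 1, 8, 8]) torch.Size([4, 1, 8, 8])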
ERD
ERD-main/mmdet/models/task_modules/samplers/multi_instance_random_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Union import torch from mmengine.structures import InstanceData from numpy import ndarray from torch import Tensor from mmdet.registry import TASK_UTILS from ..assigners import AssignResult from .multi_instance_sampling_result import MultiInstanceSamplingResult from .random_sampler import RandomSampler @TASK_UTILS.register_module() class MultiInsRandomSampler(RandomSampler): """Random sampler for multi instance. Note: Multi-instance means to predict multiple detection boxes with one proposal box. `AssignResult` may assign multiple gt boxes to each proposal box, in this case `RandomSampler` should be replaced by `MultiInsRandomSampler` """ def _sample_pos(self, assign_result: AssignResult, num_expected: int, **kwargs) -> Union[Tensor, ndarray]: """Randomly sample some positive samples. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. num_expected (int): The number of expected positive samples Returns: Tensor or ndarray: sampled indices. """ pos_inds = torch.nonzero( assign_result.labels[:, 0] > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected) def _sample_neg(self, assign_result: AssignResult, num_expected: int, **kwargs) -> Union[Tensor, ndarray]: """Randomly sample some negative samples. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. num_expected (int): The number of expected positive samples Returns: Tensor or ndarray: sampled indices. """ neg_inds = torch.nonzero( assign_result.labels[:, 0] == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: return self.random_choice(neg_inds, num_expected) def sample(self, assign_result: AssignResult, pred_instances: InstanceData, gt_instances: InstanceData, **kwargs) -> MultiInstanceSamplingResult: """Sample positive and negative bboxes. Args: assign_result (:obj:`AssignResult`): Assigning results from MultiInstanceAssigner. pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). Returns: :obj:`MultiInstanceSamplingResult`: Sampling result. """ assert 'batch_gt_instances_ignore' in kwargs, \ 'batch_gt_instances_ignore is necessary for MultiInsRandomSampler' gt_bboxes = gt_instances.bboxes ignore_bboxes = kwargs['batch_gt_instances_ignore'].bboxes gt_and_ignore_bboxes = torch.cat([gt_bboxes, ignore_bboxes], dim=0) priors = pred_instances.priors if len(priors.shape) < 2: priors = priors[None, :] priors = priors[:, :4] gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8) priors = torch.cat([priors, gt_and_ignore_bboxes], dim=0) gt_ones = priors.new_ones( gt_and_ignore_bboxes.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_flags, gt_ones]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos(assign_result, num_expected_pos) # We found that sampled indices have duplicated items occasionally. 
# (may be a bug of PyTorch) pos_inds = pos_inds.unique() num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds = self.neg_sampler._sample_neg(assign_result, num_expected_neg) neg_inds = neg_inds.unique() sampling_result = MultiInstanceSamplingResult( pos_inds=pos_inds, neg_inds=neg_inds, priors=priors, gt_and_ignore_bboxes=gt_and_ignore_bboxes, assign_result=assign_result, gt_flags=gt_flags) return sampling_result
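
# Unlike `RandomSampler`, positivity above is read from the first column of
# the (n, k) multi-instance label matrix instead of `gt_inds`. A toy
# illustration with hypothetical labels (k = 2 boxes per proposal):
import torch

labels = torch.tensor([[1, 2], [0, 0], [3, 0]])
pos_inds = torch.nonzero(labels[:, 0] > 0, as_tuple=False).squeeze(1)
neg_inds = torch.nonzero(labels[:, 0] == 0, as_tuple=False).squeeze(1)
print(pos_inds.tolist(), neg_inds.tolist())  # [0, 2] [1]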
ERD
ERD-main/mmdet/models/task_modules/samplers/base_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch from mmengine.structures import InstanceData from mmdet.structures.bbox import BaseBoxes, cat_boxes from ..assigners import AssignResult from .sampling_result import SamplingResult class BaseSampler(metaclass=ABCMeta): """Base class of samplers. Args: num (int): Number of samples pos_fraction (float): Fraction of positive samples neg_pos_up (int): Upper bound number of negative and positive samples. Defaults to -1. add_gt_as_proposals (bool): Whether to add ground truth boxes as proposals. Defaults to True. """ def __init__(self, num: int, pos_fraction: float, neg_pos_ub: int = -1, add_gt_as_proposals: bool = True, **kwargs) -> None: self.num = num self.pos_fraction = pos_fraction self.neg_pos_ub = neg_pos_ub self.add_gt_as_proposals = add_gt_as_proposals self.pos_sampler = self self.neg_sampler = self @abstractmethod def _sample_pos(self, assign_result: AssignResult, num_expected: int, **kwargs): """Sample positive samples.""" pass @abstractmethod def _sample_neg(self, assign_result: AssignResult, num_expected: int, **kwargs): """Sample negative samples.""" pass def sample(self, assign_result: AssignResult, pred_instances: InstanceData, gt_instances: InstanceData, **kwargs) -> SamplingResult: """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Assigning results. pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). Returns: :obj:`SamplingResult`: Sampling result. Example: >>> from mmengine.structures import InstanceData >>> from mmdet.models.task_modules.samplers import RandomSampler, >>> from mmdet.models.task_modules.assigners import AssignResult >>> from mmdet.models.task_modules.samplers. ... sampling_result import ensure_rng, random_boxes >>> rng = ensure_rng(None) >>> assign_result = AssignResult.random(rng=rng) >>> pred_instances = InstanceData() >>> pred_instances.priors = random_boxes(assign_result.num_preds, ... rng=rng) >>> gt_instances = InstanceData() >>> gt_instances.bboxes = random_boxes(assign_result.num_gts, ... rng=rng) >>> gt_instances.labels = torch.randint( ... 0, 5, (assign_result.num_gts,), dtype=torch.long) >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, >>> add_gt_as_proposals=False) >>> self = self.sample(assign_result, pred_instances, gt_instances) """ gt_bboxes = gt_instances.bboxes priors = pred_instances.priors gt_labels = gt_instances.labels if len(priors.shape) < 2: priors = priors[None, :] gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals and len(gt_bboxes) > 0: # When `gt_bboxes` and `priors` are all box type, convert # `gt_bboxes` type to `priors` type. 
if (isinstance(gt_bboxes, BaseBoxes) and isinstance(priors, BaseBoxes)): gt_bboxes_ = gt_bboxes.convert_to(type(priors)) else: gt_bboxes_ = gt_bboxes priors = cat_boxes([gt_bboxes_, priors], dim=0) assign_result.add_gt_(gt_labels) gt_ones = priors.new_ones(gt_bboxes_.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=priors, **kwargs) # We found that sampled indices have duplicated items occasionally. # (may be a bug of PyTorch) pos_inds = pos_inds.unique() num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes=priors, **kwargs) neg_inds = neg_inds.unique() sampling_result = SamplingResult( pos_inds=pos_inds, neg_inds=neg_inds, priors=priors, gt_bboxes=gt_bboxes, assign_result=assign_result, gt_flags=gt_flags) return sampling_result
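
# The negative cap applied in `sample()` above: once positives are drawn,
# negatives are limited to `neg_pos_ub * max(1, num_sampled_pos)`. A sketch
# of that arithmetic with hypothetical numbers:
num, neg_pos_ub = 256, 3
num_sampled_pos = 20                      # suppose only 20 positives exist
num_expected_neg = num - num_sampled_pos  # 236 requested
neg_upper_bound = int(neg_pos_ub * max(1, num_sampled_pos))
num_expected_neg = min(num_expected_neg, neg_upper_bound)
print(num_expected_neg)  # 60 negatives actually sampled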
ERD
ERD-main/mmdet/models/task_modules/samplers/random_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Union import torch from numpy import ndarray from torch import Tensor from mmdet.registry import TASK_UTILS from ..assigners import AssignResult from .base_sampler import BaseSampler @TASK_UTILS.register_module() class RandomSampler(BaseSampler): """Random sampler. Args: num (int): Number of samples pos_fraction (float): Fraction of positive samples neg_pos_up (int): Upper bound number of negative and positive samples. Defaults to -1. add_gt_as_proposals (bool): Whether to add ground truth boxes as proposals. Defaults to True. """ def __init__(self, num: int, pos_fraction: float, neg_pos_ub: int = -1, add_gt_as_proposals: bool = True, **kwargs): from .sampling_result import ensure_rng super().__init__( num=num, pos_fraction=pos_fraction, neg_pos_ub=neg_pos_ub, add_gt_as_proposals=add_gt_as_proposals) self.rng = ensure_rng(kwargs.get('rng', None)) def random_choice(self, gallery: Union[Tensor, ndarray, list], num: int) -> Union[Tensor, ndarray]: """Random select some elements from the gallery. If `gallery` is a Tensor, the returned indices will be a Tensor; If `gallery` is a ndarray or list, the returned indices will be a ndarray. Args: gallery (Tensor | ndarray | list): indices pool. num (int): expected sample num. Returns: Tensor or ndarray: sampled indices. """ assert len(gallery) >= num is_tensor = isinstance(gallery, torch.Tensor) if not is_tensor: if torch.cuda.is_available(): device = torch.cuda.current_device() else: device = 'cpu' gallery = torch.tensor(gallery, dtype=torch.long, device=device) # This is a temporary fix. We can revert the following code # when PyTorch fixes the abnormal return of torch.randperm. # See: https://github.com/open-mmlab/mmdetection/pull/5014 perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device) rand_inds = gallery[perm] if not is_tensor: rand_inds = rand_inds.cpu().numpy() return rand_inds def _sample_pos(self, assign_result: AssignResult, num_expected: int, **kwargs) -> Union[Tensor, ndarray]: """Randomly sample some positive samples. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. num_expected (int): The number of expected positive samples Returns: Tensor or ndarray: sampled indices. """ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected) def _sample_neg(self, assign_result: AssignResult, num_expected: int, **kwargs) -> Union[Tensor, ndarray]: """Randomly sample some negative samples. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. num_expected (int): The number of expected positive samples Returns: Tensor or ndarray: sampled indices. """ neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: return self.random_choice(neg_inds, num_expected)
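
# `random_choice` above boils down to a permutation-then-slice, which
# samples without replacement. An equivalent minimal sketch:
import torch

gallery = torch.tensor([10, 11, 12, 13, 14])
num = 3
perm = torch.randperm(gallery.numel())[:num]
print(gallery[perm])  # 3 distinct elements, uniform over the gallery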
ERD
ERD-main/mmdet/models/task_modules/samplers/ohem_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import bbox2roi from .base_sampler import BaseSampler @TASK_UTILS.register_module() class OHEMSampler(BaseSampler): r"""Online Hard Example Mining Sampler described in `Training Region-based Object Detectors with Online Hard Example Mining <https://arxiv.org/abs/1604.03540>`_. """ def __init__(self, num, pos_fraction, context, neg_pos_ub=-1, add_gt_as_proposals=True, loss_key='loss_cls', **kwargs): super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) self.context = context if not hasattr(self.context, 'num_stages'): self.bbox_head = self.context.bbox_head else: self.bbox_head = self.context.bbox_head[self.context.current_stage] self.loss_key = loss_key def hard_mining(self, inds, num_expected, bboxes, labels, feats): with torch.no_grad(): rois = bbox2roi([bboxes]) if not hasattr(self.context, 'num_stages'): bbox_results = self.context._bbox_forward(feats, rois) else: bbox_results = self.context._bbox_forward( self.context.current_stage, feats, rois) cls_score = bbox_results['cls_score'] loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=rois, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')[self.loss_key] _, topk_loss_inds = loss.topk(num_expected) return inds[topk_loss_inds] def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): """Sample positive boxes. Args: assign_result (:obj:`AssignResult`): Assigned results num_expected (int): Number of expected positive samples bboxes (torch.Tensor, optional): Boxes. Defaults to None. feats (list[torch.Tensor], optional): Multi-level features. Defaults to None. Returns: torch.Tensor: Indices of positive samples """ # Sample some hard positive samples pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats) def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): """Sample negative boxes. Args: assign_result (:obj:`AssignResult`): Assigned results num_expected (int): Number of expected negative samples bboxes (torch.Tensor, optional): Boxes. Defaults to None. feats (list[torch.Tensor], optional): Multi-level features. Defaults to None. Returns: torch.Tensor: Indices of negative samples """ # Sample some hard negative samples neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: neg_labels = assign_result.labels.new_empty( neg_inds.size(0)).fill_(self.bbox_head.num_classes) return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], neg_labels, feats)
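
# The core of `hard_mining` above: rank candidates by their per-sample
# classification loss and keep the k largest. A standalone sketch with a
# dummy loss vector (hypothetical values, no forward pass):
import torch

inds = torch.tensor([4, 7, 9, 12, 15])          # candidate indices
loss = torch.tensor([0.2, 1.5, 0.1, 0.9, 2.3])  # one loss per candidate
num_expected = 2
_, topk_loss_inds = loss.topk(num_expected)
print(inds[topk_loss_inds])  # tensor([15,  7]) -- the two hardest samples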
ERD
ERD-main/mmdet/models/task_modules/samplers/iou_balanced_neg_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch

from mmdet.registry import TASK_UTILS
from .random_sampler import RandomSampler


@TASK_UTILS.register_module()
class IoUBalancedNegSampler(RandomSampler):
    """IoU Balanced Sampling.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Sampling proposals according to their IoU. `floor_fraction` of needed
    RoIs are sampled randomly from proposals whose IoU is lower than
    `floor_thr`. The others are sampled evenly from a set of bins that
    split the IoU range above `floor_thr` into `num_bins` intervals.

    Args:
        num (int): number of proposals.
        pos_fraction (float): fraction of positive proposals.
        floor_thr (float): threshold (minimum) IoU for IoU balanced
            sampling, set to -1 to apply IoU balanced sampling to all
            samples.
        floor_fraction (float): sampling fraction of proposals under
            floor_thr.
        num_bins (int): number of bins in IoU balanced sampling.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 floor_thr=-1,
                 floor_fraction=0,
                 num_bins=3,
                 **kwargs):
        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
                                                    **kwargs)
        assert floor_thr >= 0 or floor_thr == -1
        assert 0 <= floor_fraction <= 1
        assert num_bins >= 1

        self.floor_thr = floor_thr
        self.floor_fraction = floor_fraction
        self.num_bins = num_bins

    def sample_via_interval(self, max_overlaps, full_set, num_expected):
        """Sample according to the IoU interval.

        Args:
            max_overlaps (torch.Tensor): IoU between bounding boxes and
                ground truth boxes.
            full_set (set(int)): A full set of indices of boxes.
            num_expected (int): Number of expected samples.

        Returns:
            np.ndarray: Indices of samples
        """
        max_iou = max_overlaps.max()
        iou_interval = (max_iou - self.floor_thr) / self.num_bins
        per_num_expected = int(num_expected / self.num_bins)

        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = self.floor_thr + i * iou_interval
            end_iou = self.floor_thr + (i + 1) * iou_interval
            tmp_set = set(
                np.where(
                    np.logical_and(max_overlaps >= start_iou,
                                   max_overlaps < end_iou))[0])
            tmp_inds = list(tmp_set & full_set)
            if len(tmp_inds) > per_num_expected:
                tmp_sampled_set = self.random_choice(tmp_inds,
                                                     per_num_expected)
            else:
                tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)
            sampled_inds.append(tmp_sampled_set)

        sampled_inds = np.concatenate(sampled_inds)
        if len(sampled_inds) < num_expected:
            num_extra = num_expected - len(sampled_inds)
            extra_inds = np.array(list(full_set - set(sampled_inds)))
            if len(extra_inds) > num_extra:
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])

        return sampled_inds

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Sample negative boxes.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of
                boxes.
            num_expected (int): The number of expected negative samples.

        Returns:
            Tensor or ndarray: sampled indices.
""" neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: max_overlaps = assign_result.max_overlaps.cpu().numpy() # balance sampling for negative samples neg_set = set(neg_inds.cpu().numpy()) if self.floor_thr > 0: floor_set = set( np.where( np.logical_and(max_overlaps >= 0, max_overlaps < self.floor_thr))[0]) iou_sampling_set = set( np.where(max_overlaps >= self.floor_thr)[0]) elif self.floor_thr == 0: floor_set = set(np.where(max_overlaps == 0)[0]) iou_sampling_set = set( np.where(max_overlaps > self.floor_thr)[0]) else: floor_set = set() iou_sampling_set = set( np.where(max_overlaps > self.floor_thr)[0]) # for sampling interval calculation self.floor_thr = 0 floor_neg_inds = list(floor_set & neg_set) iou_sampling_neg_inds = list(iou_sampling_set & neg_set) num_expected_iou_sampling = int(num_expected * (1 - self.floor_fraction)) if len(iou_sampling_neg_inds) > num_expected_iou_sampling: if self.num_bins >= 2: iou_sampled_inds = self.sample_via_interval( max_overlaps, set(iou_sampling_neg_inds), num_expected_iou_sampling) else: iou_sampled_inds = self.random_choice( iou_sampling_neg_inds, num_expected_iou_sampling) else: iou_sampled_inds = np.array( iou_sampling_neg_inds, dtype=np.int64) num_expected_floor = num_expected - len(iou_sampled_inds) if len(floor_neg_inds) > num_expected_floor: sampled_floor_inds = self.random_choice( floor_neg_inds, num_expected_floor) else: sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64) sampled_inds = np.concatenate( (sampled_floor_inds, iou_sampled_inds)) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array(list(neg_set - set(sampled_inds))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) sampled_inds = np.concatenate((sampled_inds, extra_inds)) sampled_inds = torch.from_numpy(sampled_inds).long().to( assign_result.gt_inds.device) return sampled_inds
ERD
ERD-main/mmdet/models/task_modules/samplers/mask_pseudo_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
"""copy from
https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py."""

import torch
from mmengine.structures import InstanceData

from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .base_sampler import BaseSampler
from .mask_sampling_result import MaskSamplingResult


@TASK_UTILS.register_module()
class MaskPseudoSampler(BaseSampler):
    """A pseudo sampler that does not actually do any sampling."""

    def __init__(self, **kwargs):
        pass

    def _sample_pos(self, **kwargs):
        """Sample positive samples."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples."""
        raise NotImplementedError

    def sample(self, assign_result: AssignResult,
               pred_instances: InstanceData, gt_instances: InstanceData,
               *args, **kwargs):
        """Directly returns the positive and negative indices of samples.

        Args:
            assign_result (:obj:`AssignResult`): Mask assigning results.
            pred_instances (:obj:`InstanceData`): Instances of model
                predictions. It includes ``scores`` and ``masks`` predicted
                by the model.
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It usually includes ``labels`` and ``masks``
                attributes.

        Returns:
            :obj:`SamplingResult`: sampler results
        """
        pred_masks = pred_instances.masks
        gt_masks = gt_instances.masks
        pos_inds = torch.nonzero(
            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
        neg_inds = torch.nonzero(
            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
        gt_flags = pred_masks.new_zeros(pred_masks.shape[0],
                                        dtype=torch.uint8)
        sampling_result = MaskSamplingResult(
            pos_inds=pos_inds,
            neg_inds=neg_inds,
            masks=pred_masks,
            gt_masks=gt_masks,
            assign_result=assign_result,
            gt_flags=gt_flags,
            avg_factor_with_neg=False)
        return sampling_result
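
# "Sampling" in the pseudo sampler is just splitting indices by the sign of
# `gt_inds`; everything assigned is kept. A toy illustration with
# hypothetical assignments (0 = negative, i > 0 means matched to gt i-1):
import torch

gt_inds = torch.tensor([0, 2, 0, 1])
pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(gt_inds == 0, as_tuple=False).squeeze(-1).unique()
print(pos_inds.tolist(), neg_inds.tolist())  # [1, 3] [0, 2]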
ERD
ERD-main/mmdet/models/task_modules/samplers/score_hlr_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Union import torch from mmcv.ops import nms_match from mmengine.structures import InstanceData from numpy import ndarray from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import bbox2roi from ..assigners import AssignResult from .base_sampler import BaseSampler from .sampling_result import SamplingResult @TASK_UTILS.register_module() class ScoreHLRSampler(BaseSampler): r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_. Score hierarchical local rank (HLR) differentiates with RandomSampler in negative part. It firstly computes Score-HLR in a two-step way, then linearly maps score hlr to the loss weights. Args: num (int): Total number of sampled RoIs. pos_fraction (float): Fraction of positive samples. context (:obj:`BaseRoIHead`): RoI head that the sampler belongs to. neg_pos_ub (int): Upper bound of the ratio of num negative to num positive, -1 means no upper bound. Defaults to -1. add_gt_as_proposals (bool): Whether to add ground truth as proposals. Defaults to True. k (float): Power of the non-linear mapping. Defaults to 0.5 bias (float): Shift of the non-linear mapping. Defaults to 0. score_thr (float): Minimum score that a negative sample is to be considered as valid bbox. Defaults to 0.05. iou_thr (float): IoU threshold for NMS match. Defaults to 0.5. """ def __init__(self, num: int, pos_fraction: float, context, neg_pos_ub: int = -1, add_gt_as_proposals: bool = True, k: float = 0.5, bias: float = 0, score_thr: float = 0.05, iou_thr: float = 0.5, **kwargs) -> None: super().__init__( num=num, pos_fraction=pos_fraction, neg_pos_ub=neg_pos_ub, add_gt_as_proposals=add_gt_as_proposals) self.k = k self.bias = bias self.score_thr = score_thr self.iou_thr = iou_thr self.context = context # context of cascade detectors is a list, so distinguish them here. if not hasattr(context, 'num_stages'): self.bbox_roi_extractor = context.bbox_roi_extractor self.bbox_head = context.bbox_head self.with_shared_head = context.with_shared_head if self.with_shared_head: self.shared_head = context.shared_head else: self.bbox_roi_extractor = context.bbox_roi_extractor[ context.current_stage] self.bbox_head = context.bbox_head[context.current_stage] @staticmethod def random_choice(gallery: Union[Tensor, ndarray, list], num: int) -> Union[Tensor, ndarray]: """Randomly select some elements from the gallery. If `gallery` is a Tensor, the returned indices will be a Tensor; If `gallery` is a ndarray or list, the returned indices will be a ndarray. Args: gallery (Tensor or ndarray or list): indices pool. num (int): expected sample num. Returns: Tensor or ndarray: sampled indices. """ assert len(gallery) >= num is_tensor = isinstance(gallery, torch.Tensor) if not is_tensor: if torch.cuda.is_available(): device = torch.cuda.current_device() else: device = 'cpu' gallery = torch.tensor(gallery, dtype=torch.long, device=device) perm = torch.randperm(gallery.numel(), device=gallery.device)[:num] rand_inds = gallery[perm] if not is_tensor: rand_inds = rand_inds.cpu().numpy() return rand_inds def _sample_pos(self, assign_result: AssignResult, num_expected: int, **kwargs) -> Union[Tensor, ndarray]: """Randomly sample some positive samples. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. num_expected (int): The number of expected positive samples Returns: Tensor or ndarray: sampled indices. 
""" pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten() if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected) def _sample_neg(self, assign_result: AssignResult, num_expected: int, bboxes: Tensor, feats: Tensor, **kwargs) -> Union[Tensor, ndarray]: """Sample negative samples. Score-HLR sampler is done in the following steps: 1. Take the maximum positive score prediction of each negative samples as s_i. 2. Filter out negative samples whose s_i <= score_thr, the left samples are called valid samples. 3. Use NMS-Match to divide valid samples into different groups, samples in the same group will greatly overlap with each other 4. Rank the matched samples in two-steps to get Score-HLR. (1) In the same group, rank samples with their scores. (2) In the same score rank across different groups, rank samples with their scores again. 5. Linearly map Score-HLR to the final label weights. Args: assign_result (:obj:`AssignResult`): result of assigner. num_expected (int): Expected number of samples. bboxes (Tensor): bbox to be sampled. feats (Tensor): Features come from FPN. Returns: Tensor or ndarray: sampled indices. """ neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten() num_neg = neg_inds.size(0) if num_neg == 0: return neg_inds, None with torch.no_grad(): neg_bboxes = bboxes[neg_inds] neg_rois = bbox2roi([neg_bboxes]) bbox_result = self.context._bbox_forward(feats, neg_rois) cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[ 'bbox_pred'] ori_loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=None, labels=neg_inds.new_full((num_neg, ), self.bbox_head.num_classes), label_weights=cls_score.new_ones(num_neg), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls'] # filter out samples with the max score lower than score_thr max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1) valid_inds = (max_score > self.score_thr).nonzero().view(-1) invalid_inds = (max_score <= self.score_thr).nonzero().view(-1) num_valid = valid_inds.size(0) num_invalid = invalid_inds.size(0) num_expected = min(num_neg, num_expected) num_hlr = min(num_valid, num_expected) num_rand = num_expected - num_hlr if num_valid > 0: valid_rois = neg_rois[valid_inds] valid_max_score = max_score[valid_inds] valid_argmax_score = argmax_score[valid_inds] valid_bbox_pred = bbox_pred[valid_inds] # valid_bbox_pred shape: [num_valid, #num_classes, 4] valid_bbox_pred = valid_bbox_pred.view( valid_bbox_pred.size(0), -1, 4) selected_bbox_pred = valid_bbox_pred[range(num_valid), valid_argmax_score] pred_bboxes = self.bbox_head.bbox_coder.decode( valid_rois[:, 1:], selected_bbox_pred) pred_bboxes_with_score = torch.cat( [pred_bboxes, valid_max_score[:, None]], -1) group = nms_match(pred_bboxes_with_score, self.iou_thr) # imp: importance imp = cls_score.new_zeros(num_valid) for g in group: g_score = valid_max_score[g] # g_score has already sorted rank = g_score.new_tensor(range(g_score.size(0))) imp[g] = num_valid - rank + g_score _, imp_rank_inds = imp.sort(descending=True) _, imp_rank = imp_rank_inds.sort() hlr_inds = imp_rank_inds[:num_expected] if num_rand > 0: rand_inds = torch.randperm(num_invalid)[:num_rand] select_inds = torch.cat( [valid_inds[hlr_inds], invalid_inds[rand_inds]]) else: select_inds = valid_inds[hlr_inds] neg_label_weights = cls_score.new_ones(num_expected) up_bound = max(num_expected, num_valid) imp_weights = (up_bound - imp_rank[hlr_inds].float()) / up_bound neg_label_weights[:num_hlr] = 
imp_weights neg_label_weights[num_hlr:] = imp_weights.min() neg_label_weights = (self.bias + (1 - self.bias) * neg_label_weights).pow( self.k) ori_selected_loss = ori_loss[select_inds] new_loss = ori_selected_loss * neg_label_weights norm_ratio = ori_selected_loss.sum() / new_loss.sum() neg_label_weights *= norm_ratio else: neg_label_weights = cls_score.new_ones(num_expected) select_inds = torch.randperm(num_neg)[:num_expected] return neg_inds[select_inds], neg_label_weights def sample(self, assign_result: AssignResult, pred_instances: InstanceData, gt_instances: InstanceData, **kwargs) -> SamplingResult: """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Assigning results. pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). Returns: tuple[:obj:`SamplingResult`, Tensor]: Sampling result and the negative label weights. """ gt_bboxes = gt_instances.bboxes priors = pred_instances.priors gt_labels = gt_instances.labels gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals and len(gt_bboxes) > 0: priors = torch.cat([gt_bboxes, priors], dim=0) assign_result.add_gt_(gt_labels) gt_ones = priors.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=priors, **kwargs) num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds, neg_label_weights = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes=priors, **kwargs) sampling_result = SamplingResult( pos_inds=pos_inds, neg_inds=neg_inds, priors=priors, gt_bboxes=gt_bboxes, assign_result=assign_result, gt_flags=gt_flags) return sampling_result, neg_label_weights
12,663
42.5189
79
py
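The negative-branch reweighting above boils down to one formula: a hierarchical rank is mapped to a label weight via (bias + (1 - bias) * (up_bound - imp_rank) / up_bound) ** k. A minimal standalone sketch of that mapping in plain torch (toy ranks, not wired into the sampler class):

import torch

k, bias = 0.5, 0.0                       # defaults of ScoreHLRSampler
num_expected, num_valid = 4, 6
up_bound = max(num_expected, num_valid)
imp_rank = torch.tensor([0., 1., 2., 3.])        # 0 = highest-ranked negative
weights = (up_bound - imp_rank) / up_bound       # linear map to (0, 1]
weights = (bias + (1 - bias) * weights).pow(k)   # non-linear re-shaping
print(weights)  # tensor([1.0000, 0.9129, 0.8165, 0.7071])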
ERD
ERD-main/mmdet/models/task_modules/samplers/sampling_result.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes, cat_boxes from mmdet.utils import util_mixins from mmdet.utils.util_random import ensure_rng from ..assigners import AssignResult def random_boxes(num=1, scale=1, rng=None): """Simple version of ``kwimage.Boxes.random`` Returns: Tensor: shape (n, 4) in x1, y1, x2, y2 format. References: https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 Example: >>> num = 3 >>> scale = 512 >>> rng = 0 >>> boxes = random_boxes(num, scale, rng) >>> print(boxes) tensor([[280.9925, 278.9802, 308.6148, 366.1769], [216.9113, 330.6978, 224.0446, 456.5878], [405.3632, 196.3221, 493.3953, 270.7942]]) """ rng = ensure_rng(rng) tlbr = rng.rand(num, 4).astype(np.float32) tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) tlbr[:, 0] = tl_x * scale tlbr[:, 1] = tl_y * scale tlbr[:, 2] = br_x * scale tlbr[:, 3] = br_y * scale boxes = torch.from_numpy(tlbr) return boxes class SamplingResult(util_mixins.NiceRepr): """Bbox sampling result. Args: pos_inds (Tensor): Indices of positive samples. neg_inds (Tensor): Indices of negative samples. priors (Tensor): The priors can be anchors or points, or the bboxes predicted by the previous stage. gt_bboxes (Tensor): Ground truth of bboxes. assign_result (:obj:`AssignResult`): Assigning results. gt_flags (Tensor): The Ground truth flags. avg_factor_with_neg (bool): If True, ``avg_factor`` equal to the number of total priors; Otherwise, it is the number of positive priors. Defaults to True. Example: >>> # xdoctest: +IGNORE_WANT >>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA >>> self = SamplingResult.random(rng=10) >>> print(f'self = {self}') self = <SamplingResult({ 'neg_inds': tensor([1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), 'neg_priors': torch.Size([12, 4]), 'num_gts': 1, 'num_neg': 12, 'num_pos': 1, 'avg_factor': 13, 'pos_assigned_gt_inds': tensor([0]), 'pos_inds': tensor([0]), 'pos_is_gt': tensor([1], dtype=torch.uint8), 'pos_priors': torch.Size([1, 4]) })> """ def __init__(self, pos_inds: Tensor, neg_inds: Tensor, priors: Tensor, gt_bboxes: Tensor, assign_result: AssignResult, gt_flags: Tensor, avg_factor_with_neg: bool = True) -> None: self.pos_inds = pos_inds self.neg_inds = neg_inds self.num_pos = max(pos_inds.numel(), 1) self.num_neg = max(neg_inds.numel(), 1) self.avg_factor_with_neg = avg_factor_with_neg self.avg_factor = self.num_pos + self.num_neg \ if avg_factor_with_neg else self.num_pos self.pos_priors = priors[pos_inds] self.neg_priors = priors[neg_inds] self.pos_is_gt = gt_flags[pos_inds] self.num_gts = gt_bboxes.shape[0] self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 self.pos_gt_labels = assign_result.labels[pos_inds] box_dim = gt_bboxes.box_dim if isinstance(gt_bboxes, BaseBoxes) else 4 if gt_bboxes.numel() == 0: # hack for index error case assert self.pos_assigned_gt_inds.numel() == 0 self.pos_gt_bboxes = gt_bboxes.view(-1, box_dim) else: if len(gt_bboxes.shape) < 2: gt_bboxes = gt_bboxes.view(-1, box_dim) self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long()] @property def priors(self): """torch.Tensor: concatenated positive and negative priors""" return cat_boxes([self.pos_priors, self.neg_priors]) @property def bboxes(self): """torch.Tensor: concatenated positive and negative boxes""" 
warnings.warn('DeprecationWarning: bboxes is deprecated, ' 'please use "priors" instead') return self.priors @property def pos_bboxes(self): warnings.warn('DeprecationWarning: pos_bboxes is deprecated, ' 'please use "pos_priors" instead') return self.pos_priors @property def neg_bboxes(self): warnings.warn('DeprecationWarning: neg_bboxes is deprecated, ' 'please use "neg_priors" instead') return self.neg_priors def to(self, device): """Change the device of the data inplace. Example: >>> self = SamplingResult.random() >>> print(f'self = {self.to(None)}') >>> # xdoctest: +REQUIRES(--gpu) >>> print(f'self = {self.to(0)}') """ _dict = self.__dict__ for key, value in _dict.items(): if isinstance(value, (torch.Tensor, BaseBoxes)): _dict[key] = value.to(device) return self def __nice__(self): data = self.info.copy() data['pos_priors'] = data.pop('pos_priors').shape data['neg_priors'] = data.pop('neg_priors').shape parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] body = ' ' + ',\n '.join(parts) return '{\n' + body + '\n}' @property def info(self): """Returns a dictionary of info about the object.""" return { 'pos_inds': self.pos_inds, 'neg_inds': self.neg_inds, 'pos_priors': self.pos_priors, 'neg_priors': self.neg_priors, 'pos_is_gt': self.pos_is_gt, 'num_gts': self.num_gts, 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, 'num_pos': self.num_pos, 'num_neg': self.num_neg, 'avg_factor': self.avg_factor } @classmethod def random(cls, rng=None, **kwargs): """ Args: rng (None | int | numpy.random.RandomState): seed or state. kwargs (keyword arguments): - num_preds: Number of predicted boxes. - num_gts: Number of true boxes. - p_ignore (float): Probability of a predicted box assigned to an ignored truth. - p_assigned (float): probability of a predicted box not being assigned. Returns: :obj:`SamplingResult`: Randomly generated sampling result. Example: >>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA >>> self = SamplingResult.random() >>> print(self.__dict__) """ from mmengine.structures import InstanceData from mmdet.models.task_modules.assigners import AssignResult from mmdet.models.task_modules.samplers import RandomSampler rng = ensure_rng(rng) # make probabilistic? num = 32 pos_fraction = 0.5 neg_pos_ub = -1 assign_result = AssignResult.random(rng=rng, **kwargs) # Note we could just compute an assignment priors = random_boxes(assign_result.num_preds, rng=rng) gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) gt_labels = torch.randint( 0, 5, (assign_result.num_gts, ), dtype=torch.long) pred_instances = InstanceData() pred_instances.priors = priors gt_instances = InstanceData() gt_instances.bboxes = gt_bboxes gt_instances.labels = gt_labels add_gt_as_proposals = True sampler = RandomSampler( num, pos_fraction, neg_pos_ub=neg_pos_ub, add_gt_as_proposals=add_gt_as_proposals, rng=rng) self = sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) return self
8,409
33.896266
101
py
ERD
ERD-main/mmdet/models/task_modules/samplers/pseudo_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from ..assigners import AssignResult from .base_sampler import BaseSampler from .sampling_result import SamplingResult @TASK_UTILS.register_module() class PseudoSampler(BaseSampler): """A pseudo sampler that does not perform actual sampling.""" def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): """Sample positive samples.""" raise NotImplementedError def _sample_neg(self, **kwargs): """Sample negative samples.""" raise NotImplementedError def sample(self, assign_result: AssignResult, pred_instances: InstanceData, gt_instances: InstanceData, *args, **kwargs): """Directly returns the positive and negative indices of samples. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors, points, or bboxes predicted by the model, shape (n, 4). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. Returns: :obj:`SamplingResult`: Sampling result. """ gt_bboxes = gt_instances.bboxes priors = pred_instances.priors pos_inds = torch.nonzero( assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8) sampling_result = SamplingResult( pos_inds=pos_inds, neg_inds=neg_inds, priors=priors, gt_bboxes=gt_bboxes, assign_result=assign_result, gt_flags=gt_flags, avg_factor_with_neg=False) return sampling_result
2,145
34.180328
79
py
ERD
ERD-main/mmdet/models/task_modules/coders/yolo_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Union import torch from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class YOLOBBoxCoder(BaseBBoxCoder): """YOLO BBox coder. Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides the image into grids, and encodes bbox (x1, y1, x2, y2) into (cx, cy, dw, dh). cx, cy in [0., 1.] denote the relative center position w.r.t. the center of bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`. Args: eps (float): Min value of cx, cy when encoding. """ def __init__(self, eps: float = 1e-6, **kwargs): super().__init__(**kwargs) self.eps = eps def encode(self, bboxes: Union[Tensor, BaseBoxes], gt_bboxes: Union[Tensor, BaseBoxes], stride: Union[Tensor, int]) -> Tensor: """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes, e.g., anchors. gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the transformation, e.g., ground-truth boxes. stride (torch.Tensor | int): Stride of bboxes. Returns: torch.Tensor: Box transformation deltas """ bboxes = get_box_tensor(bboxes) gt_bboxes = get_box_tensor(gt_bboxes) assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5 y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5 w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0] h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1] x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 w = bboxes[..., 2] - bboxes[..., 0] h = bboxes[..., 3] - bboxes[..., 1] w_target = torch.log((w_gt / w).clamp(min=self.eps)) h_target = torch.log((h_gt / h).clamp(min=self.eps)) x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp( self.eps, 1 - self.eps) y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp( self.eps, 1 - self.eps) encoded_bboxes = torch.stack( [x_center_target, y_center_target, w_target, h_target], dim=-1) return encoded_bboxes def decode(self, bboxes: Union[Tensor, BaseBoxes], pred_bboxes: Tensor, stride: Union[Tensor, int]) -> Union[Tensor, BaseBoxes]: """Apply transformation `pred_bboxes` to `bboxes`. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes, e.g. anchors. pred_bboxes (torch.Tensor): Encoded boxes with shape (..., 4). stride (torch.Tensor | int): Strides of bboxes. Returns: Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes. """ bboxes = get_box_tensor(bboxes) assert pred_bboxes.size(-1) == bboxes.size(-1) == 4 xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + ( pred_bboxes[..., :2] - 0.5) * stride whs = (bboxes[..., 2:] - bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp() decoded_bboxes = torch.stack( (xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] - whs[..., 1], xy_centers[..., 0] + whs[..., 0], xy_centers[..., 1] + whs[..., 1]), dim=-1) if self.use_box_type: decoded_bboxes = HorizontalBoxes(decoded_bboxes) return decoded_bboxes
3,864
39.684211
77
py
ERD
ERD-main/mmdet/models/task_modules/coders/distance_point_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence, Union from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import (BaseBoxes, HorizontalBoxes, bbox2distance, distance2bbox, get_box_tensor) from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class DistancePointBBoxCoder(BaseBBoxCoder): """Distance Point BBox coder. This coder encodes gt bboxes (x1, y1, x2, y2) into (left, top, right, bottom) distances from a point and decodes them back to the original format. Args: clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, clip_border: Optional[bool] = True, **kwargs) -> None: super().__init__(**kwargs) self.clip_border = clip_border def encode(self, points: Tensor, gt_bboxes: Union[Tensor, BaseBoxes], max_dis: Optional[float] = None, eps: float = 0.1) -> Tensor: """Encode bounding box to distances. Args: points (Tensor): Shape (N, 2), The format is [x, y]. gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format is "xyxy". max_dis (float): Upper bound of the distance. Default None. eps (float): a small value to ensure target < max_dis, instead of <=. Default 0.1. Returns: Tensor: Box transformation deltas. The shape is (N, 4). """ gt_bboxes = get_box_tensor(gt_bboxes) assert points.size(0) == gt_bboxes.size(0) assert points.size(-1) == 2 assert gt_bboxes.size(-1) == 4 return bbox2distance(points, gt_bboxes, max_dis, eps) def decode( self, points: Tensor, pred_bboxes: Tensor, max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None ) -> Union[Tensor, BaseBoxes]: """Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). pred_bboxes (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]], and the length of max_shape should also be B. Default None. Returns: Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or (B, N, 4) """ assert points.size(0) == pred_bboxes.size(0) assert points.size(-1) == 2 assert pred_bboxes.size(-1) == 4 if self.clip_border is False: max_shape = None bboxes = distance2bbox(points, pred_bboxes, max_shape) if self.use_box_type: bboxes = HorizontalBoxes(bboxes) return bboxes
3,205
36.27907
78
py
ERD
ERD-main/mmdet/models/task_modules/coders/bucketing_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence, Tuple, Union import numpy as np import torch import torch.nn.functional as F from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import (BaseBoxes, HorizontalBoxes, bbox_rescale, get_box_tensor) from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class BucketingBBoxCoder(BaseBBoxCoder): """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL). Boundary Localization with Bucketing and Bucketing Guided Rescoring are implemented here. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: num_buckets (int): Number of buckets. scale_factor (int): Scale factor of proposals to generate buckets. offset_topk (int): Topk buckets are used to generate bucket fine regression targets. Defaults to 2. offset_upperbound (float): Offset upperbound to generate bucket fine regression targets. To avoid too large offset displacements. Defaults to 1.0. cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. Defaults to True. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, num_buckets: int, scale_factor: int, offset_topk: int = 2, offset_upperbound: float = 1.0, cls_ignore_neighbor: bool = True, clip_border: bool = True, **kwargs) -> None: super().__init__(**kwargs) self.num_buckets = num_buckets self.scale_factor = scale_factor self.offset_topk = offset_topk self.offset_upperbound = offset_upperbound self.cls_ignore_neighbor = cls_ignore_neighbor self.clip_border = clip_border def encode(self, bboxes: Union[Tensor, BaseBoxes], gt_bboxes: Union[Tensor, BaseBoxes]) -> Tuple[Tensor]: """Get bucketing estimation and fine regression targets during training. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the transformation, e.g., ground truth boxes. Returns: encoded_bboxes(tuple[Tensor]): bucketing estimation and fine regression targets and weights """ bboxes = get_box_tensor(bboxes) gt_bboxes = get_box_tensor(gt_bboxes) assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets, self.scale_factor, self.offset_topk, self.offset_upperbound, self.cls_ignore_neighbor) return encoded_bboxes def decode( self, bboxes: Union[Tensor, BaseBoxes], pred_bboxes: Tensor, max_shape: Optional[Tuple[int]] = None ) -> Tuple[Union[Tensor, BaseBoxes], Tensor]: """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes. pred_bboxes (torch.Tensor): Predictions for bucketing estimation and fine regression max_shape (tuple[int], optional): Maximum shape of boxes. Defaults to None. Returns: Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes. """ bboxes = get_box_tensor(bboxes) assert len(pred_bboxes) == 2 cls_preds, offset_preds = pred_bboxes assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size( 0) == bboxes.size(0) bboxes, loc_confidence = bucket2bbox(bboxes, cls_preds, offset_preds, self.num_buckets, self.scale_factor, max_shape, self.clip_border) if self.use_box_type: bboxes = HorizontalBoxes(bboxes, clone=False) return bboxes, loc_confidence def generat_buckets(proposals: Tensor, num_buckets: int, scale_factor: float = 1.0) -> Tuple[Tensor]: """Generate buckets w.r.t bucket number and scale factor of proposals. 
Args: proposals (Tensor): Shape (n, 4) num_buckets (int): Number of buckets. scale_factor (float): Scale factor to rescale proposals. Returns: tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets) - bucket_w: Width of buckets on x-axis. Shape (n, ). - bucket_h: Height of buckets on y-axis. Shape (n, ). - l_buckets: Left buckets. Shape (n, ceil(side_num/2)). - r_buckets: Right buckets. Shape (n, ceil(side_num/2)). - t_buckets: Top buckets. Shape (n, ceil(side_num/2)). - d_buckets: Down buckets. Shape (n, ceil(side_num/2)). """ proposals = bbox_rescale(proposals, scale_factor) # number of buckets in each side side_num = int(np.ceil(num_buckets / 2.0)) pw = proposals[..., 2] - proposals[..., 0] ph = proposals[..., 3] - proposals[..., 1] px1 = proposals[..., 0] py1 = proposals[..., 1] px2 = proposals[..., 2] py2 = proposals[..., 3] bucket_w = pw / num_buckets bucket_h = ph / num_buckets # left buckets l_buckets = px1[:, None] + (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] # right buckets r_buckets = px2[:, None] - (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] # top buckets t_buckets = py1[:, None] + (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] # down buckets d_buckets = py2[:, None] - (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets def bbox2bucket(proposals: Tensor, gt: Tensor, num_buckets: int, scale_factor: float, offset_topk: int = 2, offset_upperbound: float = 1.0, cls_ignore_neighbor: bool = True) -> Tuple[Tensor]: """Generate buckets estimation and fine regression targets. Args: proposals (Tensor): Shape (n, 4) gt (Tensor): Shape (n, 4) num_buckets (int): Number of buckets. scale_factor (float): Scale factor to rescale proposals. offset_topk (int): Topk buckets are used to generate bucket fine regression targets. Defaults to 2. offset_upperbound (float): Offset allowance to generate bucket fine regression targets. To avoid too large offset displacements. Defaults to 1.0. cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. Defaults to True. Returns: tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights). - offsets: Fine regression targets. \ Shape (n, num_buckets*2). - offsets_weights: Fine regression weights. \ Shape (n, num_buckets*2). - bucket_labels: Bucketing estimation labels. \ Shape (n, num_buckets*2). - cls_weights: Bucketing estimation weights. \ Shape (n, num_buckets*2). 
""" assert proposals.size() == gt.size() # generate buckets proposals = proposals.float() gt = gt.float() (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets) = generat_buckets(proposals, num_buckets, scale_factor) gx1 = gt[..., 0] gy1 = gt[..., 1] gx2 = gt[..., 2] gy2 = gt[..., 3] # generate offset targets and weights # offsets from buckets to gts l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None] r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None] t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None] d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None] # select top-k nearest buckets l_topk, l_label = l_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) r_topk, r_label = r_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) t_topk, t_label = t_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) d_topk, d_label = d_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) offset_l_weights = l_offsets.new_zeros(l_offsets.size()) offset_r_weights = r_offsets.new_zeros(r_offsets.size()) offset_t_weights = t_offsets.new_zeros(t_offsets.size()) offset_d_weights = d_offsets.new_zeros(d_offsets.size()) inds = torch.arange(0, proposals.size(0)).to(proposals).long() # generate offset weights of top-k nearest buckets for k in range(offset_topk): if k >= 1: offset_l_weights[inds, l_label[:, k]] = (l_topk[:, k] < offset_upperbound).float() offset_r_weights[inds, r_label[:, k]] = (r_topk[:, k] < offset_upperbound).float() offset_t_weights[inds, t_label[:, k]] = (t_topk[:, k] < offset_upperbound).float() offset_d_weights[inds, d_label[:, k]] = (d_topk[:, k] < offset_upperbound).float() else: offset_l_weights[inds, l_label[:, k]] = 1.0 offset_r_weights[inds, r_label[:, k]] = 1.0 offset_t_weights[inds, t_label[:, k]] = 1.0 offset_d_weights[inds, d_label[:, k]] = 1.0 offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1) offsets_weights = torch.cat([ offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights ], dim=-1) # generate bucket labels and weight side_num = int(np.ceil(num_buckets / 2.0)) labels = torch.stack( [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1) batch_size = labels.size(0) bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size, -1).float() bucket_cls_l_weights = (l_offsets.abs() < 1).float() bucket_cls_r_weights = (r_offsets.abs() < 1).float() bucket_cls_t_weights = (t_offsets.abs() < 1).float() bucket_cls_d_weights = (d_offsets.abs() < 1).float() bucket_cls_weights = torch.cat([ bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights, bucket_cls_d_weights ], dim=-1) # ignore second nearest buckets for cls if necessary if cls_ignore_neighbor: bucket_cls_weights = (~((bucket_cls_weights == 1) & (bucket_labels == 0))).float() else: bucket_cls_weights[:] = 1.0 return offsets, offsets_weights, bucket_labels, bucket_cls_weights def bucket2bbox(proposals: Tensor, cls_preds: Tensor, offset_preds: Tensor, num_buckets: int, scale_factor: float = 1.0, max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None, clip_border: bool = True) -> Tuple[Tensor]: """Apply bucketing estimation (cls preds) and fine regression (offset preds) to generate det bboxes. Args: proposals (Tensor): Boxes to be transformed. Shape (n, 4) cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2). offset_preds (Tensor): fine regression. Shape (n, num_buckets*2). num_buckets (int): Number of buckets. 
scale_factor (float): Scale factor to rescale proposals. max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Returns: tuple[Tensor]: (bboxes, loc_confidence). - bboxes: predicted bboxes. Shape (n, 4) - loc_confidence: localization confidence of predicted bboxes. Shape (n,). """ side_num = int(np.ceil(num_buckets / 2.0)) cls_preds = cls_preds.view(-1, side_num) offset_preds = offset_preds.view(-1, side_num) scores = F.softmax(cls_preds, dim=1) score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True) rescaled_proposals = bbox_rescale(proposals, scale_factor) pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0] ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1] px1 = rescaled_proposals[..., 0] py1 = rescaled_proposals[..., 1] px2 = rescaled_proposals[..., 2] py2 = rescaled_proposals[..., 3] bucket_w = pw / num_buckets bucket_h = ph / num_buckets score_inds_l = score_label[0::4, 0] score_inds_r = score_label[1::4, 0] score_inds_t = score_label[2::4, 0] score_inds_d = score_label[3::4, 0] l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h offsets = offset_preds.view(-1, 4, side_num) inds = torch.arange(proposals.size(0)).to(proposals).long() l_offsets = offsets[:, 0, :][inds, score_inds_l] r_offsets = offsets[:, 1, :][inds, score_inds_r] t_offsets = offsets[:, 2, :][inds, score_inds_t] d_offsets = offsets[:, 3, :][inds, score_inds_d] x1 = l_buckets - l_offsets * bucket_w x2 = r_buckets - r_offsets * bucket_w y1 = t_buckets - t_offsets * bucket_h y2 = d_buckets - d_offsets * bucket_h if clip_border and max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]], dim=-1) # bucketing guided rescoring loc_confidence = score_topk[:, 0] top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1 loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float() loc_confidence = loc_confidence.view(-1, 4).mean(dim=1) return bboxes, loc_confidence
15,197
40.411444
79
py
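generat_buckets places side_num bucket centres inward from each proposal edge; the x-axis half of that layout for a single proposal (plain torch sketch, skipping the bbox_rescale step):

import torch

num_buckets = 8
side_num = 4                                  # ceil(num_buckets / 2)
x1, x2 = 100., 200.
bucket_w = (x2 - x1) / num_buckets            # 12.5
centers = 0.5 + torch.arange(side_num).float()
l_buckets = x1 + centers * bucket_w   # [106.25, 118.75, 131.25, 143.75]
r_buckets = x2 - centers * bucket_w   # [193.75, 181.25, 168.75, 156.25]
print(l_buckets.tolist(), r_buckets.tolist())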
ERD
ERD-main/mmdet/models/task_modules/coders/pseudo_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Union from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class PseudoBBoxCoder(BaseBBoxCoder): """Pseudo bounding box coder.""" def __init__(self, **kwargs): super().__init__(**kwargs) def encode(self, bboxes: Tensor, gt_bboxes: Union[Tensor, BaseBoxes]) -> Tensor: """torch.Tensor: return the given ``gt_bboxes``""" gt_bboxes = get_box_tensor(gt_bboxes) return gt_bboxes def decode(self, bboxes: Tensor, pred_bboxes: Union[Tensor, BaseBoxes]) -> Tensor: """torch.Tensor: return the given ``pred_bboxes``""" if self.use_box_type: pred_bboxes = HorizontalBoxes(pred_bboxes) return pred_bboxes
1,019
33
78
py
ERD
ERD-main/mmdet/models/task_modules/coders/tblr_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence, Union import torch from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class TBLRBBoxCoder(BaseBBoxCoder): """TBLR BBox coder. Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_, this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, right) and decode it back to the original. Args: normalizer (list | float): Normalization factor to be divided with when coding the coordinates. If it is a list, it should have length of 4 indicating normalization factor in tblr dims. Otherwise it is a unified float factor for all dims. Default: 4.0 clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, normalizer: Union[Sequence[float], float] = 4.0, clip_border: bool = True, **kwargs) -> None: super().__init__(**kwargs) self.normalizer = normalizer self.clip_border = clip_border def encode(self, bboxes: Union[Tensor, BaseBoxes], gt_bboxes: Union[Tensor, BaseBoxes]) -> Tensor: """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes`` in the (top, left, bottom, right) order. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the transformation, e.g., ground truth boxes. Returns: torch.Tensor: Box transformation deltas """ bboxes = get_box_tensor(bboxes) gt_bboxes = get_box_tensor(gt_bboxes) assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bboxes2tblr( bboxes, gt_bboxes, normalizer=self.normalizer) return encoded_bboxes def decode( self, bboxes: Union[Tensor, BaseBoxes], pred_bboxes: Tensor, max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None ) -> Union[Tensor, BaseBoxes]: """Apply transformation `pred_bboxes` to `boxes`. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes.Shape (B, N, 4) or (N, 4) pred_bboxes (torch.Tensor): Encoded boxes with shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes. """ bboxes = get_box_tensor(bboxes) decoded_bboxes = tblr2bboxes( bboxes, pred_bboxes, normalizer=self.normalizer, max_shape=max_shape, clip_border=self.clip_border) if self.use_box_type: decoded_bboxes = HorizontalBoxes(decoded_bboxes) return decoded_bboxes def bboxes2tblr(priors: Tensor, gts: Tensor, normalizer: Union[Sequence[float], float] = 4.0, normalize_by_wh: bool = True) -> Tensor: """Encode ground truth boxes to tblr coordinate. It first convert the gt coordinate to tblr format, (top, bottom, left, right), relative to prior box centers. The tblr coordinate may be normalized by the side length of prior bboxes if `normalize_by_wh` is specified as True, and it is then normalized by the `normalizer` factor. Args: priors (Tensor): Prior boxes in point form Shape: (num_proposals,4). gts (Tensor): Coords of ground truth for each prior in point-form Shape: (num_proposals, 4). 
normalizer (Sequence[float] | float): normalization parameter of encoded boxes. If it is a list, it has to have length = 4. Default: 4.0 normalize_by_wh (bool): Whether to normalize tblr coordinate by the side length (wh) of prior bboxes. Return: encoded boxes (Tensor), Shape: (num_proposals, 4) """ # dist b/t match center and prior's center if not isinstance(normalizer, float): normalizer = torch.tensor(normalizer, device=priors.device) assert len(normalizer) == 4, 'Normalizer must have length = 4' assert priors.size(0) == gts.size(0) prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2 xmin, ymin, xmax, ymax = gts.split(1, dim=1) top = prior_centers[:, 1].unsqueeze(1) - ymin bottom = ymax - prior_centers[:, 1].unsqueeze(1) left = prior_centers[:, 0].unsqueeze(1) - xmin right = xmax - prior_centers[:, 0].unsqueeze(1) loc = torch.cat((top, bottom, left, right), dim=1) if normalize_by_wh: # Normalize tblr by anchor width and height wh = priors[:, 2:4] - priors[:, 0:2] w, h = torch.split(wh, 1, dim=1) loc[:, :2] /= h # tb is normalized by h loc[:, 2:] /= w # lr is normalized by w # Normalize tblr by the given normalization factor return loc / normalizer def tblr2bboxes(priors: Tensor, tblr: Tensor, normalizer: Union[Sequence[float], float] = 4.0, normalize_by_wh: bool = True, max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None, clip_border: bool = True) -> Tensor: """Decode tblr outputs to prediction boxes. The process includes 3 steps: 1) De-normalize tblr coordinates by multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert tblr (top, bottom, left, right) pair relative to the center of priors back to (xmin, ymin, xmax, ymax) coordinate. Args: priors (Tensor): Prior boxes in point form (x0, y0, x1, y1) Shape: (N,4) or (B, N, 4). tblr (Tensor): Coords of network output in tblr form Shape: (N, 4) or (B, N, 4). normalizer (Sequence[float] | float): Normalization parameter of encoded boxes. By list, it represents the normalization factors at tblr dims. By float, it is the unified normalization factor at all dims. Default: 4.0 normalize_by_wh (bool): Whether the tblr coordinates have been normalized by the side length (wh) of prior bboxes. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. 
Returns: Decoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4) """ if not isinstance(normalizer, float): normalizer = torch.tensor(normalizer, device=priors.device) assert len(normalizer) == 4, 'Normalizer must have length = 4' assert priors.size(0) == tblr.size(0) if priors.ndim == 3: assert priors.size(1) == tblr.size(1) loc_decode = tblr * normalizer prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2 if normalize_by_wh: wh = priors[..., 2:4] - priors[..., 0:2] w, h = torch.split(wh, 1, dim=-1) # In-place operation with slice would fail when exporting to ONNX th = h * loc_decode[..., :2] # tb tw = w * loc_decode[..., 2:] # lr loc_decode = torch.cat([th, tw], dim=-1) # loc_decode.split(1, dim=-1) cannot be exported to ONNX top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1) xmin = prior_centers[..., 0].unsqueeze(-1) - left xmax = prior_centers[..., 0].unsqueeze(-1) + right ymin = prior_centers[..., 1].unsqueeze(-1) - top ymax = prior_centers[..., 1].unsqueeze(-1) + bottom bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1) if clip_border and max_shape is not None: # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): from mmdet.core.export import dynamic_clip_for_onnx xmin, ymin, xmax, ymax = dynamic_clip_for_onnx( xmin, ymin, xmax, ymax, max_shape) bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = priors.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(priors) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = priors.new_tensor(0) max_xy = torch.cat([max_shape, max_shape], dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes
9,724
41.467249
78
py
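One prior/gt pair pushed through the tblr encoding above, with normalize_by_wh=True and the default normalizer 4.0 (plain torch, toy boxes):

import torch

prior = torch.tensor([40., 40., 120., 140.])  # w=80, h=100, centre (80, 90)
gt = torch.tensor([50., 30., 110., 150.])
cx, cy, w, h, norm = 80., 90., 80., 100., 4.0

top = (cy - gt[1]) / h / norm      # 0.1500
bottom = (gt[3] - cy) / h / norm   # 0.1500
left = (cx - gt[0]) / w / norm     # 0.0938
right = (gt[2] - cx) / w / norm    # 0.0938
print(torch.stack([top, bottom, left, right]))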
ERD
ERD-main/mmdet/models/task_modules/coders/legacy_delta_xywh_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence, Union import numpy as np import torch from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder): """Legacy Delta XYWH BBox coder used in MMDet V1.x. Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2). Note: The main difference between :class:`LegacyDeltaXYWHBBoxCoder` and :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and height calculation. We suggest to only use this coder when testing with MMDet V1.x models. References: .. [1] https://arxiv.org/abs/1311.2524 Args: target_means (Sequence[float]): denormalizing means of target for delta coordinates target_stds (Sequence[float]): denormalizing standard deviation of target for delta coordinates """ def __init__(self, target_means: Sequence[float] = (0., 0., 0., 0.), target_stds: Sequence[float] = (1., 1., 1., 1.), **kwargs) -> None: super().__init__(**kwargs) self.means = target_means self.stds = target_stds def encode(self, bboxes: Union[Tensor, BaseBoxes], gt_bboxes: Union[Tensor, BaseBoxes]) -> Tensor: """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the transformation, e.g., ground-truth boxes. Returns: torch.Tensor: Box transformation deltas """ bboxes = get_box_tensor(bboxes) gt_bboxes = get_box_tensor(gt_bboxes) assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means, self.stds) return encoded_bboxes def decode( self, bboxes: Union[Tensor, BaseBoxes], pred_bboxes: Tensor, max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None, wh_ratio_clip: Optional[float] = 16 / 1000 ) -> Union[Tensor, BaseBoxes]: """Apply transformation `pred_bboxes` to `bboxes`. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes. pred_bboxes (torch.Tensor): Encoded boxes with shape (N, 4) or (N, num_classes * 4). max_shape (tuple[int], optional): Maximum shape of boxes. Defaults to None. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes. """ bboxes = get_box_tensor(bboxes) assert pred_bboxes.size(0) == bboxes.size(0) decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip) if self.use_box_type: assert decoded_bboxes.size(-1) == 4, \ ('Cannot wrap decoded boxes with box type when decoded boxes ' 'have shape of (N, num_classes * 4)') decoded_bboxes = HorizontalBoxes(decoded_bboxes) return decoded_bboxes def legacy_bbox2delta( proposals: Tensor, gt: Tensor, means: Sequence[float] = (0., 0., 0., 0.), stds: Sequence[float] = (1., 1., 1., 1.) ) -> Tensor: """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner. We usually compute the deltas of x, y, w, h of proposals w.r.t ground truth bboxes to get regression target. 
This is the inverse function of `legacy_delta2bbox()`. Args: proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates Returns: Tensor: deltas with shape (N, 4), where columns represent dx, dy, dw, dh. """ assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] + 1.0 ph = proposals[..., 3] - proposals[..., 1] + 1.0 gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] + 1.0 gh = gt[..., 3] - gt[..., 1] + 1.0 dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas def legacy_delta2bbox(rois: Tensor, deltas: Tensor, means: Sequence[float] = (0., 0., 0., 0.), stds: Sequence[float] = (1., 1., 1., 1.), max_shape: Optional[ Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None, wh_ratio_clip: float = 16 / 1000) -> Tensor: """Apply deltas to shift/scale base boxes in the MMDet V1.x manner. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of `legacy_bbox2delta()`. Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W) wh_ratio_clip (float): Maximum aspect ratio for boxes. Returns: Tensor: Boxes with shape (N, 4), where columns represent tl_x, tl_y, br_x, br_y. References: .. 
[1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32)) tensor([[0.0000, 0.0000, 1.5000, 1.5000], [0.0000, 0.0000, 5.2183, 5.2183], [0.0000, 0.1321, 7.8891, 0.8679], [5.3967, 2.4251, 6.0033, 3.7749]]) """ means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[:, 0::4] dy = denorm_deltas[:, 1::4] dw = denorm_deltas[:, 2::4] dh = denorm_deltas[:, 3::4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Compute center of each roi px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) # Compute width/height of each roi pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = px + pw * dx gy = py + ph * dy # Convert center-xy/width/height to top-left, bottom-right # The true legacy box coder should +- 0.5 here. # However, current implementation improves the performance when testing # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP) x1 = gx - gw * 0.5 y1 = gy - gh * 0.5 x2 = gx + gw * 0.5 y2 = gy + gh * 0.5 if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) return bboxes
9,370
38.707627
79
py
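The single behavioural difference called out in the class docstring is the legacy ``+ 1`` in width/height; side by side on one box (plain torch):

import torch

box = torch.tensor([0., 0., 9., 19.])
w_legacy = box[2] - box[0] + 1.0   # 10.0: MMDet V1.x pixel convention
w_current = box[2] - box[0]        # 9.0: convention of DeltaXYWHBBoxCoder
print(w_legacy.item(), w_current.item())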
ERD
ERD-main/mmdet/models/task_modules/coders/delta_xywh_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import Optional, Sequence, Union import numpy as np import torch from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor from .base_bbox_coder import BaseBBoxCoder @TASK_UTILS.register_module() class DeltaXYWHBBoxCoder(BaseBBoxCoder): """Delta XYWH BBox coder. Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_, this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2). Args: target_means (Sequence[float]): Denormalizing means of target for delta coordinates target_stds (Sequence[float]): Denormalizing standard deviation of target for delta coordinates clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. add_ctr_clamp (bool): Whether to add center clamp; when added, the predicted box is clamped if its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. """ def __init__(self, target_means: Sequence[float] = (0., 0., 0., 0.), target_stds: Sequence[float] = (1., 1., 1., 1.), clip_border: bool = True, add_ctr_clamp: bool = False, ctr_clamp: int = 32, **kwargs) -> None: super().__init__(**kwargs) self.means = target_means self.stds = target_stds self.clip_border = clip_border self.add_ctr_clamp = add_ctr_clamp self.ctr_clamp = ctr_clamp def encode(self, bboxes: Union[Tensor, BaseBoxes], gt_bboxes: Union[Tensor, BaseBoxes]) -> Tensor: """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes, e.g., object proposals. gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the transformation, e.g., ground-truth boxes. Returns: torch.Tensor: Box transformation deltas """ bboxes = get_box_tensor(bboxes) gt_bboxes = get_box_tensor(gt_bboxes) assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds) return encoded_bboxes def decode( self, bboxes: Union[Tensor, BaseBoxes], pred_bboxes: Tensor, max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None, wh_ratio_clip: Optional[float] = 16 / 1000 ) -> Union[Tensor, BaseBoxes]: """Apply transformation `pred_bboxes` to `boxes`. Args: bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes. Shape (B, N, 4) or (N, 4) pred_bboxes (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors. Offset encoding follows [1]_. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes. 
""" bboxes = get_box_tensor(bboxes) assert pred_bboxes.size(0) == bboxes.size(0) if pred_bboxes.ndim == 3: assert pred_bboxes.size(1) == bboxes.size(1) if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export(): # single image decode decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip, self.clip_border, self.add_ctr_clamp, self.ctr_clamp) else: if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export(): warnings.warn( 'DeprecationWarning: onnx_delta2bbox is deprecated ' 'in the case of batch decoding and non-ONNX, ' 'please use “delta2bbox” instead. In order to improve ' 'the decoding speed, the batch function will no ' 'longer be supported. ') decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip, self.clip_border, self.add_ctr_clamp, self.ctr_clamp) if self.use_box_type: assert decoded_bboxes.size(-1) == 4, \ ('Cannot warp decoded boxes with box type when decoded boxes' 'have shape of (N, num_classes * 4)') decoded_bboxes = HorizontalBoxes(decoded_bboxes) return decoded_bboxes def bbox2delta( proposals: Tensor, gt: Tensor, means: Sequence[float] = (0., 0., 0., 0.), stds: Sequence[float] = (1., 1., 1., 1.) ) -> Tensor: """Compute deltas of proposals w.r.t. gt. We usually compute the deltas of x, y, w, h of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of :func:`delta2bbox`. Args: proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates Returns: Tensor: deltas with shape (N, 4), where columns represent dx, dy, dw, dh. """ assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] ph = proposals[..., 3] - proposals[..., 1] gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] gh = gt[..., 3] - gt[..., 1] dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas def delta2bbox(rois: Tensor, deltas: Tensor, means: Sequence[float] = (0., 0., 0., 0.), stds: Sequence[float] = (1., 1., 1., 1.), max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None, wh_ratio_clip: float = 16 / 1000, clip_border: bool = True, add_ctr_clamp: bool = False, ctr_clamp: int = 32) -> Tensor: """Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4). deltas (Tensor): Encoded offsets relative to each roi. Has shape (N, num_classes * 4) or (N, 4). Note N = num_base_anchors * W * H, when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1.). 
max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W). Default None. wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. clip_border (bool, optional): Whether clip the objects outside the border of the image. Default True. add_ctr_clamp (bool): Whether to add center clamp. When set to True, the center of the prediction bounding box will be clamped to avoid being too far away from the center of the anchor. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Returns: Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. References: .. [1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) tensor([[0.0000, 0.0000, 1.0000, 1.0000], [0.1409, 0.1409, 2.8591, 2.8591], [0.0000, 0.3161, 4.1945, 0.6839], [5.0000, 5.0000, 5.0000, 5.0000]]) """ num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4 if num_bboxes == 0: return deltas deltas = deltas.reshape(-1, 4) means = deltas.new_tensor(means).view(1, -1) stds = deltas.new_tensor(stds).view(1, -1) denorm_deltas = deltas * stds + means dxy = denorm_deltas[:, :2] dwh = denorm_deltas[:, 2:] # Compute width/height of each roi rois_ = rois.repeat(1, num_classes).reshape(-1, 4) pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5) pwh = (rois_[:, 2:] - rois_[:, :2]) dxy_wh = pwh * dxy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) dwh = torch.clamp(dwh, max=max_ratio) else: dwh = dwh.clamp(min=-max_ratio, max=max_ratio) gxy = pxy + dxy_wh gwh = pwh * dwh.exp() x1y1 = gxy - (gwh * 0.5) x2y2 = gxy + (gwh * 0.5) bboxes = torch.cat([x1y1, x2y2], dim=-1) if clip_border and max_shape is not None: bboxes[..., 0::2].clamp_(min=0, max=max_shape[1]) bboxes[..., 1::2].clamp_(min=0, max=max_shape[0]) bboxes = bboxes.reshape(num_bboxes, -1) return bboxes def onnx_delta2bbox(rois: Tensor, deltas: Tensor, means: Sequence[float] = (0., 0., 0., 0.), stds: Sequence[float] = (1., 1., 1., 1.), max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None, wh_ratio_clip: float = 16 / 1000, clip_border: Optional[bool] = True, add_ctr_clamp: bool = False, ctr_clamp: int = 32) -> Tensor: """Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1.). max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If rois shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. 
Default None. wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. clip_border (bool, optional): Whether clip the objects outside the border of the image. Default True. add_ctr_clamp (bool): Whether to add center clamp; when added, the predicted box is clamped if its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Returns: Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. References: .. [1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> onnx_delta2bbox(rois, deltas, max_shape=(32, 32, 3)) tensor([[0.0000, 0.0000, 1.0000, 1.0000], [0.1409, 0.1409, 2.8591, 2.8591], [0.0000, 0.3161, 4.1945, 0.6839], [5.0000, 5.0000, 5.0000, 5.0000]]) """ means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(-1) // 4) stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[..., 0::4] dy = denorm_deltas[..., 1::4] dw = denorm_deltas[..., 2::4] dh = denorm_deltas[..., 3::4] x1, y1 = rois[..., 0], rois[..., 1] x2, y2 = rois[..., 2], rois[..., 3] # Compute center of each roi px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx) py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy) # Compute width/height of each roi pw = (x2 - x1).unsqueeze(-1).expand_as(dw) ph = (y2 - y1).unsqueeze(-1).expand_as(dh) dx_width = pw * dx dy_height = ph * dy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp) dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp) dw = torch.clamp(dw, max=max_ratio) dh = torch.clamp(dh, max=max_ratio) else: dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = px + dx_width gy = py + dy_height # Convert center-xy/width/height to top-left, bottom-right x1 = gx - gw * 0.5 y1 = gy - gh * 0.5 x2 = gx + gw * 0.5 y2 = gy + gh * 0.5 bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) if clip_border and max_shape is not None: # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): from mmdet.core.export import dynamic_clip_for_onnx x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = x1.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(x1) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = x1.new_tensor(0) max_xy = torch.cat( [max_shape] * (deltas.size(-1) // 2), dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes
17,417
41.174334
79
py
ERD
ERD-main/mmdet/models/roi_heads/standard_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import DetDataSample, SampleList from mmdet.structures.bbox import bbox2roi from mmdet.utils import ConfigType, InstanceList from ..task_modules.samplers import SamplingResult from ..utils import empty_instances, unpack_gt_instances from .base_roi_head import BaseRoIHead @MODELS.register_module() class StandardRoIHead(BaseRoIHead): """Simplest base roi head including one bbox head and one mask head.""" def init_assigner_sampler(self) -> None: """Initialize assigner and sampler.""" self.bbox_assigner = None self.bbox_sampler = None if self.train_cfg: self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner) self.bbox_sampler = TASK_UTILS.build( self.train_cfg.sampler, default_args=dict(context=self)) def init_bbox_head(self, bbox_roi_extractor: ConfigType, bbox_head: ConfigType) -> None: """Initialize box head and box roi extractor. Args: bbox_roi_extractor (dict or ConfigDict): Config of box roi extractor. bbox_head (dict or ConfigDict): Config of box in box head. """ self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor) self.bbox_head = MODELS.build(bbox_head) def init_mask_head(self, mask_roi_extractor: ConfigType, mask_head: ConfigType) -> None: """Initialize mask head and mask roi extractor. Args: mask_roi_extractor (dict or ConfigDict): Config of mask roi extractor. mask_head (dict or ConfigDict): Config of mask in mask head. """ if mask_roi_extractor is not None: self.mask_roi_extractor = MODELS.build(mask_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor self.mask_head = MODELS.build(mask_head) # TODO: Need to refactor later def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList = None) -> tuple: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: x (List[Tensor]): Multi-level features that may have different resolutions. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. Returns tuple: A tuple of features from ``bbox_head`` and ``mask_head`` forward. """ results = () proposals = [rpn_results.bboxes for rpn_results in rpn_results_list] rois = bbox2roi(proposals) # bbox head if self.with_bbox: bbox_results = self._bbox_forward(x, rois) results = results + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) results = results + (mask_results['mask_preds'], ) return results def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: List[DetDataSample]) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. 
Returns: dict[str, Tensor]: A dictionary of loss components """ assert len(rpn_results_list) == len(batch_data_samples) outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, _ = outputs # assign gts and sample proposals num_imgs = len(batch_data_samples) sampling_results = [] for i in range(num_imgs): # rename rpn_results.bboxes to rpn_results.priors rpn_results = rpn_results_list[i] rpn_results.priors = rpn_results.pop('bboxes') assign_result = self.bbox_assigner.assign( rpn_results, batch_gt_instances[i], batch_gt_instances_ignore[i]) sampling_result = self.bbox_sampler.sample( assign_result, rpn_results, batch_gt_instances[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) losses = dict() # bbox head loss if self.with_bbox: bbox_results = self.bbox_loss(x, sampling_results) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self.mask_loss(x, sampling_results, bbox_results['bbox_feats'], batch_gt_instances) losses.update(mask_results['loss_mask']) return losses def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict: """Box head forward function used in both training and testing. Args: x (tuple[Tensor]): List of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. """ # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def bbox_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult]) -> dict: """Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components. """ rois = bbox2roi([res.priors for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_loss_and_target = self.bbox_head.loss_and_target( cls_score=bbox_results['cls_score'], bbox_pred=bbox_results['bbox_pred'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg) bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox']) return bbox_results def mask_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult], bbox_feats: Tensor, batch_gt_instances: InstanceList) -> dict: """Perform forward propagation and loss calculation of the mask head on the features of the upstream network. Args: x (tuple[Tensor]): Tuple of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. bbox_feats (Tensor): Extract bbox RoI features. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. 
Returns: dict: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. - `mask_feats` (Tensor): Extract mask RoI features. - `mask_targets` (Tensor): Mask target of each positive\ proposals in the image. - `loss_mask` (dict): A dictionary of mask loss components. """ if not self.share_roi_extractor: pos_rois = bbox2roi([res.pos_priors for res in sampling_results]) mask_results = self._mask_forward(x, pos_rois) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_priors.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_priors.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_results = self._mask_forward( x, pos_inds=pos_inds, bbox_feats=bbox_feats) mask_loss_and_target = self.mask_head.loss_and_target( mask_preds=mask_results['mask_preds'], sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=self.train_cfg) mask_results.update(loss_mask=mask_loss_and_target['loss_mask']) return mask_results def _mask_forward(self, x: Tuple[Tensor], rois: Tensor = None, pos_inds: Optional[Tensor] = None, bbox_feats: Optional[Tensor] = None) -> dict: """Mask head forward function used in both training and testing. Args: x (tuple[Tensor]): Tuple of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. pos_inds (Tensor, optional): Indices of positive samples. Defaults to None. bbox_feats (Tensor): Extract bbox RoI features. Defaults to None. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. - `mask_feats` (Tensor): Extract mask RoI features. """ assert ((rois is not None) ^ (pos_inds is not None and bbox_feats is not None)) if rois is not None: mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: assert bbox_feats is not None mask_feats = bbox_feats[pos_inds] mask_preds = self.mask_head(mask_feats) mask_results = dict(mask_preds=mask_preds, mask_feats=mask_feats) return mask_results def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType, rescale: bool = False) -> InstanceList: """Perform forward propagation of the bbox head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" proposals = [res.bboxes for res in rpn_results_list] rois = bbox2roi(proposals) if rois.shape[0] == 0: return empty_instances( batch_img_metas, rois.device, task_type='bbox', box_type=self.bbox_head.predict_box_type, num_classes=self.bbox_head.num_classes, score_per_cls=rcnn_test_cfg is None) bbox_results = self._bbox_forward(x, rois) # split batch bbox prediction back to each image cls_scores = bbox_results['cls_score'] bbox_preds = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposals) rois = rois.split(num_proposals_per_img, 0) cls_scores = cls_scores.split(num_proposals_per_img, 0) # some detector with_reg is False, bbox_preds will be None if bbox_preds is not None: # TODO move this to a sabl_roi_head # the bbox prediction of some detectors like SABL is not Tensor if isinstance(bbox_preds, torch.Tensor): bbox_preds = bbox_preds.split(num_proposals_per_img, 0) else: bbox_preds = self.bbox_head.bbox_pred_split( bbox_preds, num_proposals_per_img) else: bbox_preds = (None, ) * len(proposals) result_list = self.bbox_head.predict_by_feat( rois=rois, cls_scores=cls_scores, bbox_preds=bbox_preds, batch_img_metas=batch_img_metas, rcnn_test_cfg=rcnn_test_cfg, rescale=rescale) return result_list def predict_mask(self, x: Tuple[Tensor], batch_img_metas: List[dict], results_list: InstanceList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the mask head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. results_list (list[:obj:`InstanceData`]): Detection results of each image. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W). """ # don't need to consider aug_test. bboxes = [res.bboxes for res in results_list] mask_rois = bbox2roi(bboxes) if mask_rois.shape[0] == 0: results_list = empty_instances( batch_img_metas, mask_rois.device, task_type='mask', instance_results=results_list, mask_thr_binary=self.test_cfg.mask_thr_binary) return results_list mask_results = self._mask_forward(x, mask_rois) mask_preds = mask_results['mask_preds'] # split batch mask prediction back to each image num_mask_rois_per_img = [len(res) for res in results_list] mask_preds = mask_preds.split(num_mask_rois_per_img, 0) # TODO: Handle the case where rescale is false results_list = self.mask_head.predict_by_feat( mask_preds=mask_preds, results_list=results_list, batch_img_metas=batch_img_metas, rcnn_test_cfg=self.test_cfg, rescale=rescale) return results_list
17,440
40.52619
79
py
ERD
ERD-main/mmdet/models/roi_heads/grid_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple

import torch
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList
from ..task_modules.samplers import SamplingResult
from ..utils.misc import unpack_gt_instances
from .standard_roi_head import StandardRoIHead


@MODELS.register_module()
class GridRoIHead(StandardRoIHead):
    """Implementation of `Grid RoI Head <https://arxiv.org/abs/1811.12030>`_

    Args:
        grid_roi_extractor (:obj:`ConfigDict` or dict): Config of roi
            extractor.
        grid_head (:obj:`ConfigDict` or dict): Config of grid head.
    """

    def __init__(self, grid_roi_extractor: ConfigType, grid_head: ConfigType,
                 **kwargs) -> None:
        assert grid_head is not None
        super().__init__(**kwargs)
        if grid_roi_extractor is not None:
            self.grid_roi_extractor = MODELS.build(grid_roi_extractor)
            self.share_roi_extractor = False
        else:
            self.share_roi_extractor = True
            self.grid_roi_extractor = self.bbox_roi_extractor
        self.grid_head = MODELS.build(grid_head)

    def _random_jitter(self,
                       sampling_results: List[SamplingResult],
                       batch_img_metas: List[dict],
                       amplitude: float = 0.15) -> List[SamplingResult]:
        """Randomly jitter positive proposals for training.

        Args:
            sampling_results (list[:obj:`SamplingResult`]): Assign results of
                all images in a batch after sampling.
            batch_img_metas (list[dict]): List of image information.
            amplitude (float): Amplitude of random offset. Defaults to 0.15.

        Returns:
            list[:obj:`SamplingResult`]: SamplingResults after random
            jittering.
        """
        for sampling_result, img_meta in zip(sampling_results,
                                             batch_img_metas):
            bboxes = sampling_result.pos_priors
            random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
                -amplitude, amplitude)
            # before jittering
            cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
            wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
            # after jittering
            new_cxcy = cxcy + wh * random_offsets[:, :2]
            new_wh = wh * (1 + random_offsets[:, 2:])
            # xywh to xyxy
            new_x1y1 = (new_cxcy - new_wh / 2)
            new_x2y2 = (new_cxcy + new_wh / 2)
            new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
            # clip bboxes
            max_shape = img_meta['img_shape']
            if max_shape is not None:
                new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
                new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)

            sampling_result.pos_priors = new_bboxes

        return sampling_results

    # TODO: Forward is incorrect and needs to be refactored.
    def forward(self,
                x: Tuple[Tensor],
                rpn_results_list: InstanceList,
                batch_data_samples: SampleList = None) -> tuple:
        """Network forward process. Usually includes backbone, neck and head
        forward without any post-processing.

        Args:
            x (Tuple[Tensor]): Multi-level features that may have different
                resolutions.
            rpn_results_list (list[:obj:`InstanceData`]): List of region
                proposals.
            batch_data_samples (list[:obj:`DetDataSample`]): Each item
                contains the meta information of each image and corresponding
                annotations.

        Returns:
            tuple: A tuple of features from ``bbox_head`` and ``mask_head``
            forward.
""" results = () proposals = [rpn_results.bboxes for rpn_results in rpn_results_list] rois = bbox2roi(proposals) # bbox head if self.with_bbox: bbox_results = self._bbox_forward(x, rois) results = results + (bbox_results['cls_score'], ) if self.bbox_head.with_reg: results = results + (bbox_results['bbox_pred'], ) # grid head grid_rois = rois[:100] grid_feats = self.grid_roi_extractor( x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) self.grid_head.test_mode = True grid_preds = self.grid_head(grid_feats) results = results + (grid_preds, ) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) results = results + (mask_results['mask_preds'], ) return results def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList, **kwargs) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. Returns: dict[str, Tensor]: A dictionary of loss components """ assert len(rpn_results_list) == len(batch_data_samples) outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs # assign gts and sample proposals num_imgs = len(batch_data_samples) sampling_results = [] for i in range(num_imgs): # rename rpn_results.bboxes to rpn_results.priors rpn_results = rpn_results_list[i] rpn_results.priors = rpn_results.pop('bboxes') assign_result = self.bbox_assigner.assign( rpn_results, batch_gt_instances[i], batch_gt_instances_ignore[i]) sampling_result = self.bbox_sampler.sample( assign_result, rpn_results, batch_gt_instances[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) losses = dict() # bbox head loss if self.with_bbox: bbox_results = self.bbox_loss(x, sampling_results, batch_img_metas) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self.mask_loss(x, sampling_results, bbox_results['bbox_feats'], batch_gt_instances) losses.update(mask_results['loss_mask']) return losses def bbox_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult], batch_img_metas: Optional[List[dict]] = None) -> dict: """Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. sampling_results (list[:obj:`SamplingResult`]): Sampling results. batch_img_metas (list[dict], optional): Meta information of each image, e.g., image size, scaling factor, etc. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components. 
""" assert batch_img_metas is not None bbox_results = super().bbox_loss(x, sampling_results) # Grid head forward and loss sampling_results = self._random_jitter(sampling_results, batch_img_metas) pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) # GN in head does not support zero shape input if pos_rois.shape[0] == 0: return bbox_results grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) # Accelerate training max_sample_num_grid = self.train_cfg.get('max_num_grid', 192) sample_idx = torch.randperm( grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid )] grid_feats = grid_feats[sample_idx] grid_pred = self.grid_head(grid_feats) loss_grid = self.grid_head.loss(grid_pred, sample_idx, sampling_results, self.train_cfg) bbox_results['loss_bbox'].update(loss_grid) return bbox_results def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType, rescale: bool = False) -> InstanceList: """Perform forward propagation of the bbox head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. rcnn_test_cfg (:obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape \ (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last \ dimension 4 arrange as (x1, y1, x2, y2). """ results_list = super().predict_bbox( x, batch_img_metas=batch_img_metas, rpn_results_list=rpn_results_list, rcnn_test_cfg=rcnn_test_cfg, rescale=False) grid_rois = bbox2roi([res.bboxes for res in results_list]) if grid_rois.shape[0] != 0: grid_feats = self.grid_roi_extractor( x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) self.grid_head.test_mode = True grid_preds = self.grid_head(grid_feats) results_list = self.grid_head.predict_by_feat( grid_preds=grid_preds, results_list=results_list, batch_img_metas=batch_img_metas, rescale=rescale) return results_list
11,647
40.451957
79
py
ERD
ERD-main/mmdet/models/roi_heads/scnet_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch import torch.nn.functional as F from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox2roi from mmdet.utils import ConfigType, InstanceList, OptConfigType from ..layers import adaptive_avg_pool2d from ..task_modules.samplers import SamplingResult from ..utils import empty_instances, unpack_gt_instances from .cascade_roi_head import CascadeRoIHead @MODELS.register_module() class SCNetRoIHead(CascadeRoIHead): """RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: num_stages (int): number of cascade stages. stage_loss_weights (list): loss weight of cascade stages. semantic_roi_extractor (dict): config to init semantic roi extractor. semantic_head (dict): config to init semantic head. feat_relay_head (dict): config to init feature_relay_head. glbctx_head (dict): config to init global context head. """ def __init__(self, num_stages: int, stage_loss_weights: List[float], semantic_roi_extractor: OptConfigType = None, semantic_head: OptConfigType = None, feat_relay_head: OptConfigType = None, glbctx_head: OptConfigType = None, **kwargs) -> None: super().__init__( num_stages=num_stages, stage_loss_weights=stage_loss_weights, **kwargs) assert self.with_bbox and self.with_mask assert not self.with_shared_head # shared head is not supported if semantic_head is not None: self.semantic_roi_extractor = MODELS.build(semantic_roi_extractor) self.semantic_head = MODELS.build(semantic_head) if feat_relay_head is not None: self.feat_relay_head = MODELS.build(feat_relay_head) if glbctx_head is not None: self.glbctx_head = MODELS.build(glbctx_head) def init_mask_head(self, mask_roi_extractor: ConfigType, mask_head: ConfigType) -> None: """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = MODELS.build(mask_roi_extractor) self.mask_head = MODELS.build(mask_head) # TODO move to base_roi_head later @property def with_semantic(self) -> bool: """bool: whether the head has semantic head""" return hasattr(self, 'semantic_head') and self.semantic_head is not None @property def with_feat_relay(self) -> bool: """bool: whether the head has feature relay head""" return (hasattr(self, 'feat_relay_head') and self.feat_relay_head is not None) @property def with_glbctx(self) -> bool: """bool: whether the head has global context head""" return hasattr(self, 'glbctx_head') and self.glbctx_head is not None def _fuse_glbctx(self, roi_feats: Tensor, glbctx_feat: Tensor, rois: Tensor) -> Tensor: """Fuse global context feats with roi feats. Args: roi_feats (Tensor): RoI features. glbctx_feat (Tensor): Global context feature.. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: Tensor: Fused feature. """ assert roi_feats.size(0) == rois.size(0) # RuntimeError: isDifferentiableType(variable.scalar_type()) # INTERNAL ASSERT FAILED if detach() is not used when calling # roi_head.predict(). img_inds = torch.unique(rois[:, 0].detach().cpu(), sorted=True).long() fused_feats = torch.zeros_like(roi_feats) for img_id in img_inds: inds = (rois[:, 0] == img_id.item()) fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id] return fused_feats def _slice_pos_feats(self, feats: Tensor, sampling_results: List[SamplingResult]) -> Tensor: """Get features from pos rois. Args: feats (Tensor): Input features. 
sampling_results (list["obj:`SamplingResult`]): Sampling results. Returns: Tensor: Sliced features. """ num_rois = [res.priors.size(0) for res in sampling_results] num_pos_rois = [res.pos_priors.size(0) for res in sampling_results] inds = torch.zeros(sum(num_rois), dtype=torch.bool) start = 0 for i in range(len(num_rois)): start = 0 if i == 0 else start + num_rois[i - 1] stop = start + num_pos_rois[i] inds[start:stop] = 1 sliced_feats = feats[inds] return sliced_feats def _bbox_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor, semantic_feat: Optional[Tensor] = None, glbctx_feat: Optional[Tensor] = None) -> dict: """Box head forward function used in both training and testing. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): List of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. semantic_feat (Tensor): Semantic feature. Defaults to None. glbctx_feat (Tensor): Global context feature. Defaults to None. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. """ bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) if self.with_semantic and semantic_feat is not None: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: bbox_semantic_feat = adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats += bbox_semantic_feat if self.with_glbctx and glbctx_feat is not None: bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois) cls_score, bbox_pred, relayed_feat = bbox_head( bbox_feats, return_shared_feat=True) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, relayed_feat=relayed_feat) return bbox_results def _mask_forward(self, x: Tuple[Tensor], rois: Tensor, semantic_feat: Optional[Tensor] = None, glbctx_feat: Optional[Tensor] = None, relayed_feat: Optional[Tensor] = None) -> dict: """Mask head forward function used in both training and testing. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): Tuple of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. semantic_feat (Tensor): Semantic feature. Defaults to None. glbctx_feat (Tensor): Global context feature. Defaults to None. relayed_feat (Tensor): Relayed feature. Defaults to None. Returns: dict: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. 
""" mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], rois) if self.with_semantic and semantic_feat is not None: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat if self.with_glbctx and glbctx_feat is not None: mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois) if self.with_feat_relay and relayed_feat is not None: mask_feats = mask_feats + relayed_feat mask_preds = self.mask_head(mask_feats) mask_results = dict(mask_preds=mask_preds) return mask_results def bbox_loss(self, stage: int, x: Tuple[Tensor], sampling_results: List[SamplingResult], semantic_feat: Optional[Tensor] = None, glbctx_feat: Optional[Tensor] = None) -> dict: """Run forward function and calculate loss for box head in training. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): List of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. semantic_feat (Tensor): Semantic feature. Defaults to None. glbctx_feat (Tensor): Global context feature. Defaults to None. Returns: dict: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components. - `rois` (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. - `bbox_targets` (tuple): Ground truth for proposals in a single image. Containing the following list of Tensors: (labels, label_weights, bbox_targets, bbox_weights) """ bbox_head = self.bbox_head[stage] rois = bbox2roi([res.priors for res in sampling_results]) bbox_results = self._bbox_forward( stage, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) bbox_results.update(rois=rois) bbox_loss_and_target = bbox_head.loss_and_target( cls_score=bbox_results['cls_score'], bbox_pred=bbox_results['bbox_pred'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg[stage]) bbox_results.update(bbox_loss_and_target) return bbox_results def mask_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult], batch_gt_instances: InstanceList, semantic_feat: Optional[Tensor] = None, glbctx_feat: Optional[Tensor] = None, relayed_feat: Optional[Tensor] = None) -> dict: """Run forward function and calculate loss for mask head in training. Args: x (tuple[Tensor]): Tuple of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. semantic_feat (Tensor): Semantic feature. Defaults to None. glbctx_feat (Tensor): Global context feature. Defaults to None. relayed_feat (Tensor): Relayed feature. Defaults to None. Returns: dict: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. - `loss_mask` (dict): A dictionary of mask loss components. 
""" pos_rois = bbox2roi([res.pos_priors for res in sampling_results]) mask_results = self._mask_forward( x, pos_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_loss_and_target = self.mask_head.loss_and_target( mask_preds=mask_results['mask_preds'], sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=self.train_cfg[-1]) mask_results.update(mask_loss_and_target) return mask_results def semantic_loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict: """Semantic segmentation loss. Args: x (Tuple[Tensor]): Tuple of multi-level img features. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. Returns: dict: Usually returns a dictionary with keys: - `semantic_feat` (Tensor): Semantic feature. - `loss_seg` (dict): Semantic segmentation loss. """ gt_semantic_segs = [ data_sample.gt_sem_seg.sem_seg for data_sample in batch_data_samples ] gt_semantic_segs = torch.stack(gt_semantic_segs) semantic_pred, semantic_feat = self.semantic_head(x) loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_segs) semantic_results = dict(loss_seg=loss_seg, semantic_feat=semantic_feat) return semantic_results def global_context_loss(self, x: Tuple[Tensor], batch_gt_instances: InstanceList) -> dict: """Global context loss. Args: x (Tuple[Tensor]): Tuple of multi-level img features. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. Returns: dict: Usually returns a dictionary with keys: - `glbctx_feat` (Tensor): Global context feature. - `loss_glbctx` (dict): Global context loss. """ gt_labels = [ gt_instances.labels for gt_instances in batch_gt_instances ] mc_pred, glbctx_feat = self.glbctx_head(x) loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels) global_context_results = dict( loss_glbctx=loss_glbctx, glbctx_feat=glbctx_feat) return global_context_results def loss(self, x: Tensor, rpn_results_list: InstanceList, batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. 
Returns: dict[str, Tensor]: A dictionary of loss components """ assert len(rpn_results_list) == len(batch_data_samples) outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \ = outputs losses = dict() # semantic segmentation branch if self.with_semantic: semantic_results = self.semantic_loss( x=x, batch_data_samples=batch_data_samples) losses['loss_semantic_seg'] = semantic_results['loss_seg'] semantic_feat = semantic_results['semantic_feat'] else: semantic_feat = None # global context branch if self.with_glbctx: global_context_results = self.global_context_loss( x=x, batch_gt_instances=batch_gt_instances) losses['loss_glbctx'] = global_context_results['loss_glbctx'] glbctx_feat = global_context_results['glbctx_feat'] else: glbctx_feat = None results_list = rpn_results_list num_imgs = len(batch_img_metas) for stage in range(self.num_stages): stage_loss_weight = self.stage_loss_weights[stage] # assign gts and sample proposals sampling_results = [] bbox_assigner = self.bbox_assigner[stage] bbox_sampler = self.bbox_sampler[stage] for i in range(num_imgs): results = results_list[i] # rename rpn_results.bboxes to rpn_results.priors results.priors = results.pop('bboxes') assign_result = bbox_assigner.assign( results, batch_gt_instances[i], batch_gt_instances_ignore[i]) sampling_result = bbox_sampler.sample( assign_result, results, batch_gt_instances[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_results = self.bbox_loss( stage=stage, x=x, sampling_results=sampling_results, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) for name, value in bbox_results['loss_bbox'].items(): losses[f's{stage}.{name}'] = ( value * stage_loss_weight if 'loss' in name else value) # refine bboxes if stage < self.num_stages - 1: bbox_head = self.bbox_head[stage] with torch.no_grad(): results_list = bbox_head.refine_bboxes( sampling_results=sampling_results, bbox_results=bbox_results, batch_img_metas=batch_img_metas) if self.with_feat_relay: relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'], sampling_results) relayed_feat = self.feat_relay_head(relayed_feat) else: relayed_feat = None # mask head forward and loss mask_results = self.mask_loss( x=x, sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_stage_loss_weight = sum(self.stage_loss_weights) losses['loss_mask'] = mask_stage_loss_weight * mask_results[ 'loss_mask']['loss_mask'] return losses def predict(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the roi head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (N, C, H, W). rpn_results_list (list[:obj:`InstanceData`]): list of region proposals. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool): Whether to rescale the results to the original image. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). 
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arranged as (x1, y1, x2, y2).
            - masks (Tensor): Has a shape (num_instances, H, W).
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        batch_img_metas = [
            data_samples.metainfo for data_samples in batch_data_samples
        ]

        if self.with_semantic:
            _, semantic_feat = self.semantic_head(x)
        else:
            semantic_feat = None

        if self.with_glbctx:
            _, glbctx_feat = self.glbctx_head(x)
        else:
            glbctx_feat = None

        # TODO: nms_op in mmcv needs to be enhanced; the bbox result may
        #  differ when not rescaling in bbox_head

        # If it has the mask branch, the bbox branch does not need
        # to be scaled to the original image scale, because the mask
        # branch will scale both bbox and mask at the same time.
        bbox_rescale = rescale if not self.with_mask else False
        results_list = self.predict_bbox(
            x=x,
            semantic_feat=semantic_feat,
            glbctx_feat=glbctx_feat,
            batch_img_metas=batch_img_metas,
            rpn_results_list=rpn_results_list,
            rcnn_test_cfg=self.test_cfg,
            rescale=bbox_rescale)

        if self.with_mask:
            results_list = self.predict_mask(
                x=x,
                semantic_feat=semantic_feat,
                glbctx_feat=glbctx_feat,
                batch_img_metas=batch_img_metas,
                results_list=results_list,
                rescale=rescale)

        return results_list

    def predict_mask(self,
                     x: Tuple[Tensor],
                     semantic_feat: Tensor,
                     glbctx_feat: Tensor,
                     batch_img_metas: List[dict],
                     results_list: List[InstanceData],
                     rescale: bool = False) -> List[InstanceData]:
        """Perform forward propagation of the mask head and predict detection
        results on the features of the upstream network.

        Args:
            x (tuple[Tensor]): Feature maps of all scale level.
            semantic_feat (Tensor): Semantic feature.
            glbctx_feat (Tensor): Global context feature.
            batch_img_metas (list[dict]): List of image information.
            results_list (list[:obj:`InstanceData`]): Detection results of
                each image.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.

        Returns:
            list[:obj:`InstanceData`]: Detection results of each image
            after the post process.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arranged as (x1, y1, x2, y2).
            - masks (Tensor): Has a shape (num_instances, H, W).
        """
        bboxes = [res.bboxes for res in results_list]
        mask_rois = bbox2roi(bboxes)
        if mask_rois.shape[0] == 0:
            results_list = empty_instances(
                batch_img_metas=batch_img_metas,
                device=mask_rois.device,
                task_type='mask',
                instance_results=results_list,
                mask_thr_binary=self.test_cfg.mask_thr_binary)
            return results_list

        bboxes_results = self._bbox_forward(
            stage=-1,
            x=x,
            rois=mask_rois,
            semantic_feat=semantic_feat,
            glbctx_feat=glbctx_feat)
        relayed_feat = bboxes_results['relayed_feat']
        relayed_feat = self.feat_relay_head(relayed_feat)

        mask_results = self._mask_forward(
            x=x,
            rois=mask_rois,
            semantic_feat=semantic_feat,
            glbctx_feat=glbctx_feat,
            relayed_feat=relayed_feat)
        mask_preds = mask_results['mask_preds']

        # split batch mask prediction back to each image
        num_bbox_per_img = tuple(len(_bbox) for _bbox in bboxes)
        mask_preds = mask_preds.split(num_bbox_per_img, 0)

        results_list = self.mask_head.predict_by_feat(
            mask_preds=mask_preds,
            results_list=results_list,
            batch_img_metas=batch_img_metas,
            rcnn_test_cfg=self.test_cfg,
            rescale=rescale)

        return results_list

    def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
                batch_data_samples: SampleList) -> tuple:
        """Network forward process.
Usually includes backbone, neck and head forward without any post-processing. Args: x (List[Tensor]): Multi-level features that may have different resolutions. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. Returns tuple: A tuple of features from ``bbox_head`` and ``mask_head`` forward. """ results = () batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None if self.with_glbctx: _, glbctx_feat = self.glbctx_head(x) else: glbctx_feat = None proposals = [rpn_results.bboxes for rpn_results in rpn_results_list] num_proposals_per_img = tuple(len(p) for p in proposals) rois = bbox2roi(proposals) # bbox head if self.with_bbox: rois, cls_scores, bbox_preds = self._refine_roi( x=x, rois=rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, batch_img_metas=batch_img_metas, num_proposals_per_img=num_proposals_per_img) results = results + (cls_scores, bbox_preds) # mask head if self.with_mask: rois = torch.cat(rois) bboxes_results = self._bbox_forward( stage=-1, x=x, rois=rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) relayed_feat = bboxes_results['relayed_feat'] relayed_feat = self.feat_relay_head(relayed_feat) mask_results = self._mask_forward( x=x, rois=rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_preds = mask_results['mask_preds'] mask_preds = mask_preds.split(num_proposals_per_img, 0) results = results + (mask_preds, ) return results
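

# --- Hedged sketch (added for illustration; not part of the original file).
# `_fuse_glbctx` above adds the per-image global-context vector to every RoI
# feature of that image, routed through column 0 of the RoI tensor; a
# minimal reenactment with two images and two RoIs each.
def _demo_fuse_glbctx() -> None:
    roi_feats = torch.zeros(4, 2)  # 4 RoIs with dim-2 features
    rois = torch.tensor([[0.], [0.], [1.], [1.]])  # batch ids only
    glbctx_feat = torch.tensor([[1., 1.], [2., 2.]])  # one vector per image
    fused = torch.zeros_like(roi_feats)
    for img_id in torch.unique(rois[:, 0]).long():
        inds = rois[:, 0] == img_id.item()
        fused[inds] = roi_feats[inds] + glbctx_feat[img_id]
    assert torch.allclose(
        fused, torch.tensor([[1., 1.], [1., 1.], [2., 2.], [2., 2.]]))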
27,836
40.057522
79
py
ERD
ERD-main/mmdet/models/roi_heads/double_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple from torch import Tensor from mmdet.registry import MODELS from .standard_roi_head import StandardRoIHead @MODELS.register_module() class DoubleHeadRoIHead(StandardRoIHead): """RoI head for `Double Head RCNN <https://arxiv.org/abs/1904.06493>`_. Args: reg_roi_scale_factor (float): The scale factor to extend the rois used to extract the regression features. """ def __init__(self, reg_roi_scale_factor: float, **kwargs): super().__init__(**kwargs) self.reg_roi_scale_factor = reg_roi_scale_factor def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict: """Box head forward function used in both training and testing. Args: x (tuple[Tensor]): List of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. """ bbox_cls_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_cls_feats) return bbox_results
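

# --- Hedged sketch (added for illustration; not part of the original file).
# `reg_roi_scale_factor` above enlarges each RoI around its center before
# the regression branch pools features; a minimal version of that rescaling
# on plain (x1, y1, x2, y2) boxes, assuming a factor of 1.3.
def _demo_scale_roi() -> None:
    import torch
    bbox = torch.tensor([[10., 10., 30., 30.]])
    factor = 1.3
    cxcy = (bbox[:, :2] + bbox[:, 2:]) / 2  # box center
    half_wh = (bbox[:, 2:] - bbox[:, :2]) * factor / 2  # scaled half-size
    scaled = torch.cat([cxcy - half_wh, cxcy + half_wh], dim=1)
    assert torch.allclose(scaled, torch.tensor([[7., 7., 33., 33.]]))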
1,956
35.240741
77
py
ERD
ERD-main/mmdet/models/roi_heads/multi_instance_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import DetDataSample from mmdet.structures.bbox import bbox2roi from mmdet.utils import ConfigType, InstanceList from ..task_modules.samplers import SamplingResult from ..utils import empty_instances, unpack_gt_instances from .standard_roi_head import StandardRoIHead @MODELS.register_module() class MultiInstanceRoIHead(StandardRoIHead): """The roi head for Multi-instance prediction.""" def __init__(self, num_instance: int = 2, *args, **kwargs) -> None: self.num_instance = num_instance super().__init__(*args, **kwargs) def init_bbox_head(self, bbox_roi_extractor: ConfigType, bbox_head: ConfigType) -> None: """Initialize box head and box roi extractor. Args: bbox_roi_extractor (dict or ConfigDict): Config of box roi extractor. bbox_head (dict or ConfigDict): Config of box in box head. """ self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor) self.bbox_head = MODELS.build(bbox_head) def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict: """Box head forward function used in both training and testing. Args: x (tuple[Tensor]): List of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `cls_score_ref` (Tensor): The cls_score after refine model. - `bbox_pred_ref` (Tensor): The bbox_pred after refine model. - `bbox_feats` (Tensor): Extract bbox RoI features. """ # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_results = self.bbox_head(bbox_feats) if self.bbox_head.with_refine: bbox_results = dict( cls_score=bbox_results[0], bbox_pred=bbox_results[1], cls_score_ref=bbox_results[2], bbox_pred_ref=bbox_results[3], bbox_feats=bbox_feats) else: bbox_results = dict( cls_score=bbox_results[0], bbox_pred=bbox_results[1], bbox_feats=bbox_feats) return bbox_results def bbox_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult]) -> dict: """Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components. """ rois = bbox2roi([res.priors for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) # If there is a refining process, add refine loss. 
if 'cls_score_ref' in bbox_results: bbox_loss_and_target = self.bbox_head.loss_and_target( cls_score=bbox_results['cls_score'], bbox_pred=bbox_results['bbox_pred'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg) bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox']) bbox_loss_and_target_ref = self.bbox_head.loss_and_target( cls_score=bbox_results['cls_score_ref'], bbox_pred=bbox_results['bbox_pred_ref'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg) bbox_results['loss_bbox']['loss_rcnn_emd_ref'] = \ bbox_loss_and_target_ref['loss_bbox']['loss_rcnn_emd'] else: bbox_loss_and_target = self.bbox_head.loss_and_target( cls_score=bbox_results['cls_score'], bbox_pred=bbox_results['bbox_pred'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg) bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox']) return bbox_results def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: List[DetDataSample]) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. Returns: dict[str, Tensor]: A dictionary of loss components """ assert len(rpn_results_list) == len(batch_data_samples) outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, _ = outputs sampling_results = [] for i in range(len(batch_data_samples)): # rename rpn_results.bboxes to rpn_results.priors rpn_results = rpn_results_list[i] rpn_results.priors = rpn_results.pop('bboxes') assign_result = self.bbox_assigner.assign( rpn_results, batch_gt_instances[i], batch_gt_instances_ignore[i]) sampling_result = self.bbox_sampler.sample( assign_result, rpn_results, batch_gt_instances[i], batch_gt_instances_ignore=batch_gt_instances_ignore[i]) sampling_results.append(sampling_result) losses = dict() # bbox head loss if self.with_bbox: bbox_results = self.bbox_loss(x, sampling_results) losses.update(bbox_results['loss_bbox']) return losses def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType, rescale: bool = False) -> InstanceList: """Perform forward propagation of the bbox head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" proposals = [res.bboxes for res in rpn_results_list] rois = bbox2roi(proposals) if rois.shape[0] == 0: return empty_instances( batch_img_metas, rois.device, task_type='bbox') bbox_results = self._bbox_forward(x, rois) # split batch bbox prediction back to each image if 'cls_score_ref' in bbox_results: cls_scores = bbox_results['cls_score_ref'] bbox_preds = bbox_results['bbox_pred_ref'] else: cls_scores = bbox_results['cls_score'] bbox_preds = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposals) rois = rois.split(num_proposals_per_img, 0) cls_scores = cls_scores.split(num_proposals_per_img, 0) if bbox_preds is not None: bbox_preds = bbox_preds.split(num_proposals_per_img, 0) else: bbox_preds = (None, ) * len(proposals) result_list = self.bbox_head.predict_by_feat( rois=rois, cls_scores=cls_scores, bbox_preds=bbox_preds, batch_img_metas=batch_img_metas, rcnn_test_cfg=rcnn_test_cfg, rescale=rescale) return result_list
9,392
40.378855
79
py
ERD
ERD-main/mmdet/models/roi_heads/sparse_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.task_modules.samplers import PseudoSampler from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox2roi from mmdet.utils import ConfigType, InstanceList, OptConfigType from ..utils.misc import empty_instances, unpack_gt_instances from .cascade_roi_head import CascadeRoIHead @MODELS.register_module() class SparseRoIHead(CascadeRoIHead): r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_ and `Instances as Queries <http://arxiv.org/abs/2105.01928>`_ Args: num_stages (int): Number of stage whole iterative process. Defaults to 6. stage_loss_weights (Tuple[float]): The loss weight of each stage. By default all stages have the same weight 1. bbox_roi_extractor (:obj:`ConfigDict` or dict): Config of box roi extractor. mask_roi_extractor (:obj:`ConfigDict` or dict): Config of mask roi extractor. bbox_head (:obj:`ConfigDict` or dict): Config of box head. mask_head (:obj:`ConfigDict` or dict): Config of mask head. train_cfg (:obj:`ConfigDict` or dict, Optional): Configuration information in train stage. Defaults to None. test_cfg (:obj:`ConfigDict` or dict, Optional): Configuration information in test stage. Defaults to None. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. Defaults to None. """ def __init__(self, num_stages: int = 6, stage_loss_weights: Tuple[float] = (1, 1, 1, 1, 1, 1), proposal_feature_channel: int = 256, bbox_roi_extractor: ConfigType = dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', output_size=7, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_roi_extractor: OptConfigType = None, bbox_head: ConfigType = dict( type='DIIHead', num_classes=80, num_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, hidden_channels=256, dropout=0.0, roi_feat_size=7, ffn_act_cfg=dict(type='ReLU', inplace=True)), mask_head: OptConfigType = None, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptConfigType = None) -> None: assert bbox_roi_extractor is not None assert bbox_head is not None assert len(stage_loss_weights) == num_stages self.num_stages = num_stages self.stage_loss_weights = stage_loss_weights self.proposal_feature_channel = proposal_feature_channel super().__init__( num_stages=num_stages, stage_loss_weights=stage_loss_weights, bbox_roi_extractor=bbox_roi_extractor, mask_roi_extractor=mask_roi_extractor, bbox_head=bbox_head, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg) # train_cfg would be None when run the test.py if train_cfg is not None: for stage in range(num_stages): assert isinstance(self.bbox_sampler[stage], PseudoSampler), \ 'Sparse R-CNN and QueryInst only support `PseudoSampler`' def bbox_loss(self, stage: int, x: Tuple[Tensor], results_list: InstanceList, object_feats: Tensor, batch_img_metas: List[dict], batch_gt_instances: InstanceList) -> dict: """Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: stage (int): The current stage in iterative process. x (tuple[Tensor]): List of multi-level img features. results_list (List[:obj:`InstanceData`]) : List of region proposals. 
            object_feats (Tensor): The object feature extracted from
                the previous stage.
            batch_img_metas (list[dict]): Meta information of each image.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes``, ``labels``, and
                ``masks`` attributes.

        Returns:
            dict[str, Tensor]: Usually returns a dictionary with keys:

            - `cls_score` (Tensor): Classification scores.
            - `bbox_pred` (Tensor): Box energies / deltas.
            - `bbox_feats` (Tensor): Extract bbox RoI features.
            - `loss_bbox` (dict): A dictionary of bbox loss components.
        """
        proposal_list = [res.bboxes for res in results_list]
        rois = bbox2roi(proposal_list)
        bbox_results = self._bbox_forward(stage, x, rois, object_feats,
                                          batch_img_metas)
        imgs_whwh = torch.cat(
            [res.imgs_whwh[None, ...] for res in results_list])
        cls_pred_list = bbox_results['detached_cls_scores']
        proposal_list = bbox_results['detached_proposals']

        sampling_results = []
        bbox_head = self.bbox_head[stage]
        for i in range(len(batch_img_metas)):
            pred_instances = InstanceData()
            # TODO: Enhance the logic
            pred_instances.bboxes = proposal_list[i]  # for assigner
            pred_instances.scores = cls_pred_list[i]
            pred_instances.priors = proposal_list[i]  # for sampler

            assign_result = self.bbox_assigner[stage].assign(
                pred_instances=pred_instances,
                gt_instances=batch_gt_instances[i],
                gt_instances_ignore=None,
                img_meta=batch_img_metas[i])

            sampling_result = self.bbox_sampler[stage].sample(
                assign_result, pred_instances, batch_gt_instances[i])
            sampling_results.append(sampling_result)
        bbox_results.update(sampling_results=sampling_results)

        cls_score = bbox_results['cls_score']
        decoded_bboxes = bbox_results['decoded_bboxes']
        cls_score = cls_score.view(-1, cls_score.size(-1))
        decoded_bboxes = decoded_bboxes.view(-1, 4)
        bbox_loss_and_target = bbox_head.loss_and_target(
            cls_score,
            decoded_bboxes,
            sampling_results,
            self.train_cfg[stage],
            imgs_whwh=imgs_whwh,
            concat=True)
        bbox_results.update(bbox_loss_and_target)

        # propose for the new proposal_list
        proposal_list = []
        for idx in range(len(batch_img_metas)):
            results = InstanceData()
            results.imgs_whwh = results_list[idx].imgs_whwh
            results.bboxes = bbox_results['detached_proposals'][idx]
            proposal_list.append(results)
        bbox_results.update(results_list=proposal_list)
        return bbox_results

    def _bbox_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor,
                      object_feats: Tensor,
                      batch_img_metas: List[dict]) -> dict:
        """Box head forward function used in both training and testing.
        Returns all regression, classification results and an intermediate
        feature.

        Args:
            stage (int): The current stage in iterative process.
            x (tuple[Tensor]): List of multi-level img features.
            rois (Tensor): RoIs with the shape (n, 5) where the first
                column indicates batch id of each RoI.
                Each dimension means (img_index, x1, y1, x2, y2).
            object_feats (Tensor): The object feature extracted from
                the previous stage.
            batch_img_metas (list[dict]): Meta information of each image.

        Returns:
            dict[str, Tensor]: A dictionary of bbox head outputs,
            containing the following results:

            - cls_score (Tensor): The score of each class, has shape
              (batch_size, num_proposals, num_classes) when using focal loss
              or (batch_size, num_proposals, num_classes+1) otherwise.
            - decoded_bboxes (Tensor): The regression results with shape
              (batch_size, num_proposal, 4). The last dimension 4 represents
              [tl_x, tl_y, br_x, br_y].
                - object_feats (Tensor): The object feature extracted
                  from the current stage.
                - detached_cls_scores (list[Tensor]): The detached
                  classification results, length is batch_size, and
                  each tensor has shape (num_proposal, num_classes).
                - detached_proposals (list[Tensor]): The detached
                  regression results, length is batch_size, and each
                  tensor has shape (num_proposal, 4). The last
                  dimension 4 represents [tl_x, tl_y, br_x, br_y].
        """
        num_imgs = len(batch_img_metas)
        bbox_roi_extractor = self.bbox_roi_extractor[stage]
        bbox_head = self.bbox_head[stage]
        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
                                        rois)
        cls_score, bbox_pred, object_feats, attn_feats = bbox_head(
            bbox_feats, object_feats)

        fake_bbox_results = dict(
            rois=rois,
            bbox_targets=(rois.new_zeros(len(rois), dtype=torch.long), None),
            bbox_pred=bbox_pred.view(-1, bbox_pred.size(-1)),
            cls_score=cls_score.view(-1, cls_score.size(-1)))
        fake_sampling_results = [
            InstanceData(pos_is_gt=rois.new_zeros(object_feats.size(1)))
            for _ in range(len(batch_img_metas))
        ]

        results_list = bbox_head.refine_bboxes(
            sampling_results=fake_sampling_results,
            bbox_results=fake_bbox_results,
            batch_img_metas=batch_img_metas)
        proposal_list = [res.bboxes for res in results_list]
        bbox_results = dict(
            cls_score=cls_score,
            decoded_bboxes=torch.cat(proposal_list),
            object_feats=object_feats,
            attn_feats=attn_feats,
            # detach then use it in label assign
            detached_cls_scores=[
                cls_score[i].detach() for i in range(num_imgs)
            ],
            detached_proposals=[item.detach() for item in proposal_list])

        return bbox_results

    def _mask_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor,
                      attn_feats) -> dict:
        """Mask head forward function used in both training and testing.

        Args:
            stage (int): The current stage in Cascade RoI Head.
            x (tuple[Tensor]): Tuple of multi-level img features.
            rois (Tensor): RoIs with the shape (n, 5) where the first
                column indicates batch id of each RoI.
            attn_feats (Tensor): Intermediate feature obtained from the
                last DIIHead, has shape
                (batch_size*num_proposals, feature_dimensions)

        Returns:
            dict: Usually returns a dictionary with keys:

                - `mask_preds` (Tensor): Mask prediction.
        """
        mask_roi_extractor = self.mask_roi_extractor[stage]
        mask_head = self.mask_head[stage]
        mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
                                        rois)
        # do not support caffe_c4 model anymore
        mask_preds = mask_head(mask_feats, attn_feats)

        mask_results = dict(mask_preds=mask_preds)
        return mask_results

    def mask_loss(self, stage: int, x: Tuple[Tensor], bbox_results: dict,
                  batch_gt_instances: InstanceList,
                  rcnn_train_cfg: ConfigDict) -> dict:
        """Run forward function and calculate loss for mask head in training.

        Args:
            stage (int): The current stage in Cascade RoI Head.
            x (tuple[Tensor]): Tuple of multi-level img features.
            bbox_results (dict): Results obtained from `bbox_loss`.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes``, ``labels``, and
                ``masks`` attributes.
            rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.

        Returns:
            dict: Usually returns a dictionary with keys:

                - `mask_preds` (Tensor): Mask prediction.
                - `loss_mask` (dict): A dictionary of mask loss components.
""" attn_feats = bbox_results['attn_feats'] sampling_results = bbox_results['sampling_results'] pos_rois = bbox2roi([res.pos_priors for res in sampling_results]) attn_feats = torch.cat([ feats[res.pos_inds] for (feats, res) in zip(attn_feats, sampling_results) ]) mask_results = self._mask_forward(stage, x, pos_rois, attn_feats) mask_loss_and_target = self.mask_head[stage].loss_and_target( mask_preds=mask_results['mask_preds'], sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=rcnn_train_cfg) mask_results.update(mask_loss_and_target) return mask_results def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. rpn_results_list (List[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. Returns: dict: a dictionary of loss components of all stage. """ outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \ = outputs object_feats = torch.cat( [res.pop('features')[None, ...] for res in rpn_results_list]) results_list = rpn_results_list losses = {} for stage in range(self.num_stages): stage_loss_weight = self.stage_loss_weights[stage] # bbox head forward and loss bbox_results = self.bbox_loss( stage=stage, x=x, object_feats=object_feats, results_list=results_list, batch_img_metas=batch_img_metas, batch_gt_instances=batch_gt_instances) for name, value in bbox_results['loss_bbox'].items(): losses[f's{stage}.{name}'] = ( value * stage_loss_weight if 'loss' in name else value) if self.with_mask: mask_results = self.mask_loss( stage=stage, x=x, bbox_results=bbox_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=self.train_cfg[stage]) for name, value in mask_results['loss_mask'].items(): losses[f's{stage}.{name}'] = ( value * stage_loss_weight if 'loss' in name else value) object_feats = bbox_results['object_feats'] results_list = bbox_results['results_list'] return losses def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType, rescale: bool = False) -> InstanceList: """Perform forward propagation of the bbox head and predict detection results on the features of the upstream network. Args: x(tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ proposal_list = [res.bboxes for res in rpn_results_list] object_feats = torch.cat( [res.pop('features')[None, ...] 
             for res in rpn_results_list])
        if all([proposal.shape[0] == 0 for proposal in proposal_list]):
            # There is no proposal in the whole batch
            return empty_instances(
                batch_img_metas, x[0].device, task_type='bbox')

        for stage in range(self.num_stages):
            rois = bbox2roi(proposal_list)
            bbox_results = self._bbox_forward(stage, x, rois, object_feats,
                                              batch_img_metas)
            object_feats = bbox_results['object_feats']
            cls_score = bbox_results['cls_score']
            proposal_list = bbox_results['detached_proposals']

        num_classes = self.bbox_head[-1].num_classes

        if self.bbox_head[-1].loss_cls.use_sigmoid:
            cls_score = cls_score.sigmoid()
        else:
            cls_score = cls_score.softmax(-1)[..., :-1]

        topk_inds_list = []
        results_list = []
        for img_id in range(len(batch_img_metas)):
            cls_score_per_img = cls_score[img_id]
            scores_per_img, topk_inds = cls_score_per_img.flatten(0, 1).topk(
                self.test_cfg.max_per_img, sorted=False)
            labels_per_img = topk_inds % num_classes
            bboxes_per_img = proposal_list[img_id][topk_inds // num_classes]
            topk_inds_list.append(topk_inds)
            if rescale and bboxes_per_img.size(0) > 0:
                assert batch_img_metas[img_id].get('scale_factor') is not None
                scale_factor = bboxes_per_img.new_tensor(
                    batch_img_metas[img_id]['scale_factor']).repeat((1, 2))
                bboxes_per_img = (
                    bboxes_per_img.view(bboxes_per_img.size(0), -1, 4) /
                    scale_factor).view(bboxes_per_img.size()[0], -1)

            results = InstanceData()
            results.bboxes = bboxes_per_img
            results.scores = scores_per_img
            results.labels = labels_per_img
            results_list.append(results)
        if self.with_mask:
            for img_id in range(len(batch_img_metas)):
                # add positive information in InstanceData to predict
                # mask results in `mask_head`.
                proposals = bbox_results['detached_proposals'][img_id]
                topk_inds = topk_inds_list[img_id]
                attn_feats = bbox_results['attn_feats'][img_id]

                results_list[img_id].proposals = proposals
                results_list[img_id].topk_inds = topk_inds
                results_list[img_id].attn_feats = attn_feats
        return results_list

    def predict_mask(self,
                     x: Tuple[Tensor],
                     batch_img_metas: List[dict],
                     results_list: InstanceList,
                     rescale: bool = False) -> InstanceList:
        """Perform forward propagation of the mask head and predict detection
        results on the features of the upstream network.

        Args:
            x (tuple[Tensor]): Feature maps of all scale level.
            batch_img_metas (list[dict]): List of image information.
            results_list (list[:obj:`InstanceData`]): Detection results of
                each image. Each item usually contains following keys:

                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
                - proposals (Tensor): Bboxes predicted from bbox_head,
                  has a shape (num_instances, 4).
                - topk_inds (Tensor): Topk indices of each image, has
                  shape (num_instances, )
                - attn_feats (Tensor): Intermediate feature obtained from
                  the last DIIHead, has shape
                  (num_instances, feature_dimensions)
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.

        Returns:
            list[:obj:`InstanceData`]: Detection results of each image
            after the post process.
            Each item usually contains following keys.

                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
                - masks (Tensor): Has a shape (num_instances, H, W).
""" proposal_list = [res.pop('proposals') for res in results_list] topk_inds_list = [res.pop('topk_inds') for res in results_list] attn_feats = torch.cat( [res.pop('attn_feats')[None, ...] for res in results_list]) rois = bbox2roi(proposal_list) if rois.shape[0] == 0: results_list = empty_instances( batch_img_metas, rois.device, task_type='mask', instance_results=results_list, mask_thr_binary=self.test_cfg.mask_thr_binary) return results_list last_stage = self.num_stages - 1 mask_results = self._mask_forward(last_stage, x, rois, attn_feats) num_imgs = len(batch_img_metas) mask_results['mask_preds'] = mask_results['mask_preds'].reshape( num_imgs, -1, *mask_results['mask_preds'].size()[1:]) num_classes = self.bbox_head[-1].num_classes mask_preds = [] for img_id in range(num_imgs): topk_inds = topk_inds_list[img_id] masks_per_img = mask_results['mask_preds'][img_id].flatten( 0, 1)[topk_inds] masks_per_img = masks_per_img[:, None, ...].repeat(1, num_classes, 1, 1) mask_preds.append(masks_per_img) results_list = self.mask_head[-1].predict_by_feat( mask_preds, results_list, batch_img_metas, rcnn_test_cfg=self.test_cfg, rescale=rescale) return results_list # TODO: Need to refactor later def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList) -> tuple: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: x (List[Tensor]): Multi-level features that may have different resolutions. rpn_results_list (List[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. Returns tuple: A tuple of features from ``bbox_head`` and ``mask_head`` forward. """ outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs all_stage_bbox_results = [] object_feats = torch.cat( [res.pop('features')[None, ...] for res in rpn_results_list]) results_list = rpn_results_list if self.with_bbox: for stage in range(self.num_stages): bbox_results = self.bbox_loss( stage=stage, x=x, results_list=results_list, object_feats=object_feats, batch_img_metas=batch_img_metas, batch_gt_instances=batch_gt_instances) bbox_results.pop('loss_bbox') # torch.jit does not support obj:SamplingResult bbox_results.pop('results_list') bbox_res = bbox_results.copy() bbox_res.pop('sampling_results') all_stage_bbox_results.append((bbox_res, )) if self.with_mask: attn_feats = bbox_results['attn_feats'] sampling_results = bbox_results['sampling_results'] pos_rois = bbox2roi( [res.pos_priors for res in sampling_results]) attn_feats = torch.cat([ feats[res.pos_inds] for (feats, res) in zip(attn_feats, sampling_results) ]) mask_results = self._mask_forward(stage, x, pos_rois, attn_feats) all_stage_bbox_results[-1] += (mask_results, ) return tuple(all_stage_bbox_results)
26,551
43.106312
79
py
ERD
ERD-main/mmdet/models/roi_heads/cascade_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Sequence, Tuple, Union import torch import torch.nn as nn from mmengine.model import ModuleList from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.task_modules.samplers import SamplingResult from mmdet.models.test_time_augs import merge_aug_masks from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox2roi, get_box_tensor from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptMultiConfig) from ..utils.misc import empty_instances, unpack_gt_instances from .base_roi_head import BaseRoIHead @MODELS.register_module() class CascadeRoIHead(BaseRoIHead): """Cascade roi head including one bbox head and one mask head. https://arxiv.org/abs/1712.00726 """ def __init__(self, num_stages: int, stage_loss_weights: Union[List[float], Tuple[float]], bbox_roi_extractor: OptMultiConfig = None, bbox_head: OptMultiConfig = None, mask_roi_extractor: OptMultiConfig = None, mask_head: OptMultiConfig = None, shared_head: OptConfigType = None, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None) -> None: assert bbox_roi_extractor is not None assert bbox_head is not None assert shared_head is None, \ 'Shared head is not supported in Cascade RCNN anymore' self.num_stages = num_stages self.stage_loss_weights = stage_loss_weights super().__init__( bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, shared_head=shared_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg) def init_bbox_head(self, bbox_roi_extractor: MultiConfig, bbox_head: MultiConfig) -> None: """Initialize box head and box roi extractor. Args: bbox_roi_extractor (:obj:`ConfigDict`, dict or list): Config of box roi extractor. bbox_head (:obj:`ConfigDict`, dict or list): Config of box in box head. """ self.bbox_roi_extractor = ModuleList() self.bbox_head = ModuleList() if not isinstance(bbox_roi_extractor, list): bbox_roi_extractor = [ bbox_roi_extractor for _ in range(self.num_stages) ] if not isinstance(bbox_head, list): bbox_head = [bbox_head for _ in range(self.num_stages)] assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): self.bbox_roi_extractor.append(MODELS.build(roi_extractor)) self.bbox_head.append(MODELS.build(head)) def init_mask_head(self, mask_roi_extractor: MultiConfig, mask_head: MultiConfig) -> None: """Initialize mask head and mask roi extractor. Args: mask_head (dict): Config of mask in mask head. mask_roi_extractor (:obj:`ConfigDict`, dict or list): Config of mask roi extractor. 
""" self.mask_head = nn.ModuleList() if not isinstance(mask_head, list): mask_head = [mask_head for _ in range(self.num_stages)] assert len(mask_head) == self.num_stages for head in mask_head: self.mask_head.append(MODELS.build(head)) if mask_roi_extractor is not None: self.share_roi_extractor = False self.mask_roi_extractor = ModuleList() if not isinstance(mask_roi_extractor, list): mask_roi_extractor = [ mask_roi_extractor for _ in range(self.num_stages) ] assert len(mask_roi_extractor) == self.num_stages for roi_extractor in mask_roi_extractor: self.mask_roi_extractor.append(MODELS.build(roi_extractor)) else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor def init_assigner_sampler(self) -> None: """Initialize assigner and sampler for each stage.""" self.bbox_assigner = [] self.bbox_sampler = [] if self.train_cfg is not None: for idx, rcnn_train_cfg in enumerate(self.train_cfg): self.bbox_assigner.append( TASK_UTILS.build(rcnn_train_cfg.assigner)) self.current_stage = idx self.bbox_sampler.append( TASK_UTILS.build( rcnn_train_cfg.sampler, default_args=dict(context=self))) def _bbox_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor) -> dict: """Box head forward function used in both training and testing. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): List of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. """ bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore cls_score, bbox_pred = bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def bbox_loss(self, stage: int, x: Tuple[Tensor], sampling_results: List[SamplingResult]) -> dict: """Run forward function and calculate loss for box head in training. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): List of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. Returns: dict: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components. - `rois` (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. - `bbox_targets` (tuple): Ground truth for proposals in a single image. Containing the following list of Tensors: (labels, label_weights, bbox_targets, bbox_weights) """ bbox_head = self.bbox_head[stage] rois = bbox2roi([res.priors for res in sampling_results]) bbox_results = self._bbox_forward(stage, x, rois) bbox_results.update(rois=rois) bbox_loss_and_target = bbox_head.loss_and_target( cls_score=bbox_results['cls_score'], bbox_pred=bbox_results['bbox_pred'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg[stage]) bbox_results.update(bbox_loss_and_target) return bbox_results def _mask_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor) -> dict: """Mask head forward function used in both training and testing. 
Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): Tuple of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: dict: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. """ mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore mask_preds = mask_head(mask_feats) mask_results = dict(mask_preds=mask_preds) return mask_results def mask_loss(self, stage: int, x: Tuple[Tensor], sampling_results: List[SamplingResult], batch_gt_instances: InstanceList) -> dict: """Run forward function and calculate loss for mask head in training. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): Tuple of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. Returns: dict: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. - `loss_mask` (dict): A dictionary of mask loss components. """ pos_rois = bbox2roi([res.pos_priors for res in sampling_results]) mask_results = self._mask_forward(stage, x, pos_rois) mask_head = self.mask_head[stage] mask_loss_and_target = mask_head.loss_and_target( mask_preds=mask_results['mask_preds'], sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=self.train_cfg[stage]) mask_results.update(mask_loss_and_target) return mask_results def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. 
Returns: dict[str, Tensor]: A dictionary of loss components """ # TODO: May add a new function in baseroihead assert len(rpn_results_list) == len(batch_data_samples) outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \ = outputs num_imgs = len(batch_data_samples) losses = dict() results_list = rpn_results_list for stage in range(self.num_stages): self.current_stage = stage stage_loss_weight = self.stage_loss_weights[stage] # assign gts and sample proposals sampling_results = [] if self.with_bbox or self.with_mask: bbox_assigner = self.bbox_assigner[stage] bbox_sampler = self.bbox_sampler[stage] for i in range(num_imgs): results = results_list[i] # rename rpn_results.bboxes to rpn_results.priors results.priors = results.pop('bboxes') assign_result = bbox_assigner.assign( results, batch_gt_instances[i], batch_gt_instances_ignore[i]) sampling_result = bbox_sampler.sample( assign_result, results, batch_gt_instances[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_results = self.bbox_loss(stage, x, sampling_results) for name, value in bbox_results['loss_bbox'].items(): losses[f's{stage}.{name}'] = ( value * stage_loss_weight if 'loss' in name else value) # mask head forward and loss if self.with_mask: mask_results = self.mask_loss(stage, x, sampling_results, batch_gt_instances) for name, value in mask_results['loss_mask'].items(): losses[f's{stage}.{name}'] = ( value * stage_loss_weight if 'loss' in name else value) # refine bboxes if stage < self.num_stages - 1: bbox_head = self.bbox_head[stage] with torch.no_grad(): results_list = bbox_head.refine_bboxes( sampling_results, bbox_results, batch_img_metas) # Empty proposal if results_list is None: break return losses def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType, rescale: bool = False, **kwargs) -> InstanceList: """Perform forward propagation of the bbox head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" proposals = [res.bboxes for res in rpn_results_list] num_proposals_per_img = tuple(len(p) for p in proposals) rois = bbox2roi(proposals) if rois.shape[0] == 0: return empty_instances( batch_img_metas, rois.device, task_type='bbox', box_type=self.bbox_head[-1].predict_box_type, num_classes=self.bbox_head[-1].num_classes, score_per_cls=rcnn_test_cfg is None) rois, cls_scores, bbox_preds = self._refine_roi( x=x, rois=rois, batch_img_metas=batch_img_metas, num_proposals_per_img=num_proposals_per_img, **kwargs) results_list = self.bbox_head[-1].predict_by_feat( rois=rois, cls_scores=cls_scores, bbox_preds=bbox_preds, batch_img_metas=batch_img_metas, rescale=rescale, rcnn_test_cfg=rcnn_test_cfg) return results_list def predict_mask(self, x: Tuple[Tensor], batch_img_metas: List[dict], results_list: List[InstanceData], rescale: bool = False) -> List[InstanceData]: """Perform forward propagation of the mask head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. results_list (list[:obj:`InstanceData`]): Detection results of each image. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W). """ bboxes = [res.bboxes for res in results_list] mask_rois = bbox2roi(bboxes) if mask_rois.shape[0] == 0: results_list = empty_instances( batch_img_metas, mask_rois.device, task_type='mask', instance_results=results_list, mask_thr_binary=self.test_cfg.mask_thr_binary) return results_list num_mask_rois_per_img = [len(res) for res in results_list] aug_masks = [] for stage in range(self.num_stages): mask_results = self._mask_forward(stage, x, mask_rois) mask_preds = mask_results['mask_preds'] # split batch mask prediction back to each image mask_preds = mask_preds.split(num_mask_rois_per_img, 0) aug_masks.append([m.sigmoid().detach() for m in mask_preds]) merged_masks = [] for i in range(len(batch_img_metas)): aug_mask = [mask[i] for mask in aug_masks] merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i]) merged_masks.append(merged_mask) results_list = self.mask_head[-1].predict_by_feat( mask_preds=merged_masks, results_list=results_list, batch_img_metas=batch_img_metas, rcnn_test_cfg=self.test_cfg, rescale=rescale, activate_map=True) return results_list def _refine_roi(self, x: Tuple[Tensor], rois: Tensor, batch_img_metas: List[dict], num_proposals_per_img: Sequence[int], **kwargs) -> tuple: """Multi-stage refinement of RoI. Args: x (tuple[Tensor]): List of multi-level img features. rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2] batch_img_metas (list[dict]): List of image information. num_proposals_per_img (sequence[int]): number of proposals in each image. Returns: tuple: - rois (Tensor): Refined RoI. - cls_scores (list[Tensor]): Average predicted cls score per image. - bbox_preds (list[Tensor]): Bbox branch predictions for the last stage of per image. 
""" # "ms" in variable names means multi-stage ms_scores = [] for stage in range(self.num_stages): bbox_results = self._bbox_forward( stage=stage, x=x, rois=rois, **kwargs) # split batch bbox prediction back to each image cls_scores = bbox_results['cls_score'] bbox_preds = bbox_results['bbox_pred'] rois = rois.split(num_proposals_per_img, 0) cls_scores = cls_scores.split(num_proposals_per_img, 0) ms_scores.append(cls_scores) # some detector with_reg is False, bbox_preds will be None if bbox_preds is not None: # TODO move this to a sabl_roi_head # the bbox prediction of some detectors like SABL is not Tensor if isinstance(bbox_preds, torch.Tensor): bbox_preds = bbox_preds.split(num_proposals_per_img, 0) else: bbox_preds = self.bbox_head[stage].bbox_pred_split( bbox_preds, num_proposals_per_img) else: bbox_preds = (None, ) * len(batch_img_metas) if stage < self.num_stages - 1: bbox_head = self.bbox_head[stage] if bbox_head.custom_activation: cls_scores = [ bbox_head.loss_cls.get_activation(s) for s in cls_scores ] refine_rois_list = [] for i in range(len(batch_img_metas)): if rois[i].shape[0] > 0: bbox_label = cls_scores[i][:, :-1].argmax(dim=1) # Refactor `bbox_head.regress_by_class` to only accept # box tensor without img_idx concatenated. refined_bboxes = bbox_head.regress_by_class( rois[i][:, 1:], bbox_label, bbox_preds[i], batch_img_metas[i]) refined_bboxes = get_box_tensor(refined_bboxes) refined_rois = torch.cat( [rois[i][:, [0]], refined_bboxes], dim=1) refine_rois_list.append(refined_rois) rois = torch.cat(refine_rois_list) # average scores of each image by stages cls_scores = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(len(batch_img_metas)) ] return rois, cls_scores, bbox_preds def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList) -> tuple: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: x (List[Tensor]): Multi-level features that may have different resolutions. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. Returns tuple: A tuple of features from ``bbox_head`` and ``mask_head`` forward. """ results = () batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] proposals = [rpn_results.bboxes for rpn_results in rpn_results_list] num_proposals_per_img = tuple(len(p) for p in proposals) rois = bbox2roi(proposals) # bbox head if self.with_bbox: rois, cls_scores, bbox_preds = self._refine_roi( x, rois, batch_img_metas, num_proposals_per_img) results = results + (cls_scores, bbox_preds) # mask head if self.with_mask: aug_masks = [] rois = torch.cat(rois) for stage in range(self.num_stages): mask_results = self._mask_forward(stage, x, rois) mask_preds = mask_results['mask_preds'] mask_preds = mask_preds.split(num_proposals_per_img, 0) aug_masks.append([m.sigmoid().detach() for m in mask_preds]) merged_masks = [] for i in range(len(batch_img_metas)): aug_mask = [mask[i] for mask in aug_masks] merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i]) merged_masks.append(merged_mask) results = results + (merged_masks, ) return results
24,347
41.790861
79
py
ERD
ERD-main/mmdet/models/roi_heads/trident_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch
from mmcv.ops import batched_nms
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList
from .standard_roi_head import StandardRoIHead


@MODELS.register_module()
class TridentRoIHead(StandardRoIHead):
    """Trident roi head.

    Args:
        num_branch (int): Number of branches in TridentNet.
        test_branch_idx (int): In inference, all 3 branches will be used
            if `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
    """

    def __init__(self, num_branch: int, test_branch_idx: int,
                 **kwargs) -> None:
        self.num_branch = num_branch
        self.test_branch_idx = test_branch_idx
        super().__init__(**kwargs)

    def merge_trident_bboxes(self,
                             trident_results: InstanceList) -> InstanceData:
        """Merge bbox predictions of each branch.

        Args:
            trident_results (List[:obj:`InstanceData`]): A list of
                InstanceData predicted from every branch.

        Returns:
            :obj:`InstanceData`: merged InstanceData.
        """
        bboxes = torch.cat([res.bboxes for res in trident_results])
        scores = torch.cat([res.scores for res in trident_results])
        labels = torch.cat([res.labels for res in trident_results])

        nms_cfg = self.test_cfg['nms']
        results = InstanceData()
        if bboxes.numel() == 0:
            results.bboxes = bboxes
            results.scores = scores
            results.labels = labels
        else:
            det_bboxes, keep = batched_nms(bboxes, scores, labels, nms_cfg)
            results.bboxes = det_bboxes[:, :-1]
            results.scores = det_bboxes[:, -1]
            results.labels = labels[keep]

        if self.test_cfg['max_per_img'] > 0:
            results = results[:self.test_cfg['max_per_img']]
        return results

    def predict(self,
                x: Tuple[Tensor],
                rpn_results_list: InstanceList,
                batch_data_samples: SampleList,
                rescale: bool = False) -> InstanceList:
        """Perform forward propagation of the roi head and predict detection
        results on the features of the upstream network.

        - Compute prediction bbox and label per branch.
        - Merge predictions of each branch according to scores of
          bboxes, i.e., bboxes with higher score are kept to give
          top-k prediction.

        Args:
            x (tuple[Tensor]): Features from upstream network. Each
                has shape (N, C, H, W).
            rpn_results_list (list[:obj:`InstanceData`]): list of region
                proposals.
            batch_data_samples (List[:obj:`DetDataSample`]): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to the original
                image. Defaults to False.

        Returns:
            list[obj:`InstanceData`]: Detection results of each image.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        results_list = super().predict(
            x=x,
            rpn_results_list=rpn_results_list,
            batch_data_samples=batch_data_samples,
            rescale=rescale)

        num_branch = self.num_branch \
            if self.training or self.test_branch_idx == -1 else 1

        merged_results_list = []
        for i in range(len(batch_data_samples) // num_branch):
            merged_results_list.append(
                self.merge_trident_bboxes(
                    results_list[i * num_branch:(i + 1) * num_branch]))
        return merged_results_list
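
# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not from the original file): how
# the flat results list is regrouped per image before merging. Entries for
# one image's branches are adjacent, so image `i` owns the slice
# [i * num_branch:(i + 1) * num_branch]. Values below are made-up toys; the
# real merge additionally runs `batched_nms` over the grouped boxes.
if __name__ == '__main__':
    import torch

    num_branch, num_imgs, max_per_img = 3, 2, 5
    # one fake score tensor per (image, branch) pair
    flat_results = [torch.rand(4) for _ in range(num_imgs * num_branch)]
    for i in range(num_imgs):
        group = flat_results[i * num_branch:(i + 1) * num_branch]
        merged = torch.cat(group)                       # concat branches
        kept = merged.sort(descending=True).values[:max_per_img]
        assert merged.numel() == 4 * num_branch
        assert kept.numel() == min(max_per_img, merged.numel())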
4,298
37.044248
79
py
ERD
ERD-main/mmdet/models/roi_heads/dynamic_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import numpy as np import torch from torch import Tensor from mmdet.models.losses import SmoothL1Loss from mmdet.models.task_modules.samplers import SamplingResult from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox2roi from mmdet.utils import InstanceList from ..utils.misc import unpack_gt_instances from .standard_roi_head import StandardRoIHead EPS = 1e-15 @MODELS.register_module() class DynamicRoIHead(StandardRoIHead): """RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_.""" def __init__(self, **kwargs) -> None: super().__init__(**kwargs) assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss) # the IoU history of the past `update_iter_interval` iterations self.iou_history = [] # the beta history of the past `update_iter_interval` iterations self.beta_history = [] def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList) -> dict: """Forward function for training. Args: x (tuple[Tensor]): List of multi-level img features. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. Returns: dict[str, Tensor]: a dictionary of loss components """ assert len(rpn_results_list) == len(batch_data_samples) outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, _ = outputs # assign gts and sample proposals num_imgs = len(batch_data_samples) sampling_results = [] cur_iou = [] for i in range(num_imgs): # rename rpn_results.bboxes to rpn_results.priors rpn_results = rpn_results_list[i] rpn_results.priors = rpn_results.pop('bboxes') assign_result = self.bbox_assigner.assign( rpn_results, batch_gt_instances[i], batch_gt_instances_ignore[i]) sampling_result = self.bbox_sampler.sample( assign_result, rpn_results, batch_gt_instances[i], feats=[lvl_feat[i][None] for lvl_feat in x]) # record the `iou_topk`-th largest IoU in an image iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk, len(assign_result.max_overlaps)) ious, _ = torch.topk(assign_result.max_overlaps, iou_topk) cur_iou.append(ious[-1].item()) sampling_results.append(sampling_result) # average the current IoUs over images cur_iou = np.mean(cur_iou) self.iou_history.append(cur_iou) losses = dict() # bbox head forward and loss if self.with_bbox: bbox_results = self.bbox_loss(x, sampling_results) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self.mask_loss(x, sampling_results, bbox_results['bbox_feats'], batch_gt_instances) losses.update(mask_results['loss_mask']) # update IoU threshold and SmoothL1 beta update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval if len(self.iou_history) % update_iter_interval == 0: new_iou_thr, new_beta = self.update_hyperparameters() return losses def bbox_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult]) -> dict: """Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. 
- `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components. """ rois = bbox2roi([res.priors for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_loss_and_target = self.bbox_head.loss_and_target( cls_score=bbox_results['cls_score'], bbox_pred=bbox_results['bbox_pred'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg) bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox']) # record the `beta_topk`-th smallest target # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets # and bbox_weights, respectively bbox_targets = bbox_loss_and_target['bbox_targets'] pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1) num_pos = len(pos_inds) num_imgs = len(sampling_results) if num_pos > 0: cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1) beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs, num_pos) cur_target = torch.kthvalue(cur_target, beta_topk)[0].item() self.beta_history.append(cur_target) return bbox_results def update_hyperparameters(self): """Update hyperparameters like IoU thresholds for assigner and beta for SmoothL1 loss based on the training statistics. Returns: tuple[float]: the updated ``iou_thr`` and ``beta``. """ new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou, np.mean(self.iou_history)) self.iou_history = [] self.bbox_assigner.pos_iou_thr = new_iou_thr self.bbox_assigner.neg_iou_thr = new_iou_thr self.bbox_assigner.min_pos_iou = new_iou_thr if (not self.beta_history) or (np.median(self.beta_history) < EPS): # avoid 0 or too small value for new_beta new_beta = self.bbox_head.loss_bbox.beta else: new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta, np.median(self.beta_history)) self.beta_history = [] self.bbox_head.loss_bbox.beta = new_beta return new_iou_thr, new_beta
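
# --------------------------------------------------------------------------
# Hedged sketch (illustrative only, not from the original file): the
# statistics behind `update_hyperparameters`. History values and the
# `initial_iou`/`initial_beta`/`current_beta` numbers are made-up toys.
if __name__ == '__main__':
    import numpy as np

    initial_iou, initial_beta, current_beta = 0.4, 1.0, 1.0
    iou_history = [0.35, 0.52, 0.48]   # `iou_topk`-th IoU per iteration
    beta_history = [0.9, 0.7, 0.8]     # `beta_topk`-th target per iteration

    # the IoU threshold can only move up from its initial value
    new_iou_thr = max(initial_iou, np.mean(iou_history))
    # beta tracks the median regression target, guarded against ~0 values
    if not beta_history or np.median(beta_history) < EPS:
        new_beta = current_beta
    else:
        new_beta = min(initial_beta, np.median(beta_history))
    assert np.isclose(new_iou_thr, 0.45) and np.isclose(new_beta, 0.8)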
6,794
40.432927
79
py
ERD
ERD-main/mmdet/models/roi_heads/point_rend_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend  # noqa
from typing import List, Tuple

import torch
import torch.nn.functional as F
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList
from ..task_modules.samplers import SamplingResult
from ..utils import empty_instances
from .standard_roi_head import StandardRoIHead


@MODELS.register_module()
class PointRendRoIHead(StandardRoIHead):
    """`PointRend <https://arxiv.org/abs/1912.08193>`_."""

    def __init__(self, point_head: ConfigType, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        assert self.with_bbox and self.with_mask
        self.init_point_head(point_head)

    def init_point_head(self, point_head: ConfigType) -> None:
        """Initialize ``point_head``"""
        self.point_head = MODELS.build(point_head)

    def mask_loss(self, x: Tuple[Tensor],
                  sampling_results: List[SamplingResult], bbox_feats: Tensor,
                  batch_gt_instances: InstanceList) -> dict:
        """Run forward function and calculate loss for mask head and point
        head in training."""
        mask_results = super().mask_loss(
            x=x,
            sampling_results=sampling_results,
            bbox_feats=bbox_feats,
            batch_gt_instances=batch_gt_instances)

        mask_point_results = self._mask_point_loss(
            x=x,
            sampling_results=sampling_results,
            mask_preds=mask_results['mask_preds'],
            batch_gt_instances=batch_gt_instances)
        mask_results['loss_mask'].update(
            loss_point=mask_point_results['loss_point'])
        return mask_results

    def _mask_point_loss(self, x: Tuple[Tensor],
                         sampling_results: List[SamplingResult],
                         mask_preds: Tensor,
                         batch_gt_instances: InstanceList) -> dict:
        """Run forward function and calculate loss for point head in
        training."""
        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
        rel_roi_points = self.point_head.get_roi_rel_points_train(
            mask_preds, pos_labels, cfg=self.train_cfg)
        rois = bbox2roi([res.pos_bboxes for res in sampling_results])

        fine_grained_point_feats = self._get_fine_grained_point_feats(
            x, rois, rel_roi_points)
        coarse_point_feats = point_sample(mask_preds, rel_roi_points)
        mask_point_pred = self.point_head(fine_grained_point_feats,
                                          coarse_point_feats)

        loss_and_target = self.point_head.loss_and_target(
            point_pred=mask_point_pred,
            rel_roi_points=rel_roi_points,
            sampling_results=sampling_results,
            batch_gt_instances=batch_gt_instances,
            cfg=self.train_cfg)

        return loss_and_target

    def _mask_point_forward_test(self, x: Tuple[Tensor], rois: Tensor,
                                 label_preds: Tensor,
                                 mask_preds: Tensor) -> Tensor:
        """Mask refining process with point head in testing.

        Args:
            x (tuple[Tensor]): Feature maps of all scale level.
            rois (Tensor): shape (num_rois, 5).
            label_preds (Tensor): The predicted class for each RoI.
            mask_preds (Tensor): The predicted coarse masks of
                shape (num_rois, num_classes, small_size, small_size).

        Returns:
            Tensor: The refined masks of shape (num_rois, num_classes,
            large_size, large_size).
""" refined_mask_pred = mask_preds.clone() for subdivision_step in range(self.test_cfg.subdivision_steps): refined_mask_pred = F.interpolate( refined_mask_pred, scale_factor=self.test_cfg.scale_factor, mode='bilinear', align_corners=False) # If `subdivision_num_points` is larger or equal to the # resolution of the next step, then we can skip this step num_rois, channels, mask_height, mask_width = \ refined_mask_pred.shape if (self.test_cfg.subdivision_num_points >= self.test_cfg.scale_factor**2 * mask_height * mask_width and subdivision_step < self.test_cfg.subdivision_steps - 1): continue point_indices, rel_roi_points = \ self.point_head.get_roi_rel_points_test( refined_mask_pred, label_preds, cfg=self.test_cfg) fine_grained_point_feats = self._get_fine_grained_point_feats( x=x, rois=rois, rel_roi_points=rel_roi_points) coarse_point_feats = point_sample(mask_preds, rel_roi_points) mask_point_pred = self.point_head(fine_grained_point_feats, coarse_point_feats) point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) refined_mask_pred = refined_mask_pred.reshape( num_rois, channels, mask_height * mask_width) refined_mask_pred = refined_mask_pred.scatter_( 2, point_indices, mask_point_pred) refined_mask_pred = refined_mask_pred.view(num_rois, channels, mask_height, mask_width) return refined_mask_pred def _get_fine_grained_point_feats(self, x: Tuple[Tensor], rois: Tensor, rel_roi_points: Tensor) -> Tensor: """Sample fine grained feats from each level feature map and concatenate them together. Args: x (tuple[Tensor]): Feature maps of all scale level. rois (Tensor): shape (num_rois, 5). rel_roi_points (Tensor): A tensor of shape (num_rois, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the [mask_height, mask_width] grid. Returns: Tensor: The fine grained features for each points, has shape (num_rois, feats_channels, num_points). """ assert rois.shape[0] > 0, 'RoI is a empty tensor.' num_imgs = x[0].shape[0] fine_grained_feats = [] for idx in range(self.mask_roi_extractor.num_inputs): feats = x[idx] spatial_scale = 1. / float( self.mask_roi_extractor.featmap_strides[idx]) point_feats = [] for batch_ind in range(num_imgs): # unravel batch dim feat = feats[batch_ind].unsqueeze(0) inds = (rois[:, 0].long() == batch_ind) if inds.any(): rel_img_points = rel_roi_point_to_rel_img_point( rois=rois[inds], rel_roi_points=rel_roi_points[inds], img=feat.shape[2:], spatial_scale=spatial_scale).unsqueeze(0) point_feat = point_sample(feat, rel_img_points) point_feat = point_feat.squeeze(0).transpose(0, 1) point_feats.append(point_feat) fine_grained_feats.append(torch.cat(point_feats, dim=0)) return torch.cat(fine_grained_feats, dim=1) def predict_mask(self, x: Tuple[Tensor], batch_img_metas: List[dict], results_list: InstanceList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the mask head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. results_list (list[:obj:`InstanceData`]): Detection results of each image. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). 
- bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W). """ # don't need to consider aug_test. bboxes = [res.bboxes for res in results_list] mask_rois = bbox2roi(bboxes) if mask_rois.shape[0] == 0: results_list = empty_instances( batch_img_metas, mask_rois.device, task_type='mask', instance_results=results_list, mask_thr_binary=self.test_cfg.mask_thr_binary) return results_list mask_results = self._mask_forward(x, mask_rois) mask_preds = mask_results['mask_preds'] # split batch mask prediction back to each image num_mask_rois_per_img = [len(res) for res in results_list] mask_preds = mask_preds.split(num_mask_rois_per_img, 0) # refine mask_preds mask_rois = mask_rois.split(num_mask_rois_per_img, 0) mask_preds_refined = [] for i in range(len(batch_img_metas)): labels = results_list[i].labels x_i = [xx[[i]] for xx in x] mask_rois_i = mask_rois[i] mask_rois_i[:, 0] = 0 mask_pred_i = self._mask_point_forward_test( x_i, mask_rois_i, labels, mask_preds[i]) mask_preds_refined.append(mask_pred_i) # TODO: Handle the case where rescale is false results_list = self.mask_head.predict_by_feat( mask_preds=mask_preds_refined, results_list=results_list, batch_img_metas=batch_img_metas, rcnn_test_cfg=self.test_cfg, rescale=rescale) return results_list
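
# --------------------------------------------------------------------------
# Hedged sketch (illustrative only, not from the original file): the scatter
# step that writes refined per-point logits back into the upsampled mask,
# as in `_mask_point_forward_test` above. All shapes are made-up toy values.
if __name__ == '__main__':
    import torch

    num_rois, channels, h, w, num_points = 2, 3, 4, 4, 5
    mask = torch.zeros(num_rois, channels, h, w)
    point_logits = torch.rand(num_rois, channels, num_points)
    # indices of the most uncertain points in the flattened h*w grid
    point_indices = torch.randint(0, h * w, (num_rois, num_points))
    point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)

    refined = mask.reshape(num_rois, channels, h * w)
    refined = refined.scatter_(2, point_indices, point_logits)
    refined = refined.view(num_rois, channels, h, w)
    assert refined.shape == (num_rois, channels, h, w)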
10,479
43.219409
101
py
ERD
ERD-main/mmdet/models/roi_heads/base_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple

from mmengine.model import BaseModule
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig


class BaseRoIHead(BaseModule, metaclass=ABCMeta):
    """Base class for RoIHeads."""

    def __init__(self,
                 bbox_roi_extractor: OptMultiConfig = None,
                 bbox_head: OptMultiConfig = None,
                 mask_roi_extractor: OptMultiConfig = None,
                 mask_head: OptMultiConfig = None,
                 shared_head: OptConfigType = None,
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        if shared_head is not None:
            self.shared_head = MODELS.build(shared_head)

        if bbox_head is not None:
            self.init_bbox_head(bbox_roi_extractor, bbox_head)

        if mask_head is not None:
            self.init_mask_head(mask_roi_extractor, mask_head)

        self.init_assigner_sampler()

    @property
    def with_bbox(self) -> bool:
        """bool: whether the RoI head contains a `bbox_head`"""
        return hasattr(self, 'bbox_head') and self.bbox_head is not None

    @property
    def with_mask(self) -> bool:
        """bool: whether the RoI head contains a `mask_head`"""
        return hasattr(self, 'mask_head') and self.mask_head is not None

    @property
    def with_shared_head(self) -> bool:
        """bool: whether the RoI head contains a `shared_head`"""
        return hasattr(self, 'shared_head') and self.shared_head is not None

    @abstractmethod
    def init_bbox_head(self, *args, **kwargs):
        """Initialize ``bbox_head``"""
        pass

    @abstractmethod
    def init_mask_head(self, *args, **kwargs):
        """Initialize ``mask_head``"""
        pass

    @abstractmethod
    def init_assigner_sampler(self, *args, **kwargs):
        """Initialize assigner and sampler."""
        pass

    @abstractmethod
    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
             batch_data_samples: SampleList):
        """Perform forward propagation and loss calculation of the roi head
        on the features of the upstream network."""

    def predict(self,
                x: Tuple[Tensor],
                rpn_results_list: InstanceList,
                batch_data_samples: SampleList,
                rescale: bool = False) -> InstanceList:
        """Perform forward propagation of the roi head and predict detection
        results on the features of the upstream network.

        Args:
            x (tuple[Tensor]): Features from upstream network. Each
                has shape (N, C, H, W).
            rpn_results_list (list[:obj:`InstanceData`]): list of region
                proposals.
            batch_data_samples (List[:obj:`DetDataSample`]): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to the original
                image. Defaults to False.

        Returns:
            list[obj:`InstanceData`]: Detection results of each image.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
            - masks (Tensor): Has a shape (num_instances, H, W).
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        batch_img_metas = [
            data_samples.metainfo for data_samples in batch_data_samples
        ]

        # TODO: the nms op in mmcv needs to be enhanced; the bbox results
        # may differ when rescale is not applied in bbox_head

        # If it has the mask branch, the bbox branch does not need
        # to be scaled to the original image scale, because the mask
        # branch will scale both bbox and mask at the same time.
        bbox_rescale = rescale if not self.with_mask else False
        results_list = self.predict_bbox(
            x,
            batch_img_metas,
            rpn_results_list,
            rcnn_test_cfg=self.test_cfg,
            rescale=bbox_rescale)

        if self.with_mask:
            results_list = self.predict_mask(
                x, batch_img_metas, results_list, rescale=rescale)

        return results_list
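
# --------------------------------------------------------------------------
# Hedged sketch (illustrative only, not from the original file): the minimal
# override surface a concrete subclass must provide before `BaseRoIHead` can
# be instantiated. `_StubRoIHead` is a hypothetical name; it fills every
# abstract hook with a placeholder, whereas real heads build modules from
# the passed configs.
class _StubRoIHead(BaseRoIHead):

    def init_bbox_head(self, bbox_roi_extractor, bbox_head) -> None:
        self.bbox_head = None  # placeholder; real heads use MODELS.build

    def init_mask_head(self, mask_roi_extractor, mask_head) -> None:
        self.mask_head = None

    def init_assigner_sampler(self) -> None:
        self.bbox_assigner = None
        self.bbox_sampler = None

    def loss(self, x, rpn_results_list, batch_data_samples):
        # a real head returns a dict of weighted loss terms per branch
        return dict()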
4,889
36.615385
78
py
ERD
ERD-main/mmdet/models/roi_heads/mask_scoring_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox2roi from mmdet.utils import ConfigType, InstanceList from ..task_modules.samplers import SamplingResult from ..utils.misc import empty_instances from .standard_roi_head import StandardRoIHead @MODELS.register_module() class MaskScoringRoIHead(StandardRoIHead): """Mask Scoring RoIHead for `Mask Scoring RCNN. <https://arxiv.org/abs/1903.00241>`_. Args: mask_iou_head (:obj`ConfigDict`, dict): The config of mask_iou_head. """ def __init__(self, mask_iou_head: ConfigType, **kwargs): assert mask_iou_head is not None super().__init__(**kwargs) self.mask_iou_head = MODELS.build(mask_iou_head) def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList = None) -> tuple: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: x (List[Tensor]): Multi-level features that may have different resolutions. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. Returns tuple: A tuple of features from ``bbox_head`` and ``mask_head`` forward. """ results = () proposals = [rpn_results.bboxes for rpn_results in rpn_results_list] rois = bbox2roi(proposals) # bbox head if self.with_bbox: bbox_results = self._bbox_forward(x, rois) results = results + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) results = results + (mask_results['mask_preds'], ) # mask iou head cls_score = bbox_results['cls_score'][:100] mask_preds = mask_results['mask_preds'] mask_feats = mask_results['mask_feats'] _, labels = cls_score[:, :self.bbox_head.num_classes].max(dim=1) mask_iou_preds = self.mask_iou_head( mask_feats, mask_preds[range(labels.size(0)), labels]) results = results + (mask_iou_preds, ) return results def mask_loss(self, x: Tuple[Tensor], sampling_results: List[SamplingResult], bbox_feats, batch_gt_instances: InstanceList) -> dict: """Perform forward propagation and loss calculation of the mask head on the features of the upstream network. Args: x (tuple[Tensor]): Tuple of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. bbox_feats (Tensor): Extract bbox RoI features. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. Returns: dict: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. - `mask_feats` (Tensor): Extract mask RoI features. - `mask_targets` (Tensor): Mask target of each positive\ proposals in the image. - `loss_mask` (dict): A dictionary of mask loss components. - `loss_mask_iou` (Tensor): mask iou loss. 
""" if not self.share_roi_extractor: pos_rois = bbox2roi([res.pos_priors for res in sampling_results]) mask_results = self._mask_forward(x, pos_rois) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_priors.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_priors.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_results = self._mask_forward( x, pos_inds=pos_inds, bbox_feats=bbox_feats) mask_loss_and_target = self.mask_head.loss_and_target( mask_preds=mask_results['mask_preds'], sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=self.train_cfg) mask_targets = mask_loss_and_target['mask_targets'] mask_results.update(loss_mask=mask_loss_and_target['loss_mask']) if mask_results['loss_mask'] is None: return mask_results # mask iou head forward and loss pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) pos_mask_pred = mask_results['mask_preds'][ range(mask_results['mask_preds'].size(0)), pos_labels] mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'], pos_mask_pred) pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels] loss_mask_iou = self.mask_iou_head.loss_and_target( pos_mask_iou_pred, pos_mask_pred, mask_targets, sampling_results, batch_gt_instances, self.train_cfg) mask_results['loss_mask'].update(loss_mask_iou) return mask_results def predict_mask(self, x: Tensor, batch_img_metas: List[dict], results_list: InstanceList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the mask head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. results_list (list[:obj:`InstanceData`]): Detection results of each image. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W). 
""" bboxes = [res.bboxes for res in results_list] mask_rois = bbox2roi(bboxes) if mask_rois.shape[0] == 0: results_list = empty_instances( batch_img_metas, mask_rois.device, task_type='mask', instance_results=results_list, mask_thr_binary=self.test_cfg.mask_thr_binary) return results_list mask_results = self._mask_forward(x, mask_rois) mask_preds = mask_results['mask_preds'] mask_feats = mask_results['mask_feats'] # get mask scores with mask iou head labels = torch.cat([res.labels for res in results_list]) mask_iou_preds = self.mask_iou_head( mask_feats, mask_preds[range(labels.size(0)), labels]) # split batch mask prediction back to each image num_mask_rois_per_img = [len(res) for res in results_list] mask_preds = mask_preds.split(num_mask_rois_per_img, 0) mask_iou_preds = mask_iou_preds.split(num_mask_rois_per_img, 0) # TODO: Handle the case where rescale is false results_list = self.mask_head.predict_by_feat( mask_preds=mask_preds, results_list=results_list, batch_img_metas=batch_img_metas, rcnn_test_cfg=self.test_cfg, rescale=rescale) results_list = self.mask_iou_head.predict_by_feat( mask_iou_preds=mask_iou_preds, results_list=results_list) return results_list
8,784
41.033493
79
py
ERD
ERD-main/mmdet/models/roi_heads/htc_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Tuple

import torch
import torch.nn.functional as F
from torch import Tensor

from mmdet.models.test_time_augs import merge_aug_masks
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import InstanceList, OptConfigType
from ..layers import adaptive_avg_pool2d
from ..task_modules.samplers import SamplingResult
from ..utils import empty_instances, unpack_gt_instances
from .cascade_roi_head import CascadeRoIHead


@MODELS.register_module()
class HybridTaskCascadeRoIHead(CascadeRoIHead):
    """Hybrid Task Cascade RoI head including one bbox head and one mask
    head.

    https://arxiv.org/abs/1901.07518

    Args:
        num_stages (int): Number of cascade stages.
        stage_loss_weights (list[float]): Loss weight for every stage.
        semantic_roi_extractor (:obj:`ConfigDict` or dict, optional):
            Config of semantic roi extractor. Defaults to None.
        semantic_head (:obj:`ConfigDict` or dict, optional):
            Config of semantic head. Defaults to None.
        semantic_fusion (tuple[str]): The branches ('bbox' and/or 'mask')
            into which the semantic features are fused.
            Defaults to ('bbox', 'mask').
        interleaved (bool): Whether to interleave the box branch and mask
            branch. If True, the mask branch can take the refined bounding
            box predictions. Defaults to True.
        mask_info_flow (bool): Whether to turn on the mask information flow,
            i.e., feeding the mask features of the preceding stage to the
            current stage. Defaults to True.
    """

    def __init__(self,
                 num_stages: int,
                 stage_loss_weights: List[float],
                 semantic_roi_extractor: OptConfigType = None,
                 semantic_head: OptConfigType = None,
                 semantic_fusion: Tuple[str] = ('bbox', 'mask'),
                 interleaved: bool = True,
                 mask_info_flow: bool = True,
                 **kwargs) -> None:
        super().__init__(
            num_stages=num_stages,
            stage_loss_weights=stage_loss_weights,
            **kwargs)
        assert self.with_bbox
        assert not self.with_shared_head  # shared head is not supported

        if semantic_head is not None:
            self.semantic_roi_extractor = MODELS.build(semantic_roi_extractor)
            self.semantic_head = MODELS.build(semantic_head)

        self.semantic_fusion = semantic_fusion
        self.interleaved = interleaved
        self.mask_info_flow = mask_info_flow

    # TODO move to base_roi_head later
    @property
    def with_semantic(self) -> bool:
        """bool: whether the head has semantic head"""
        return hasattr(self,
                       'semantic_head') and self.semantic_head is not None

    def _bbox_forward(
            self,
            stage: int,
            x: Tuple[Tensor],
            rois: Tensor,
            semantic_feat: Optional[Tensor] = None) -> Dict[str, Tensor]:
        """Box head forward function used in both training and testing.

        Args:
            stage (int): The current stage in Cascade RoI Head.
            x (tuple[Tensor]): List of multi-level img features.
            rois (Tensor): RoIs with the shape (n, 5) where the first
                column indicates batch id of each RoI.
            semantic_feat (Tensor, optional): Semantic feature. Defaults to
                None.

        Returns:
            dict[str, Tensor]: Usually returns a dictionary with keys:

                - `cls_score` (Tensor): Classification scores.
                - `bbox_pred` (Tensor): Box energies / deltas.
                - `bbox_feats` (Tensor): Extracted bbox RoI features.
""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) if self.with_semantic and 'bbox' in self.semantic_fusion: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: bbox_semantic_feat = adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats += bbox_semantic_feat cls_score, bbox_pred = bbox_head(bbox_feats) bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred) return bbox_results def bbox_loss(self, stage: int, x: Tuple[Tensor], sampling_results: List[SamplingResult], semantic_feat: Optional[Tensor] = None) -> dict: """Run forward function and calculate loss for box head in training. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): List of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. semantic_feat (Tensor, optional): Semantic feature. Defaults to None. Returns: dict: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components. - `rois` (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. - `bbox_targets` (tuple): Ground truth for proposals in a single image. Containing the following list of Tensors: (labels, label_weights, bbox_targets, bbox_weights) """ bbox_head = self.bbox_head[stage] rois = bbox2roi([res.priors for res in sampling_results]) bbox_results = self._bbox_forward( stage, x, rois, semantic_feat=semantic_feat) bbox_results.update(rois=rois) bbox_loss_and_target = bbox_head.loss_and_target( cls_score=bbox_results['cls_score'], bbox_pred=bbox_results['bbox_pred'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg[stage]) bbox_results.update(bbox_loss_and_target) return bbox_results def _mask_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor, semantic_feat: Optional[Tensor] = None, training: bool = True) -> Dict[str, Tensor]: """Mask head forward function used only in training. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): Tuple of multi-level img features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. semantic_feat (Tensor, optional): Semantic feature. Defaults to None. training (bool): Mask Forward is different between training and testing. If True, use the mask forward in training. Defaults to True. Returns: dict: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. 
""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], rois) # semantic feature fusion # element-wise sum for original features and pooled semantic features if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats = mask_feats + mask_semantic_feat # mask information flow # forward all previous mask heads to obtain last_feat, and fuse it # with the normal mask feature if training: if self.mask_info_flow: last_feat = None for i in range(stage): last_feat = self.mask_head[i]( mask_feats, last_feat, return_logits=False) mask_preds = mask_head( mask_feats, last_feat, return_feat=False) else: mask_preds = mask_head(mask_feats, return_feat=False) mask_results = dict(mask_preds=mask_preds) else: aug_masks = [] last_feat = None for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_preds, last_feat = mask_head(mask_feats, last_feat) else: mask_preds = mask_head(mask_feats) aug_masks.append(mask_preds) mask_results = dict(mask_preds=aug_masks) return mask_results def mask_loss(self, stage: int, x: Tuple[Tensor], sampling_results: List[SamplingResult], batch_gt_instances: InstanceList, semantic_feat: Optional[Tensor] = None) -> dict: """Run forward function and calculate loss for mask head in training. Args: stage (int): The current stage in Cascade RoI Head. x (tuple[Tensor]): Tuple of multi-level img features. sampling_results (list["obj:`SamplingResult`]): Sampling results. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. semantic_feat (Tensor, optional): Semantic feature. Defaults to None. Returns: dict: Usually returns a dictionary with keys: - `mask_preds` (Tensor): Mask prediction. - `loss_mask` (dict): A dictionary of mask loss components. """ pos_rois = bbox2roi([res.pos_priors for res in sampling_results]) mask_results = self._mask_forward( stage=stage, x=x, rois=pos_rois, semantic_feat=semantic_feat, training=True) mask_head = self.mask_head[stage] mask_loss_and_target = mask_head.loss_and_target( mask_preds=mask_results['mask_preds'], sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=self.train_cfg[stage]) mask_results.update(mask_loss_and_target) return mask_results def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (tuple[Tensor]): List of multi-level img features. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. 
Returns: dict[str, Tensor]: A dictionary of loss components """ assert len(rpn_results_list) == len(batch_data_samples) outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \ = outputs # semantic segmentation part # 2 outputs: segmentation prediction and embedded features losses = dict() if self.with_semantic: gt_semantic_segs = [ data_sample.gt_sem_seg.sem_seg for data_sample in batch_data_samples ] gt_semantic_segs = torch.stack(gt_semantic_segs) semantic_pred, semantic_feat = self.semantic_head(x) loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_segs) losses['loss_semantic_seg'] = loss_seg else: semantic_feat = None results_list = rpn_results_list num_imgs = len(batch_img_metas) for stage in range(self.num_stages): self.current_stage = stage stage_loss_weight = self.stage_loss_weights[stage] # assign gts and sample proposals sampling_results = [] bbox_assigner = self.bbox_assigner[stage] bbox_sampler = self.bbox_sampler[stage] for i in range(num_imgs): results = results_list[i] # rename rpn_results.bboxes to rpn_results.priors if 'bboxes' in results: results.priors = results.pop('bboxes') assign_result = bbox_assigner.assign( results, batch_gt_instances[i], batch_gt_instances_ignore[i]) sampling_result = bbox_sampler.sample( assign_result, results, batch_gt_instances[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_results = self.bbox_loss( stage=stage, x=x, sampling_results=sampling_results, semantic_feat=semantic_feat) for name, value in bbox_results['loss_bbox'].items(): losses[f's{stage}.{name}'] = ( value * stage_loss_weight if 'loss' in name else value) # mask head forward and loss if self.with_mask: # interleaved execution: use regressed bboxes by the box branch # to train the mask branch if self.interleaved: bbox_head = self.bbox_head[stage] with torch.no_grad(): results_list = bbox_head.refine_bboxes( sampling_results, bbox_results, batch_img_metas) # re-assign and sample 512 RoIs from 512 RoIs sampling_results = [] for i in range(num_imgs): results = results_list[i] # rename rpn_results.bboxes to rpn_results.priors results.priors = results.pop('bboxes') assign_result = bbox_assigner.assign( results, batch_gt_instances[i], batch_gt_instances_ignore[i]) sampling_result = bbox_sampler.sample( assign_result, results, batch_gt_instances[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) mask_results = self.mask_loss( stage=stage, x=x, sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, semantic_feat=semantic_feat) for name, value in mask_results['loss_mask'].items(): losses[f's{stage}.{name}'] = ( value * stage_loss_weight if 'loss' in name else value) # refine bboxes (same as Cascade R-CNN) if stage < self.num_stages - 1 and not self.interleaved: bbox_head = self.bbox_head[stage] with torch.no_grad(): results_list = bbox_head.refine_bboxes( sampling_results=sampling_results, bbox_results=bbox_results, batch_img_metas=batch_img_metas) return losses def predict(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the roi head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (N, C, H, W). rpn_results_list (list[:obj:`InstanceData`]): list of region proposals. 
            batch_data_samples (List[:obj:`DetDataSample`]): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.

        Returns:
            list[:obj:`InstanceData`]: Detection results of each image.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
            - masks (Tensor): Has a shape (num_instances, H, W).
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        batch_img_metas = [
            data_samples.metainfo for data_samples in batch_data_samples
        ]

        if self.with_semantic:
            _, semantic_feat = self.semantic_head(x)
        else:
            semantic_feat = None

        # TODO: nms_op in mmcv need be enhanced, the bbox result may get
        #  difference when not rescale in bbox_head

        # If it has the mask branch, the bbox branch does not need
        # to be scaled to the original image scale, because the mask
        # branch will scale both bbox and mask at the same time.
        bbox_rescale = rescale if not self.with_mask else False
        results_list = self.predict_bbox(
            x=x,
            semantic_feat=semantic_feat,
            batch_img_metas=batch_img_metas,
            rpn_results_list=rpn_results_list,
            rcnn_test_cfg=self.test_cfg,
            rescale=bbox_rescale)

        if self.with_mask:
            results_list = self.predict_mask(
                x=x,
                semantic_feat=semantic_feat,
                batch_img_metas=batch_img_metas,
                results_list=results_list,
                rescale=rescale)

        return results_list

    def predict_mask(self,
                     x: Tuple[Tensor],
                     semantic_feat: Tensor,
                     batch_img_metas: List[dict],
                     results_list: InstanceList,
                     rescale: bool = False) -> InstanceList:
        """Perform forward propagation of the mask head and predict detection
        results on the features of the upstream network.

        Args:
            x (tuple[Tensor]): Feature maps of all scale level.
            semantic_feat (Tensor): Semantic feature.
            batch_img_metas (list[dict]): List of image information.
            results_list (list[:obj:`InstanceData`]): Detection results of
                each image.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.

        Returns:
            list[:obj:`InstanceData`]: Detection results of each image
            after the post process.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
            - masks (Tensor): Has a shape (num_instances, H, W).
""" num_imgs = len(batch_img_metas) bboxes = [res.bboxes for res in results_list] mask_rois = bbox2roi(bboxes) if mask_rois.shape[0] == 0: results_list = empty_instances( batch_img_metas=batch_img_metas, device=mask_rois.device, task_type='mask', instance_results=results_list, mask_thr_binary=self.test_cfg.mask_thr_binary) return results_list num_mask_rois_per_img = [len(res) for res in results_list] mask_results = self._mask_forward( stage=-1, x=x, rois=mask_rois, semantic_feat=semantic_heat, training=False) # split batch mask prediction back to each image aug_masks = [[ mask.sigmoid().detach() for mask in mask_preds.split(num_mask_rois_per_img, 0) ] for mask_preds in mask_results['mask_preds']] merged_masks = [] for i in range(num_imgs): aug_mask = [mask[i] for mask in aug_masks] merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i]) merged_masks.append(merged_mask) results_list = self.mask_head[-1].predict_by_feat( mask_preds=merged_masks, results_list=results_list, batch_img_metas=batch_img_metas, rcnn_test_cfg=self.test_cfg, rescale=rescale, activate_map=True) return results_list def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList, batch_data_samples: SampleList) -> tuple: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: x (List[Tensor]): Multi-level features that may have different resolutions. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. Returns tuple: A tuple of features from ``bbox_head`` and ``mask_head`` forward. """ results = () batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] num_imgs = len(batch_img_metas) if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None proposals = [rpn_results.bboxes for rpn_results in rpn_results_list] num_proposals_per_img = tuple(len(p) for p in proposals) rois = bbox2roi(proposals) # bbox head if self.with_bbox: rois, cls_scores, bbox_preds = self._refine_roi( x=x, rois=rois, semantic_feat=semantic_feat, batch_img_metas=batch_img_metas, num_proposals_per_img=num_proposals_per_img) results = results + (cls_scores, bbox_preds) # mask head if self.with_mask: rois = torch.cat(rois) mask_results = self._mask_forward( stage=-1, x=x, rois=rois, semantic_feat=semantic_feat, training=False) aug_masks = [[ mask.sigmoid().detach() for mask in mask_preds.split(num_proposals_per_img, 0) ] for mask_preds in mask_results['mask_preds']] merged_masks = [] for i in range(num_imgs): aug_mask = [mask[i] for mask in aug_masks] merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i]) merged_masks.append(merged_mask) results = results + (merged_masks, ) return results
24,599
41.268041
79
py
ERD
ERD-main/mmdet/models/roi_heads/pisa_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple

from torch import Tensor

from mmdet.models.task_modules import SamplingResult
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import InstanceList
from ..losses.pisa_loss import carl_loss, isr_p
from ..utils import unpack_gt_instances
from .standard_roi_head import StandardRoIHead


@MODELS.register_module()
class PISARoIHead(StandardRoIHead):
    r"""The RoI head for `Prime Sample Attention in Object Detection
    <https://arxiv.org/abs/1904.04821>`_."""

    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
             batch_data_samples: List[DetDataSample]) -> dict:
        """Perform forward propagation and loss calculation of the detection
        roi on the features of the upstream network.

        Args:
            x (tuple[Tensor]): List of multi-level img features.
            rpn_results_list (list[:obj:`InstanceData`]): List of region
                proposals.
            batch_data_samples (list[:obj:`DetDataSample`]): The batch
                data samples. It usually includes information such
                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.

        Returns:
            dict[str, Tensor]: A dictionary of loss components
        """
        assert len(rpn_results_list) == len(batch_data_samples)
        outputs = unpack_gt_instances(batch_data_samples)
        batch_gt_instances, batch_gt_instances_ignore, _ = outputs

        # assign gts and sample proposals
        num_imgs = len(batch_data_samples)
        sampling_results = []
        neg_label_weights = []
        for i in range(num_imgs):
            # rename rpn_results.bboxes to rpn_results.priors
            rpn_results = rpn_results_list[i]
            rpn_results.priors = rpn_results.pop('bboxes')

            assign_result = self.bbox_assigner.assign(
                rpn_results, batch_gt_instances[i],
                batch_gt_instances_ignore[i])
            sampling_result = self.bbox_sampler.sample(
                assign_result,
                rpn_results,
                batch_gt_instances[i],
                feats=[lvl_feat[i][None] for lvl_feat in x])
            # a score-aware sampler (e.g. ScoreHLRSampler) additionally
            # returns image-wise negative label weights; default to None
            # so that plain samplers do not leave this undefined
            neg_label_weight = None
            if isinstance(sampling_result, tuple):
                sampling_result, neg_label_weight = sampling_result
            sampling_results.append(sampling_result)
            neg_label_weights.append(neg_label_weight)

        losses = dict()
        # bbox head forward and loss
        if self.with_bbox:
            bbox_results = self.bbox_loss(
                x, sampling_results, neg_label_weights=neg_label_weights)
            losses.update(bbox_results['loss_bbox'])

        # mask head forward and loss
        if self.with_mask:
            mask_results = self.mask_loss(x, sampling_results,
                                          bbox_results['bbox_feats'],
                                          batch_gt_instances)
            losses.update(mask_results['loss_mask'])

        return losses

    def bbox_loss(self,
                  x: Tuple[Tensor],
                  sampling_results: List[SamplingResult],
                  neg_label_weights: List[Tensor] = None) -> dict:
        """Perform forward propagation and loss calculation of the bbox head
        on the features of the upstream network.

        Args:
            x (tuple[Tensor]): List of multi-level img features.
            sampling_results (list[:obj:`SamplingResult`]): Sampling results.
            neg_label_weights (list[Tensor], optional): Image-wise label
                weights of the sampled negatives; ``None`` entries mean
                uniform weights. Defaults to None.

        Returns:
            dict[str, Tensor]: Usually returns a dictionary with keys:

                - `cls_score` (Tensor): Classification scores.
                - `bbox_pred` (Tensor): Box energies / deltas.
                - `bbox_feats` (Tensor): Extracted bbox RoI features.
                - `loss_bbox` (dict): A dictionary of bbox loss components.
""" rois = bbox2roi([res.priors for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_targets = self.bbox_head.get_targets(sampling_results, self.train_cfg) # neg_label_weights obtained by sampler is image-wise, mapping back to # the corresponding location in label weights if neg_label_weights[0] is not None: label_weights = bbox_targets[1] cur_num_rois = 0 for i in range(len(sampling_results)): num_pos = sampling_results[i].pos_inds.size(0) num_neg = sampling_results[i].neg_inds.size(0) label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos + num_neg] = neg_label_weights[i] cur_num_rois += num_pos + num_neg cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] # Apply ISR-P isr_cfg = self.train_cfg.get('isr', None) if isr_cfg is not None: bbox_targets = isr_p( cls_score, bbox_pred, bbox_targets, rois, sampling_results, self.bbox_head.loss_cls, self.bbox_head.bbox_coder, **isr_cfg, num_class=self.bbox_head.num_classes) loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois, *bbox_targets) # Add CARL Loss carl_cfg = self.train_cfg.get('carl', None) if carl_cfg is not None: loss_carl = carl_loss( cls_score, bbox_targets[0], bbox_pred, bbox_targets[2], self.bbox_head.loss_bbox, **carl_cfg, num_class=self.bbox_head.num_classes) loss_bbox.update(loss_carl) bbox_results.update(loss_bbox=loss_bbox) return bbox_results
5,966
39.04698
79
py
ERD
ERD-main/mmdet/models/roi_heads/test_mixins.py
# Copyright (c) OpenMMLab. All rights reserved. # TODO: delete this file after refactor import sys import torch from mmdet.models.layers import multiclass_nms from mmdet.models.test_time_augs import merge_aug_bboxes, merge_aug_masks from mmdet.structures.bbox import bbox2roi, bbox_mapping if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import completed class BBoxTestMixin: if sys.version_info >= (3, 7): # TODO: Currently not supported async def async_test_bboxes(self, x, img_metas, proposals, rcnn_test_cfg, rescale=False, **kwargs): """Asynchronized test for box head without augmentation.""" rois = bbox2roi(proposals) roi_feats = self.bbox_roi_extractor( x[:len(self.bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: roi_feats = self.shared_head(roi_feats) sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017) async with completed( __name__, 'bbox_head_forward', sleep_interval=sleep_interval): cls_score, bbox_pred = self.bbox_head(roi_feats) img_shape = img_metas[0]['img_shape'] scale_factor = img_metas[0]['scale_factor'] det_bboxes, det_labels = self.bbox_head.get_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) return det_bboxes, det_labels # TODO: Currently not supported def aug_test_bboxes(self, feats, img_metas, rpn_results_list, rcnn_test_cfg): """Test det bboxes with test time augmentation.""" aug_bboxes = [] aug_scores = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] # TODO more flexible proposals = bbox_mapping(rpn_results_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) rois = bbox2roi([proposals]) bbox_results = self.bbox_forward(x, rois) bboxes, scores = self.bbox_head.get_bboxes( rois, bbox_results['cls_score'], bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) if merged_bboxes.shape[0] == 0: # There is no proposal in the single image det_bboxes = merged_bboxes.new_zeros(0, 5) det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long) else: det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) return det_bboxes, det_labels class MaskTestMixin: if sys.version_info >= (3, 7): # TODO: Currently not supported async def async_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False, mask_test_cfg=None): """Asynchronized test for mask head without augmentation.""" # image shape of the first image in the batch (only one) ori_shape = img_metas[0]['ori_shape'] scale_factor = img_metas[0]['scale_factor'] if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: if rescale and not isinstance(scale_factor, (float, torch.Tensor)): scale_factor = det_bboxes.new_tensor(scale_factor) _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor( x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) if mask_test_cfg and \ mask_test_cfg.get('async_sleep_interval'): sleep_interval 
= mask_test_cfg['async_sleep_interval'] else: sleep_interval = 0.035 async with completed( __name__, 'mask_head_forward', sleep_interval=sleep_interval): mask_pred = self.mask_head(mask_feats) segm_result = self.mask_head.get_results( mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape, scale_factor, rescale) return segm_result # TODO: Currently not supported def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): """Test for mask head with test time augmentation.""" if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta in zip(feats, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip, flip_direction) mask_rois = bbox2roi([_bboxes]) mask_results = self._mask_forward(x, mask_rois) # convert to numpy array to save memory aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] scale_factor = det_bboxes.new_ones(4) segm_result = self.mask_head.get_results( merged_masks, det_bboxes, det_labels, self.test_cfg, ori_shape, scale_factor=scale_factor, rescale=False) return segm_result
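

# Hedged sketch, not part of the original file: the ``bbox_mapping`` call in
# the aug-test paths above maps detections into each augmented view before
# RoI extraction; for the horizontal-flip part the x-coordinates mirror as
# x1' = W - x2 and x2' = W - x1. The snippet shows that mirroring on a dummy
# box; the image width is an assumption.
if __name__ == '__main__':
    boxes = torch.tensor([[10., 20., 50., 60.]])  # (x1, y1, x2, y2)
    img_w = 100.
    flipped = boxes.clone()
    flipped[:, 0] = img_w - boxes[:, 2]
    flipped[:, 2] = img_w - boxes[:, 0]
    assert flipped.tolist() == [[50., 20., 90., 60.]]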
7,451
42.325581
79
py
ERD
ERD-main/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from typing import List, Optional, Tuple import torch import torch.nn as nn from mmcv import ops from mmengine.model import BaseModule from torch import Tensor from mmdet.utils import ConfigType, OptMultiConfig class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): """Base class for RoI extractor. Args: roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (list[int]): Strides of input feature maps. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. Defaults to None. """ def __init__(self, roi_layer: ConfigType, out_channels: int, featmap_strides: List[int], init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg=init_cfg) self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) self.out_channels = out_channels self.featmap_strides = featmap_strides @property def num_inputs(self) -> int: """int: Number of input feature maps.""" return len(self.featmap_strides) def build_roi_layers(self, layer_cfg: ConfigType, featmap_strides: List[int]) -> nn.ModuleList: """Build RoI operator to extract feature from each level feature map. Args: layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and config RoI layer operation. Options are modules under ``mmcv/ops`` such as ``RoIAlign``. featmap_strides (list[int]): The stride of input feature map w.r.t to the original image size, which would be used to scale RoI coordinate (original image coordinate system) to feature coordinate system. Returns: :obj:`nn.ModuleList`: The RoI extractor modules for each level feature map. """ cfg = layer_cfg.copy() layer_type = cfg.pop('type') assert hasattr(ops, layer_type) layer_cls = getattr(ops, layer_type) roi_layers = nn.ModuleList( [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) return roi_layers def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor: """Scale RoI coordinates by scale factor. Args: rois (Tensor): RoI (Region of Interest), shape (n, 5) scale_factor (float): Scale factor that RoI will be multiplied by. Returns: Tensor: Scaled RoI. """ cx = (rois[:, 1] + rois[:, 3]) * 0.5 cy = (rois[:, 2] + rois[:, 4]) * 0.5 w = rois[:, 3] - rois[:, 1] h = rois[:, 4] - rois[:, 2] new_w = w * scale_factor new_h = h * scale_factor x1 = cx - new_w * 0.5 x2 = cx + new_w * 0.5 y1 = cy - new_h * 0.5 y2 = cy + new_h * 0.5 new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1) return new_rois @abstractmethod def forward(self, feats: Tuple[Tensor], rois: Tensor, roi_scale_factor: Optional[float] = None) -> Tensor: """Extractor ROI feats. Args: feats (Tuple[Tensor]): Multi-scale features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. roi_scale_factor (Optional[float]): RoI scale factor. Defaults to None. Returns: Tensor: RoI feature. """ pass
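

# Hedged worked example, not part of the original file: ``roi_rescale`` above
# scales width and height around the RoI centre while leaving the batch index
# in column 0 untouched. The math is repeated inline on a concrete 20x10 box
# centred at (20, 15) with a 2x factor; values are illustrative assumptions.
if __name__ == '__main__':
    rois = torch.tensor([[0., 10., 10., 30., 20.]])  # (batch_idx, x1, y1, x2, y2)
    scale_factor = 2.0
    cx = (rois[:, 1] + rois[:, 3]) * 0.5  # 20
    cy = (rois[:, 2] + rois[:, 4]) * 0.5  # 15
    new_w = (rois[:, 3] - rois[:, 1]) * scale_factor  # 20 -> 40
    new_h = (rois[:, 4] - rois[:, 2]) * scale_factor  # 10 -> 20
    scaled = torch.stack((rois[:, 0], cx - new_w * 0.5, cy - new_h * 0.5,
                          cx + new_w * 0.5, cy + new_h * 0.5),
                         dim=-1)
    assert scaled.tolist() == [[0., 0., 5., 40., 25.]]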
3,822
34.073394
78
py
ERD
ERD-main/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptMultiConfig from .base_roi_extractor import BaseRoIExtractor @MODELS.register_module() class SingleRoIExtractor(BaseRoIExtractor): """Extract RoI features from a single level feature map. If there are multiple input feature levels, each RoI is mapped to a level according to its scale. The mapping rule is proposed in `FPN <https://arxiv.org/abs/1612.03144>`_. Args: roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (List[int]): Strides of input feature maps. finest_scale (int): Scale threshold of mapping to level 0. Defaults to 56. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. Defaults to None. """ def __init__(self, roi_layer: ConfigType, out_channels: int, featmap_strides: List[int], finest_scale: int = 56, init_cfg: OptMultiConfig = None) -> None: super().__init__( roi_layer=roi_layer, out_channels=out_channels, featmap_strides=featmap_strides, init_cfg=init_cfg) self.finest_scale = finest_scale def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor: """Map rois to corresponding feature levels by scales. - scale < finest_scale * 2: level 0 - finest_scale * 2 <= scale < finest_scale * 4: level 1 - finest_scale * 4 <= scale < finest_scale * 8: level 2 - scale >= finest_scale * 8: level 3 Args: rois (Tensor): Input RoIs, shape (k, 5). num_levels (int): Total level number. Returns: Tensor: Level index (0-based) of each RoI, shape (k, ) """ scale = torch.sqrt( (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2])) target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6)) target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long() return target_lvls def forward(self, feats: Tuple[Tensor], rois: Tensor, roi_scale_factor: Optional[float] = None): """Extractor ROI feats. Args: feats (Tuple[Tensor]): Multi-scale features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. roi_scale_factor (Optional[float]): RoI scale factor. Defaults to None. Returns: Tensor: RoI feature. """ # convert fp32 to fp16 when amp is on rois = rois.type_as(feats[0]) out_size = self.roi_layers[0].output_size num_levels = len(feats) roi_feats = feats[0].new_zeros( rois.size(0), self.out_channels, *out_size) # TODO: remove this when parrots supports if torch.__version__ == 'parrots': roi_feats.requires_grad = True if num_levels == 1: if len(rois) == 0: return roi_feats return self.roi_layers[0](feats[0], rois) target_lvls = self.map_roi_levels(rois, num_levels) if roi_scale_factor is not None: rois = self.roi_rescale(rois, roi_scale_factor) for i in range(num_levels): mask = target_lvls == i inds = mask.nonzero(as_tuple=False).squeeze(1) if inds.numel() > 0: rois_ = rois[inds] roi_feats_t = self.roi_layers[i](feats[i], rois_) roi_feats[inds] = roi_feats_t else: # Sometimes some pyramid levels will not be used for RoI # feature extraction and this will cause an incomplete # computation graph in one GPU, which is different from those # in other GPUs and will cause a hanging error. # Therefore, we add it to ensure each feature pyramid is # included in the computation graph to avoid runtime bugs. roi_feats += sum( x.view(-1)[0] for x in self.parameters()) * 0. + feats[i].sum() * 0. return roi_feats
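

# Hedged worked example, not part of the original file: with the default
# finest_scale=56, ``map_roi_levels`` assigns an RoI of scale sqrt(w * h) to
# level floor(log2(scale / 56 + 1e-6)), clamped to [0, num_levels - 1], so
# 56x56 -> 0, 112x112 -> 1, 224x224 -> 2, 448x448 -> 3. The snippet repeats
# that computation inline on dummy RoIs.
if __name__ == '__main__':
    rois = torch.tensor([
        [0., 0., 0., 56., 56.],
        [0., 0., 0., 112., 112.],
        [0., 0., 0., 224., 224.],
        [0., 0., 0., 448., 448.],
    ])
    scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
    lvls = torch.floor(torch.log2(scale / 56 + 1e-6)).clamp(0, 3).long()
    assert lvls.tolist() == [0, 1, 2, 3]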
4,558
36.991667
79
py
ERD
ERD-main/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple from mmcv.cnn.bricks import build_plugin_layer from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import OptConfigType from .base_roi_extractor import BaseRoIExtractor @MODELS.register_module() class GenericRoIExtractor(BaseRoIExtractor): """Extract RoI features from all level feature maps levels. This is the implementation of `A novel Region of Interest Extraction Layer for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_. Args: aggregation (str): The method to aggregate multiple feature maps. Options are 'sum', 'concat'. Defaults to 'sum'. pre_cfg (:obj:`ConfigDict` or dict): Specify pre-processing modules. Defaults to None. post_cfg (:obj:`ConfigDict` or dict): Specify post-processing modules. Defaults to None. kwargs (keyword arguments): Arguments that are the same as :class:`BaseRoIExtractor`. """ def __init__(self, aggregation: str = 'sum', pre_cfg: OptConfigType = None, post_cfg: OptConfigType = None, **kwargs) -> None: super().__init__(**kwargs) assert aggregation in ['sum', 'concat'] self.aggregation = aggregation self.with_post = post_cfg is not None self.with_pre = pre_cfg is not None # build pre/post processing modules if self.with_post: self.post_module = build_plugin_layer(post_cfg, '_post_module')[1] if self.with_pre: self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1] def forward(self, feats: Tuple[Tensor], rois: Tensor, roi_scale_factor: Optional[float] = None) -> Tensor: """Extractor ROI feats. Args: feats (Tuple[Tensor]): Multi-scale features. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. roi_scale_factor (Optional[float]): RoI scale factor. Defaults to None. Returns: Tensor: RoI feature. """ out_size = self.roi_layers[0].output_size num_levels = len(feats) roi_feats = feats[0].new_zeros( rois.size(0), self.out_channels, *out_size) # some times rois is an empty tensor if roi_feats.shape[0] == 0: return roi_feats if num_levels == 1: return self.roi_layers[0](feats[0], rois) if roi_scale_factor is not None: rois = self.roi_rescale(rois, roi_scale_factor) # mark the starting channels for concat mode start_channels = 0 for i in range(num_levels): roi_feats_t = self.roi_layers[i](feats[i], rois) end_channels = start_channels + roi_feats_t.size(1) if self.with_pre: # apply pre-processing to a RoI extracted from each layer roi_feats_t = self.pre_module(roi_feats_t) if self.aggregation == 'sum': # and sum them all roi_feats += roi_feats_t else: # and concat them along channel dimension roi_feats[:, start_channels:end_channels] = roi_feats_t # update channels starting position start_channels = end_channels # check if concat channels match at the end if self.aggregation == 'concat': assert start_channels == self.out_channels if self.with_post: # apply post-processing before return the result roi_feats = self.post_module(roi_feats) return roi_feats
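

# Hedged sketch, not part of the original file: with aggregation='concat',
# the extractor above writes each level's RoI features into a disjoint
# channel slice of the output, whereas 'sum' accumulates into all channels.
# The channel bookkeeping with dummy numbers (illustrative assumptions):
if __name__ == '__main__':
    import torch
    num_levels, per_level_c, n = 4, 64, 2
    out = torch.zeros(n, num_levels * per_level_c, 7, 7)
    start = 0
    for lvl in range(num_levels):
        feats_t = torch.rand(n, per_level_c, 7, 7)  # stand-in per-level feats
        end = start + feats_t.size(1)
        out[:, start:end] = feats_t  # concat-mode slice write
        start = end
    # after the loop the slices must exactly fill the output channels
    assert start == out.size(1)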
3,805
35.951456
78
py
ERD
ERD-main/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple, Union from torch import Tensor from mmdet.registry import MODELS from .convfc_bbox_head import ConvFCBBoxHead @MODELS.register_module() class SCNetBBoxHead(ConvFCBBoxHead): """BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_. This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us to get intermediate shared feature. """ def _forward_shared(self, x: Tensor) -> Tensor: """Forward function for shared part. Args: x (Tensor): Input feature. Returns: Tensor: Shared feature. """ if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) return x def _forward_cls_reg(self, x: Tensor) -> Tuple[Tensor]: """Forward function for classification and regression parts. Args: x (Tensor): Input feature. Returns: tuple[Tensor]: - cls_score (Tensor): classification prediction. - bbox_pred (Tensor): bbox prediction. """ x_cls = x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else None return cls_score, bbox_pred def forward( self, x: Tensor, return_shared_feat: bool = False) -> Union[Tensor, Tuple[Tensor]]: """Forward function. Args: x (Tensor): input features return_shared_feat (bool): If True, return cls-reg-shared feature. Return: out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``, if ``return_shared_feat`` is True, append ``x_shared`` to the returned tuple. """ x_shared = self._forward_shared(x) out = self._forward_cls_reg(x_shared) if return_shared_feat: out += (x_shared, ) return out
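

# Hedged toy sketch, not part of the original file: a minimal module that
# mirrors the shared-then-split control flow above, so the effect of
# ``return_shared_feat`` is easy to see. All layers and sizes are made up.
if __name__ == '__main__':
    import torch
    from torch import nn

    class _ToySCNetStyleHead(nn.Module):

        def __init__(self):
            super().__init__()
            self.shared = nn.Linear(8, 8)
            self.cls = nn.Linear(8, 3)
            self.reg = nn.Linear(8, 4)

        def forward(self, x, return_shared_feat=False):
            x_shared = torch.relu(self.shared(x))
            out = (self.cls(x_shared), self.reg(x_shared))
            if return_shared_feat:
                out += (x_shared, )  # append the intermediate shared feature
            return out

    head = _ToySCNetStyleHead()
    outs = head(torch.rand(2, 8), return_shared_feat=True)
    assert len(outs) == 3 and outs[2].shape == (2, 8)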
2,836
26.813725
79
py
ERD
ERD-main/mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor, nn from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead from mmdet.models.task_modules.samplers import SamplingResult from mmdet.models.utils import empty_instances from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps @MODELS.register_module() class MultiInstanceBBoxHead(BBoxHead): r"""Bbox head used in CrowdDet. .. code-block:: none /-> cls convs_1 -> cls fcs_1 -> cls_1 |-- | \-> reg convs_1 -> reg fcs_1 -> reg_1 | | /-> cls convs_2 -> cls fcs_2 -> cls_2 shared convs -> shared fcs |-- | \-> reg convs_2 -> reg fcs_2 -> reg_2 | | ... | | /-> cls convs_k -> cls fcs_k -> cls_k |-- \-> reg convs_k -> reg fcs_k -> reg_k Args: num_instance (int): The number of branches after shared fcs. Defaults to 2. with_refine (bool): Whether to use refine module. Defaults to False. num_shared_convs (int): The number of shared convs. Defaults to 0. num_shared_fcs (int): The number of shared fcs. Defaults to 2. num_cls_convs (int): The number of cls convs. Defaults to 0. num_cls_fcs (int): The number of cls fcs. Defaults to 0. num_reg_convs (int): The number of reg convs. Defaults to 0. num_reg_fcs (int): The number of reg fcs. Defaults to 0. conv_out_channels (int): The number of conv out channels. Defaults to 256. fc_out_channels (int): The number of fc out channels. Defaults to 1024. init_cfg (dict or list[dict], optional): Initialization config dict. Defaults to None. """ # noqa: W605 def __init__(self, num_instance: int = 2, with_refine: bool = False, num_shared_convs: int = 0, num_shared_fcs: int = 2, num_cls_convs: int = 0, num_cls_fcs: int = 0, num_reg_convs: int = 0, num_reg_fcs: int = 0, conv_out_channels: int = 256, fc_out_channels: int = 1024, init_cfg: Optional[Union[dict, ConfigDict]] = None, *args, **kwargs) -> None: super().__init__(*args, init_cfg=init_cfg, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) assert num_instance == 2, 'Currently only 2 instances are supported' if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_instance = num_instance self.num_shared_convs = num_shared_convs self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.with_refine = with_refine # add shared convs and fcs self.shared_convs, self.shared_fcs, last_layer_dim = \ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim self.relu = nn.ReLU(inplace=True) if self.with_refine: refine_model_cfg = { 'type': 'Linear', 'in_features': self.shared_out_channels + 20, 'out_features': self.shared_out_channels } self.shared_fcs_ref = MODELS.build(refine_model_cfg) self.fc_cls_ref = nn.ModuleList() self.fc_reg_ref = nn.ModuleList() self.cls_convs = nn.ModuleList() self.cls_fcs = nn.ModuleList() self.reg_convs = nn.ModuleList() self.reg_fcs = nn.ModuleList() self.cls_last_dim = list() 
self.reg_last_dim = list() self.fc_cls = nn.ModuleList() self.fc_reg = nn.ModuleList() for k in range(self.num_instance): # add cls specific branch cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) self.cls_convs.append(cls_convs) self.cls_fcs.append(cls_fcs) self.cls_last_dim.append(cls_last_dim) # add reg specific branch reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) self.reg_convs.append(reg_convs) self.reg_fcs.append(reg_fcs) self.reg_last_dim.append(reg_last_dim) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area if self.with_cls: if self.custom_cls_channels: cls_channels = self.loss_cls.get_cls_channels( self.num_classes) else: cls_channels = self.num_classes + 1 cls_predictor_cfg_ = self.cls_predictor_cfg.copy() # deepcopy cls_predictor_cfg_.update( in_features=self.cls_last_dim[k], out_features=cls_channels) self.fc_cls.append(MODELS.build(cls_predictor_cfg_)) if self.with_refine: self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_)) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) reg_predictor_cfg_ = self.reg_predictor_cfg.copy() reg_predictor_cfg_.update( in_features=self.reg_last_dim[k], out_features=out_dim_reg) self.fc_reg.append(MODELS.build(reg_predictor_cfg_)) if self.with_refine: self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_)) if init_cfg is None: # when init_cfg is None, # It has been set to # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] # after `super(ConvFCBBoxHead, self).__init__()` # we only need to append additional configuration # for `shared_fcs`, `cls_fcs` and `reg_fcs` self.init_cfg += [ dict( type='Xavier', distribution='uniform', override=[ dict(name='shared_fcs'), dict(name='cls_fcs'), dict(name='reg_fcs') ]) ] def _add_conv_fc_branch(self, num_branch_convs: int, num_branch_fcs: int, in_channels: int, is_shared: bool = False) -> tuple: """Add shared or separable branch. convs -> avg pool (optional) -> fcs """ last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def forward(self, x: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and bbox prediction. 
- cls_score (Tensor): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. - cls_score_ref (Tensor): The cls_score after refine model. - bbox_pred_ref (Tensor): The bbox_pred after refine model. """ # shared part if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) x_cls = x x_reg = x # separate branches cls_score = list() bbox_pred = list() for k in range(self.num_instance): for conv in self.cls_convs[k]: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs[k]: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs[k]: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs[k]: x_reg = self.relu(fc(x_reg)) cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None) bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None) if self.with_refine: x_ref = x cls_score_ref = list() bbox_pred_ref = list() for k in range(self.num_instance): feat_ref = cls_score[k].softmax(dim=-1) feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]), dim=1).repeat(1, 4) feat_ref = torch.cat((x_ref, feat_ref), dim=1) feat_ref = F.relu_(self.shared_fcs_ref(feat_ref)) cls_score_ref.append(self.fc_cls_ref[k](feat_ref)) bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref)) cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) cls_score_ref = torch.cat(cls_score_ref, dim=1) bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1) return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) return cls_score, bbox_pred def get_targets(self, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, concat: bool = True) -> tuple: """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_targets_single` function. Args: sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
- bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). """ labels = [] bbox_targets = [] bbox_weights = [] label_weights = [] for i in range(len(sampling_results)): sample_bboxes = torch.cat([ sampling_results[i].pos_gt_bboxes, sampling_results[i].neg_gt_bboxes ]) sample_priors = sampling_results[i].priors sample_priors = sample_priors.repeat(1, self.num_instance).reshape( -1, 4) sample_bboxes = sample_bboxes.reshape(-1, 4) if not self.reg_decoded_bbox: _bbox_targets = self.bbox_coder.encode(sample_priors, sample_bboxes) else: _bbox_targets = sample_priors _bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4) _bbox_weights = torch.ones(_bbox_targets.shape) _labels = torch.cat([ sampling_results[i].pos_gt_labels, sampling_results[i].neg_gt_labels ]) _labels_weights = torch.ones(_labels.shape) bbox_targets.append(_bbox_targets) bbox_weights.append(_bbox_weights) labels.append(_labels) label_weights.append(_labels_weights) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, **kwargs) -> dict: """Calculate the loss based on the network predictions and targets. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, (num_classes + 1) * k), k represents the number of prediction boxes generated by each proposal box. bbox_pred (Tensor): Regression prediction results, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. rois (Tensor): RoIs with the shape (batch_size * num_proposals_single_image, 5) where the first column indicates batch id of each RoI. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). label_weights (Tensor): Labels_weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). bbox_targets (Tensor): Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. bbox_weights (Tensor): Regression weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k). Returns: dict: A dictionary of loss. """ losses = dict() if bbox_pred.numel(): loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_targets, labels) loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_targets, labels) loss = torch.cat([loss_0, loss_1], dim=1) _, min_indices = loss.min(dim=1) loss_emd = loss[torch.arange(loss.shape[0]), min_indices] loss_emd = loss_emd.mean() else: loss_emd = bbox_pred.sum() losses['loss_rcnn_emd'] = loss_emd return losses def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor, bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor, labels: Tensor) -> Tensor: """Calculate the emd loss. 
        Note:
            This implementation is modified from https://github.com/Purkialo/
            CrowdDet/blob/master/lib/det_oprs/loss_opr.py

        Args:
            bbox_pred_0 (Tensor): Part of regression prediction results, has
                shape (batch_size * num_proposals_single_image, 4), the last
                dimension 4 represents [tl_x, tl_y, br_x, br_y].
            cls_score_0 (Tensor): Part of classification prediction results,
                has shape (batch_size * num_proposals_single_image,
                (num_classes + 1)), where 1 represents the background.
            bbox_pred_1 (Tensor): The other part of regression prediction
                results, has shape (batch_size*num_proposals_single_image, 4).
            cls_score_1 (Tensor): The other part of classification prediction
                results, has shape (batch_size * num_proposals_single_image,
                (num_classes + 1)).
            targets (Tensor): Regression target for all proposals in a
                batch, has shape (batch_size * num_proposals_single_image,
                4 * k), the last dimension 4 represents [tl_x, tl_y, br_x,
                br_y], k represents the number of prediction boxes generated
                by each proposal box.
            labels (Tensor): Gt_labels for all proposals in a batch, has
                shape (batch_size * num_proposals_single_image, k).

        Returns:
            torch.Tensor: The calculated loss.
        """
        bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1],
                              dim=1).reshape(-1, bbox_pred_0.shape[-1])
        cls_score = torch.cat([cls_score_0, cls_score_1],
                              dim=1).reshape(-1, cls_score_0.shape[-1])
        targets = targets.reshape(-1, 4)
        labels = labels.long().flatten()

        # masks
        valid_masks = labels >= 0
        fg_masks = labels > 0

        # multiple class
        bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4)
        fg_gt_classes = labels[fg_masks]
        bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :]

        # loss for regression
        loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks])
        loss_bbox = loss_bbox.sum(dim=1)

        # loss for classification
        labels = labels * valid_masks
        loss_cls = self.loss_cls(cls_score, labels)

        loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox
        loss = loss_cls.reshape(-1, 2).sum(dim=1)
        return loss.reshape(-1, 1)

    def _predict_by_feat_single(
            self,
            roi: Tensor,
            cls_score: Tensor,
            bbox_pred: Tensor,
            img_meta: dict,
            rescale: bool = False,
            rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
        """Transform a single image's features extracted from the head into
        bbox results.

        Args:
            roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
                last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
            cls_score (Tensor): Box scores, has shape
                (num_boxes, num_classes + 1).
            bbox_pred (Tensor): Box energies / deltas. has shape
                (num_boxes, num_classes * 4).
            img_meta (dict): image information.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
                Defaults to None

        Returns:
            :obj:`InstanceData`: Detection results of each image.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4), the last
              dimension 4 arrange as (x1, y1, x2, y2).
""" cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. # It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predict bboxes. scores (Tensor): The score of each predict bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). """ bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4) overlap = bbox_overlaps(basement_bbox, ruler_bbox) indices = torch.where(overlap > iou_threshold)[1] loc = torch.where(roi_idx[ruler][indices] == idx) # the mask won't change in the step mask = keep[ruler[indices][loc]] keep[ruler[indices]] = False keep[ruler[indices][loc][mask]] = True ruler[~keep[ruler]] = -1 ruler = ruler[ruler > 0] keep = keep[order.sort()[1]] return bboxes[keep][:max_num, :], scores[keep][:max_num]
27,194
42.651685
79
py