code
stringlengths
17
6.64M
def build_neck(cfg):
    """Build a neck module from its registry config.

    Args:
        cfg (dict): Config dict with a registered ``type`` key.

    Returns:
        nn.Module: The constructed neck.
    """
    return NECKS.build(cfg)
def build_roi_extractor(cfg):
    """Build a RoI feature extractor from its registry config.

    Args:
        cfg (dict): Config dict with a registered ``type`` key.

    Returns:
        nn.Module: The constructed RoI extractor.
    """
    return ROI_EXTRACTORS.build(cfg)
def build_shared_head(cfg):
    """Build a shared head from its registry config.

    Args:
        cfg (dict): Config dict with a registered ``type`` key.

    Returns:
        nn.Module: The constructed shared head.
    """
    return SHARED_HEADS.build(cfg)
def build_head(cfg):
    """Build a head module from its registry config.

    Args:
        cfg (dict): Config dict with a registered ``type`` key.

    Returns:
        nn.Module: The constructed head.
    """
    return HEADS.build(cfg)
def build_loss(cfg):
    """Build a loss module from its registry config.

    Args:
        cfg (dict): Config dict with a registered ``type`` key.

    Returns:
        nn.Module: The constructed loss.
    """
    return LOSSES.build(cfg)
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a detector from its registry config.

    Args:
        cfg (dict): Detector config; preferably contains ``train_cfg`` and
            ``test_cfg`` entries itself.
        train_cfg (dict, optional): Deprecated outer training config.
        test_cfg (dict, optional): Deprecated outer testing config.

    Returns:
        nn.Module: The constructed detector.
    """
    # Passing train_cfg/test_cfg from outside is deprecated; they should be
    # embedded in the model config itself.
    if (train_cfg is not None) or (test_cfg is not None):
        warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model', UserWarning)
    # Specifying the same cfg both inside `cfg` and as an outer argument is
    # ambiguous, so it is rejected.
    assert (cfg.get('train_cfg') is None) or (train_cfg is None), 'train_cfg specified in both outer field and model field '
    assert (cfg.get('test_cfg') is None) or (test_cfg is None), 'test_cfg specified in both outer field and model field '
    return DETECTORS.build(cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    """Anchor-free head (FCOS, Fovea, RepPoints, etc.).

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels. Used in child classes.
        stacked_convs (int): Number of stacking convs of the head.
        strides (tuple): Downsample factor of each feature map.
        dcn_on_last_conv (bool): If true, use dcn in the last layer of
            towers. Default: False.
        conv_bias (bool | str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
            None, otherwise False. Default: "auto".
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        bbox_coder (dict): Config of bbox coder. Defaults
            'DistancePointBBoxCoder'.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    # Checkpoint-format version; `_load_from_state_dict` renames keys of
    # checkpoints saved before this attribute existed.
    _version = 1

    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 strides=(4, 8, 16, 32, 64),
                 dcn_on_last_conv=False,
                 conv_bias='auto',
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 bbox_coder=dict(type='DistancePointBBoxCoder'),
                 conv_cfg=None,
                 norm_cfg=None,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='conv_cls',
                         std=0.01,
                         bias_prob=0.01))):
        super(AnchorFreeHead, self).__init__(init_cfg)
        self.num_classes = num_classes
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        # Sigmoid-based classification predicts `num_classes` channels;
        # softmax-based classification needs an extra background channel.
        if self.use_sigmoid_cls:
            self.cls_out_channels = num_classes
        else:
            self.cls_out_channels = num_classes + 1
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.dcn_on_last_conv = dcn_on_last_conv
        assert (conv_bias == 'auto') or isinstance(conv_bias, bool)
        self.conv_bias = conv_bias
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.prior_generator = MlvlPointGenerator(strides)
        # Point-based heads place a single prior per feature-map location.
        self.num_base_priors = self.prior_generator.num_base_priors[0]
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self._init_layers()

    def _init_layers(self):
        """Initialize layers of the head."""
        self._init_cls_convs()
        self._init_reg_convs()
        self._init_predictor()

    def _init_cls_convs(self):
        """Initialize classification conv layers of the head."""
        self.cls_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if (i == 0) else self.feat_channels
            # Optionally replace only the last tower conv with a DCNv2.
            if self.dcn_on_last_conv and (i == (self.stacked_convs - 1)):
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.cls_convs.append(
                ConvModule(chn, self.feat_channels, 3, stride=1, padding=1,
                           conv_cfg=conv_cfg, norm_cfg=self.norm_cfg,
                           bias=self.conv_bias))

    def _init_reg_convs(self):
        """Initialize bbox regression conv layers of the head."""
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if (i == 0) else self.feat_channels
            # Optionally replace only the last tower conv with a DCNv2.
            if self.dcn_on_last_conv and (i == (self.stacked_convs - 1)):
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.reg_convs.append(
                ConvModule(chn, self.feat_channels, 3, stride=1, padding=1,
                           conv_cfg=conv_cfg, norm_cfg=self.norm_cfg,
                           bias=self.conv_bias))

    def _init_predictor(self):
        """Initialize predictor layers of the head."""
        self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys,
                              error_msgs):
        """Hack some keys of the model state dict so that can load checkpoints
        of previous version."""
        version = local_metadata.get('version', None)
        if version is None:
            # Old checkpoints (no `_version` metadata) named the predictor
            # layers `*cls`/`*reg`/`*centerness`; rename them to the current
            # `conv_*` names before delegating to the default loader.
            bbox_head_keys = [k for k in state_dict.keys() if k.startswith(prefix)]
            ori_predictor_keys = []
            new_predictor_keys = []
            for key in bbox_head_keys:
                ori_predictor_keys.append(key)
                key = key.split('.')
                conv_name = None
                if key[1].endswith('cls'):
                    conv_name = 'conv_cls'
                elif key[1].endswith('reg'):
                    conv_name = 'conv_reg'
                elif key[1].endswith('centerness'):
                    conv_name = 'conv_centerness'
                else:
                    # NOTE(review): `assert NotImplementedError` is a no-op
                    # (the exception class is truthy, so the assert always
                    # passes). Non-predictor keys simply fall through and are
                    # removed from the rename list below.
                    assert NotImplementedError
                if conv_name is not None:
                    key[1] = conv_name
                    new_predictor_keys.append('.'.join(key))
                else:
                    # Key needs no renaming; drop it from the pending list.
                    ori_predictor_keys.pop(-1)
            for i in range(len(new_predictor_keys)):
                state_dict[new_predictor_keys[i]] = state_dict.pop(ori_predictor_keys[i])
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually contain classification scores and bbox predictions.
                cls_scores (list[Tensor]): Box scores for each scale level,
                    each is a 4D-tensor, the channel number is
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for each scale
                    level, each is a 4D-tensor, the channel number is
                    num_points * 4.
        """
        # `forward_single` also returns the intermediate cls/reg features;
        # only scores and bbox predictions are exposed here.
        return multi_apply(self.forward_single, feats)[:2]

    def forward_single(self, x):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.

        Returns:
            tuple: Scores for each class, bbox predictions, features
                after classification and regression conv layers, some
                models needs these features like FCOS.
        """
        cls_feat = x
        reg_feat = x
        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)
        cls_score = self.conv_cls(cls_feat)
        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)
        bbox_pred = self.conv_reg(reg_feat)
        return cls_score, bbox_pred, cls_feat, reg_feat

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
        """
        raise NotImplementedError

    @abstractmethod
    def get_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute regression, classification and centerness targets for points
        in multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
        """
        raise NotImplementedError

    def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False):
        """Get points of a single scale level.

        This function will be deprecated soon.
        """
        # NOTE(review): `stride` is unused here; subclasses that override
        # this method use it to scale the grid.
        warnings.warn('`_get_points_single` in `AnchorFreeHead` will be deprecated soon, we support a multi level point generator nowyou can get points of a single level feature map with `self.prior_generator.single_level_grid_priors` ')
        h, w = featmap_size
        x_range = torch.arange(w, device=device).to(dtype)
        y_range = torch.arange(h, device=device).to(dtype)
        y, x = torch.meshgrid(y_range, x_range)
        if flatten:
            y = y.flatten()
            x = x.flatten()
        return y, x

    def get_points(self, featmap_sizes, dtype, device, flatten=False):
        """Get points according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            dtype (torch.dtype): Type of points.
            device (torch.device): Device of points.

        Returns:
            tuple: points of each image.
        """
        warnings.warn('`get_points` in `AnchorFreeHead` will be deprecated soon, we support a multi level point generator nowyou can get points of all levels with `self.prior_generator.grid_priors` ')
        mlvl_points = []
        for i in range(len(featmap_sizes)):
            mlvl_points.append(
                self._get_points_single(featmap_sizes[i], self.strides[i],
                                        dtype, device, flatten))
        return mlvl_points

    def aug_test(self, feats, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            feats (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains features for all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[ndarray]: bbox results of each class
        """
        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
    """Base class for mask heads used in One-Stage Instance Segmentation."""

    def __init__(self, init_cfg):
        super(BaseMaskHead, self).__init__(init_cfg)

    @abstractmethod
    def loss(self, **kwargs):
        pass

    @abstractmethod
    def get_results(self, **kwargs):
        """Get processed :obj:`InstanceData` of multiple images."""
        pass

    def forward_train(self,
                      x,
                      gt_labels,
                      gt_masks,
                      img_metas,
                      gt_bboxes=None,
                      gt_bboxes_ignore=None,
                      positive_infos=None,
                      **kwargs):
        """Run a forward pass and compute mask losses.

        Args:
            x (list[Tensor] | tuple[Tensor]): Features from FPN, each of
                shape (B, C, H, W).
            gt_labels (list[Tensor]): Ground truth labels per image, each of
                shape (num_gts,).
            gt_masks (list[Tensor]): Ground truth masks per box, of shape
                (num_gts, h, w).
            img_metas (list[dict]): Meta information of each image.
            gt_bboxes (list[Tensor], optional): Ground truth bboxes per
                image, each of shape (num_gts, 4).
            gt_bboxes_ignore (list[Tensor], optional): Ignored gt bboxes,
                each of shape (num_ignored_gts, 4).
            positive_infos (list[:obj:`InstanceData`], optional): Positive
                sample information when label assignment happens outside
                this head (e.g. in the bbox head of YOLACT / CondInst);
                None when assignment is done here (e.g. SOLO). Every value
                has shape (num_positive_samples, *).

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        outs = self(x) if positive_infos is None else self(x, positive_infos)
        assert isinstance(outs, tuple), 'Forward results should be a tuple, even if only one item is returned'
        return self.loss(
            *outs,
            gt_labels=gt_labels,
            gt_masks=gt_masks,
            img_metas=img_metas,
            gt_bboxes=gt_bboxes,
            gt_bboxes_ignore=gt_bboxes_ignore,
            positive_infos=positive_infos,
            **kwargs)

    def simple_test(self,
                    feats,
                    img_metas,
                    rescale=False,
                    instances_list=None,
                    **kwargs):
        """Test without test-time augmentation.

        Args:
            feats (tuple[torch.Tensor]): Multi-level features from the
                upstream network, each a 4D tensor.
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.
            instances_list (list[obj:`InstanceData`], optional): Detection
                results per image after post-processing; present only when
                a `bbox_head` exists (YOLACT, CondInst, ...).

        Returns:
            list[obj:`InstanceData`]: Post-processed instance segmentation
            results per image, usually containing ``scores`` of shape
            (num_instances,), ``labels`` of shape (num_instances,) and
            ``masks`` of shape (num_instances, h, w).
        """
        if instances_list is None:
            outs = self(feats)
        else:
            outs = self(feats, instances_list=instances_list)
        return self.get_results(
            *(outs + (img_metas,)),
            rescale=rescale,
            instances_list=instances_list,
            **kwargs)

    def onnx_export(self, img, img_metas):
        raise NotImplementedError(f'{self.__class__.__name__} does not support ONNX EXPORT')
@HEADS.register_module()
class GARetinaHead(GuidedAnchorHead):
    """Guided-Anchor-based RetinaNet head."""

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 **kwargs):
        if init_cfg is None:
            # Default initialization: normal conv init everywhere, with bias
            # priors on the anchor-location branch and the final classifier.
            init_cfg = dict(
                type='Normal',
                layer='Conv2d',
                std=0.01,
                override=[
                    dict(type='Normal', name='conv_loc', std=0.01, bias_prob=0.01),
                    dict(type='Normal', name='retina_cls', std=0.01, bias_prob=0.01),
                ])
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(GARetinaHead, self).__init__(
            num_classes, in_channels, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Build the conv towers, the guided-anchor branches and predictors."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for idx in range(self.stacked_convs):
            in_ch = self.in_channels if idx == 0 else self.feat_channels
            # Append to cls tower first, then reg tower, mirroring the
            # original module-construction order.
            for tower in (self.cls_convs, self.reg_convs):
                tower.append(
                    ConvModule(
                        in_ch,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
        # Anchor location / shape prediction branches.
        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
        self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, 1)
        # Feature adaption conditioned on the predicted anchor shapes.
        self.feature_adaption_cls = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        self.feature_adaption_reg = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        # Masked predictors: at test time only locations passing the
        # loc filter are evaluated.
        self.retina_cls = MaskedConv2d(
            self.feat_channels, self.num_base_priors * self.cls_out_channels,
            3, padding=1)
        self.retina_reg = MaskedConv2d(
            self.feat_channels, self.num_base_priors * 4, 3, padding=1)

    def forward_single(self, x):
        """Forward feature map of a single scale level."""
        cls_feat, reg_feat = x, x
        for layer in self.cls_convs:
            cls_feat = layer(cls_feat)
        for layer in self.reg_convs:
            reg_feat = layer(reg_feat)
        loc_pred = self.conv_loc(cls_feat)
        shape_pred = self.conv_shape(reg_feat)
        cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
        reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
        # During inference, mask out locations below the loc threshold;
        # during training all locations are kept (mask=None).
        mask = None if self.training else (
            loc_pred.sigmoid()[0] >= self.loc_filter_thr)
        cls_score = self.retina_cls(cls_feat, mask)
        bbox_pred = self.retina_reg(reg_feat, mask)
        return cls_score, bbox_pred, shape_pred, loc_pred
@HEADS.register_module()
class LADHead(PAAHead):
    """Label Assignment Head from the paper: `Improving Object Detection by
    Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_"""

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
    def get_label_assignment(self,
                             cls_scores,
                             bbox_preds,
                             iou_preds,
                             gt_bboxes,
                             gt_labels,
                             img_metas,
                             gt_bboxes_ignore=None):
        """Get label assignment (from teacher).

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level.
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            iou_preds (list[Tensor]): iou_preds for each scale
                level with shape (N, num_anchors * 1, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when are computing the loss.

        Returns:
            tuple: Returns a tuple containing label assignment variables.

                - labels (Tensor): Labels of all anchors, each with
                  shape (num_anchors,).
                - labels_weight (Tensor): Label weights of all anchor.
                  each with shape (num_anchors,).
                - bboxes_target (Tensor): BBox targets of all anchors.
                  each with shape (num_anchors, 4).
                - bboxes_weight (Tensor): BBox weights of all anchors.
                  each with shape (num_anchors, 4).
                - pos_inds_flatten (Tensor): Contains all index of positive
                  sample in all anchor.
                - pos_anchors (Tensor): Positive anchors.
                - num_pos (int): Number of positive anchors.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels
        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        # Initial (IoU-based) target assignment, before PAA reassignment.
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels)
        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
         pos_gt_index) = cls_reg_targets
        # Regroup predictions from per-level to per-image layout.
        cls_scores = levels_to_images(cls_scores)
        cls_scores = [item.reshape(-1, self.cls_out_channels) for item in cls_scores]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
        # Per-anchor losses of the current positives drive the GMM-based
        # PAA reassignment below.
        (pos_losses_list,) = multi_apply(self.get_pos_loss, anchor_list,
                                         cls_scores, bbox_preds, labels,
                                         labels_weight, bboxes_target,
                                         bboxes_weight, pos_inds)
        with torch.no_grad():
            # Reassignment is a hard decision; no gradients flow through it.
            (reassign_labels, reassign_label_weight, reassign_bbox_weights,
             num_pos) = multi_apply(self.paa_reassign, pos_losses_list,
                                    labels, labels_weight, bboxes_weight,
                                    pos_inds, pos_gt_index, anchor_list)
            num_pos = sum(num_pos)
        # Flatten the per-image results over the whole batch.
        labels = torch.cat(reassign_labels, 0).view(-1)
        flatten_anchors = torch.cat(
            [torch.cat(item, 0) for item in anchor_list])
        labels_weight = torch.cat(reassign_label_weight, 0).view(-1)
        bboxes_target = torch.cat(bboxes_target, 0).view(-1, bboxes_target[0].size(-1))
        # A label in [0, num_classes) marks a (foreground) positive sample.
        pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape(-1)
        if num_pos:
            pos_anchors = flatten_anchors[pos_inds_flatten]
        else:
            pos_anchors = None
        label_assignment_results = (labels, labels_weight, bboxes_target,
                                    bboxes_weight, pos_inds_flatten,
                                    pos_anchors, num_pos)
        return label_assignment_results

    def forward_train(self,
                      x,
                      label_assignment_results,
                      img_metas,
                      gt_bboxes,
                      gt_labels=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Forward train with the available label assignment (student receives
        from teacher).

        Args:
            x (list[Tensor]): Features from FPN.
            label_assignment_results (tuple): As the outputs defined in the
                function `self.get_label_assignment`.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes (Tensor): Ground truth bboxes of the image,
                shape (num_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).

        Returns:
            losses: (dict[str, Tensor]): A dictionary of loss components.
        """
        outs = self(x)
        if gt_labels is None:
            loss_inputs = outs + (gt_bboxes, img_metas)
        else:
            loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
        losses = self.loss(
            *loss_inputs,
            gt_bboxes_ignore=gt_bboxes_ignore,
            label_assignment_results=label_assignment_results)
        return losses

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             iou_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None,
             label_assignment_results=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            iou_preds (list[Tensor]): iou_preds for each scale
                level with shape (N, num_anchors * 1, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when are computing the loss.
            label_assignment_results (tuple): As the outputs defined in the
                function `self.get_label_assignment`.

        Returns:
            dict[str, Tensor]: A dictionary of loss gmm_assignment.
        """
        # Targets come precomputed from the teacher; this head only scores
        # the student's predictions against them.
        (labels, labels_weight, bboxes_target, bboxes_weight,
         pos_inds_flatten, pos_anchors, num_pos) = label_assignment_results
        # Regroup predictions from per-level to per-image, then flatten
        # over the whole batch.
        cls_scores = levels_to_images(cls_scores)
        cls_scores = [item.reshape(-1, self.cls_out_channels) for item in cls_scores]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
        iou_preds = levels_to_images(iou_preds)
        iou_preds = [item.reshape(-1, 1) for item in iou_preds]
        cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
        bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
        iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))
        losses_cls = self.loss_cls(
            cls_scores,
            labels,
            labels_weight,
            avg_factor=max(num_pos, len(img_metas)))
        if num_pos:
            pos_bbox_pred = self.bbox_coder.decode(
                pos_anchors, bbox_preds[pos_inds_flatten])
            pos_bbox_target = bboxes_target[pos_inds_flatten]
            # The IoU branch regresses towards the actual overlap between
            # decoded boxes and targets (detached: no grad via IoU target).
            iou_target = bbox_overlaps(
                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
            losses_iou = self.loss_centerness(
                iou_preds[pos_inds_flatten],
                iou_target.unsqueeze(-1),
                avg_factor=num_pos)
            losses_bbox = self.loss_bbox(
                pos_bbox_pred, pos_bbox_target, avg_factor=num_pos)
        else:
            # No positives: zero losses that still keep the graph connected.
            losses_iou = iou_preds.sum() * 0
            losses_bbox = bbox_preds.sum() * 0
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
    """Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.

    Identical to the FCOS head except that both the classification and the
    bbox-regression towers use the searched operation sequence
    "dconv3x3, conv3x3, dconv3x3, conv1x1" instead of plain 3x3 convs.
    """

    def __init__(self, *args, init_cfg=None, **kwargs):
        if init_cfg is None:
            # Default init: Caffe2-Xavier for tower convs, normal init with a
            # bias prior for the final classifier.
            init_cfg = [
                dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
                dict(
                    type='Normal',
                    std=0.01,
                    override=[
                        dict(name='conv_reg'),
                        dict(name='conv_centerness'),
                        dict(name='conv_cls', type='Normal', std=0.01, bias_prob=0.01),
                    ]),
            ]
        super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Build the searched conv towers and the predictor layers."""
        dconv3x3 = dict(type='DCNv2', kernel_size=3, use_bias=True, deform_groups=2, padding=1)
        conv3x3 = dict(type='Conv', kernel_size=3, padding=1)
        conv1x1 = dict(type='Conv', kernel_size=1)
        # The searched architecture shared by both towers.
        self.arch_config = [dconv3x3, conv3x3, dconv3x3, conv1x1]
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for idx, cfg in enumerate(self.arch_config):
            # Work on a copy so the pops below leave `arch_config` intact.
            cfg = copy.deepcopy(cfg)
            assert isinstance(cfg, dict)
            in_ch = self.in_channels if idx == 0 else self.feat_channels
            # Extract ConvModule keyword args; what remains is the conv_cfg.
            has_bias = cfg.pop('use_bias', False)
            pad = cfg.pop('padding', 0)
            kernel = cfg.pop('kernel_size')
            layer = ConvModule(
                in_ch,
                self.feat_channels,
                kernel,
                stride=1,
                padding=pad,
                norm_cfg=self.norm_cfg,
                bias=has_bias,
                conv_cfg=cfg)
            # Both towers get an independent copy of the same layer.
            self.cls_convs.append(copy.deepcopy(layer))
            self.reg_convs.append(copy.deepcopy(layer))
        self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        # One learnable scale per FPN stride for the regression outputs.
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
@HEADS.register_module()
class PISARetinaHead(RetinaHead):
    """PISA Retinanet Head.

    The head owns the same structure with Retinanet Head, but differs in two
    aspects:
    1. Importance-based Sample Reweighting Positive (ISR-P) is applied to
       change the positive loss weights.
    2. Classification-aware regression loss is adopted as a third loss.
    """

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj, 4).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprise classification loss, regression loss and
            carl loss.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels
        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        # Sampling results are needed later for ISR-P reweighting.
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            return_sampling_results=True)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, num_total_pos, num_total_neg,
         sampling_results_list) = cls_reg_targets
        num_total_samples = ((num_total_pos + num_total_neg)
                             if self.sampling else num_total_pos)
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        # Concat anchors of all levels per image, then regroup to levels so
        # they align with the flattened predictions below.
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(torch.cat(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors)
        num_imgs = len(img_metas)
        # Flatten predictions and targets to (num_total_anchors, C) layout.
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)
            for cls_score in cls_scores
        ]
        flatten_cls_scores = torch.cat(
            flatten_cls_scores, dim=1).reshape(-1, flatten_cls_scores[0].size(-1))
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_bbox_preds = torch.cat(
            flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))
        flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)
        flatten_label_weights = torch.cat(label_weights_list, dim=1).reshape(-1)
        flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)
        flatten_bbox_targets = torch.cat(bbox_targets_list, dim=1).reshape(-1, 4)
        flatten_bbox_weights = torch.cat(bbox_weights_list, dim=1).reshape(-1, 4)
        # ISR-P: reweight positive samples by importance (no gradient).
        isr_cfg = self.train_cfg.get('isr', None)
        if isr_cfg is not None:
            all_targets = (flatten_labels, flatten_label_weights,
                           flatten_bbox_targets, flatten_bbox_weights)
            with torch.no_grad():
                all_targets = isr_p(
                    flatten_cls_scores,
                    flatten_bbox_preds,
                    all_targets,
                    flatten_anchors,
                    sampling_results_list,
                    bbox_coder=self.bbox_coder,
                    loss_cls=self.loss_cls,
                    num_class=self.num_classes,
                    **self.train_cfg.isr)
            (flatten_labels, flatten_label_weights, flatten_bbox_targets,
             flatten_bbox_weights) = all_targets
        losses_cls = self.loss_cls(
            flatten_cls_scores,
            flatten_labels,
            flatten_label_weights,
            avg_factor=num_total_samples)
        losses_bbox = self.loss_bbox(
            flatten_bbox_preds,
            flatten_bbox_targets,
            flatten_bbox_weights,
            avg_factor=num_total_samples)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        # CARL: classification-aware regression loss as an extra term.
        carl_cfg = self.train_cfg.get('carl', None)
        if carl_cfg is not None:
            loss_carl = carl_loss(
                flatten_cls_scores,
                flatten_labels,
                flatten_bbox_preds,
                flatten_bbox_targets,
                self.loss_bbox,
                **self.train_cfg.carl,
                avg_factor=num_total_pos,
                sigmoid=True,
                num_class=self.num_classes)
            loss_dict.update(loss_carl)
        return loss_dict
@HEADS.register_module()
class PISASSDHead(SSDHead):
    # SSD head with PISA's ISR-P sample reweighting and CARL loss applied
    # on top of the standard SSD loss computation.

    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj, 4).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprise classification loss regression loss and
            carl loss.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels
        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        # Sampling results are needed later for ISR-P reweighting.
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=1,
            unmap_outputs=False,
            return_sampling_results=True)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, num_total_pos, num_total_neg,
         sampling_results_list) = cls_reg_targets
        num_images = len(img_metas)
        # Flatten predictions/targets into (num_images, num_anchors, C).
        all_cls_scores = torch.cat([
            s.permute(0, 2, 3, 1).reshape(num_images, -1, self.cls_out_channels)
            for s in cls_scores
        ], 1)
        all_labels = torch.cat(labels_list, -1).view(num_images, -1)
        all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1)
        all_bbox_preds = torch.cat([
            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
            for b in bbox_preds
        ], -2)
        all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4)
        all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4)
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))
        # ISR-P: reweight positive samples by importance.
        isr_cfg = self.train_cfg.get('isr', None)
        all_targets = (all_labels.view(-1), all_label_weights.view(-1),
                       all_bbox_targets.view(-1, 4), all_bbox_weights.view(-1, 4))
        if isr_cfg is not None:
            all_targets = isr_p(
                all_cls_scores.view(-1, all_cls_scores.size(-1)),
                all_bbox_preds.view(-1, 4),
                all_targets,
                torch.cat(all_anchors),
                sampling_results_list,
                loss_cls=CrossEntropyLoss(),
                bbox_coder=self.bbox_coder,
                **self.train_cfg.isr,
                num_class=self.num_classes)
            (new_labels, new_label_weights, new_bbox_targets,
             new_bbox_weights) = all_targets
            all_labels = new_labels.view(all_labels.shape)
            all_label_weights = new_label_weights.view(all_label_weights.shape)
            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
        # CARL: classification-aware regression loss as an extra term
        # (computed on the possibly ISR-reweighted targets).
        carl_loss_cfg = self.train_cfg.get('carl', None)
        if carl_loss_cfg is not None:
            loss_carl = carl_loss(
                all_cls_scores.view(-1, all_cls_scores.size(-1)),
                all_targets[0],
                all_bbox_preds.view(-1, 4),
                all_targets[2],
                SmoothL1Loss(beta=1.0),
                **self.train_cfg.carl,
                avg_factor=num_total_pos,
                num_class=self.num_classes)
        # Guard against numeric blow-ups before computing the final losses.
        assert torch.isfinite(all_cls_scores).all().item(), 'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), 'bbox predications become infinite or NaN!'
        losses_cls, losses_bbox = multi_apply(
            self.loss_single,
            all_cls_scores,
            all_bbox_preds,
            all_anchors,
            all_labels,
            all_label_weights,
            all_bbox_targets,
            all_bbox_weights,
            num_total_samples=num_total_pos)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        if carl_loss_cfg is not None:
            loss_dict.update(loss_carl)
        return loss_dict
@HEADS.register_module()
class RetinaHead(AnchorHead):
    """An anchor-based head used in `RetinaNet
    <https://arxiv.org/pdf/1708.02002.pdf>`_.

    The head contains two subnetworks. The first classifies anchor boxes and
    the second regresses deltas for the anchors.

    Example:
        >>> import torch
        >>> self = RetinaHead(11, 7)
        >>> x = torch.rand(1, 7, 32, 32)
        >>> cls_score, bbox_pred = self.forward_single(x)
        >>> # Each anchor predicts a score for each class except background
        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
        >>> assert cls_per_anchor == (self.num_classes)
        >>> assert box_per_anchor == 4
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     octave_base_scale=4,
                     scales_per_octave=3,
                     ratios=[0.5, 1.0, 2.0],
                     strides=[8, 16, 32, 64, 128]),
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='retina_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs):
        # Stash tower hyper-parameters before the parent ctor triggers
        # ``_init_layers`` on this subclass.
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(RetinaHead, self).__init__(
            num_classes,
            in_channels,
            anchor_generator=anchor_generator,
            init_cfg=init_cfg,
            **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        cls_tower = []
        reg_tower = []
        for depth in range(self.stacked_convs):
            in_ch = self.in_channels if depth == 0 else self.feat_channels
            conv_kwargs = dict(
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
            cls_tower.append(
                ConvModule(in_ch, self.feat_channels, 3, **conv_kwargs))
            reg_tower.append(
                ConvModule(in_ch, self.feat_channels, 3, **conv_kwargs))
        self.cls_convs = nn.ModuleList(cls_tower)
        self.reg_convs = nn.ModuleList(reg_tower)
        # Final prediction convs: one score per (anchor, class) and four
        # box deltas per anchor.
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_base_priors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_base_priors * 4, 3, padding=1)

    def forward_single(self, x):
        """Forward feature of a single scale level.

        Args:
            x (Tensor): Features of a single scale level.

        Returns:
            tuple:
                cls_score (Tensor): Cls scores for a single scale level,
                    the channels number is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level, the channels number is num_anchors * 4.
        """
        cls_feat, reg_feat = x, x
        for layer in self.cls_convs:
            cls_feat = layer(cls_feat)
        for layer in self.reg_convs:
            reg_feat = layer(reg_feat)
        return self.retina_cls(cls_feat), self.retina_reg(reg_feat)
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
    """"RetinaHead with separate BN.

    In RetinaHead, conv/norm layers are shared across different FPN levels,
    while in RetinaSepBNHead, conv layers are shared across different FPN
    levels, but BN layers are separated.
    """

    def __init__(self,
                 num_classes,
                 num_ins,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 **kwargs):
        assert init_cfg is None, 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.num_ins = num_ins
        super(RetinaSepBNHead, self).__init__(
            num_classes, in_channels, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head.

        Fix: the original code reused ``i`` as both the per-level and the
        per-depth loop index (the inner loop shadowed the outer one). The
        behavior was unaffected, but the indices are renamed here to
        ``level``/``depth`` to remove the shadowing hazard.
        """
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        # One tower per pyramid level so each level owns its norm layers.
        for _level in range(self.num_ins):
            cls_convs = nn.ModuleList()
            reg_convs = nn.ModuleList()
            for depth in range(self.stacked_convs):
                chn = self.in_channels if depth == 0 else self.feat_channels
                cls_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
                reg_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            self.cls_convs.append(cls_convs)
            self.reg_convs.append(reg_convs)
        # Share the conv weights (not the norm layers) across levels by
        # aliasing every level's conv to level 0's conv.
        for depth in range(self.stacked_convs):
            for level in range(1, self.num_ins):
                self.cls_convs[level][depth].conv = self.cls_convs[0][depth].conv
                self.reg_convs[level][depth].conv = self.reg_convs[0][depth].conv
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_base_priors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_base_priors * 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        super(RetinaSepBNHead, self).init_weights()
        # Only level 0's convs need explicit init: the other levels alias
        # the same conv weights (see ``_init_layers``).
        for m in self.cls_convs[0]:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs[0]:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually a tuple of classification scores and bbox
                prediction.

                cls_scores (list[Tensor]): Classification scores for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for i, x in enumerate(feats):
            # Use the enumerated feature directly (the original re-indexed
            # ``feats[i]`` and left ``x`` unused).
            cls_feat = x
            reg_feat = x
            for cls_conv in self.cls_convs[i]:
                cls_feat = cls_conv(cls_feat)
            for reg_conv in self.reg_convs[i]:
                reg_feat = reg_conv(reg_feat)
            cls_scores.append(self.retina_cls(cls_feat))
            bbox_preds.append(self.retina_reg(reg_feat))
        return cls_scores, bbox_preds
@HEADS.register_module()
class SSDHead(AnchorHead):
    """SSD head used in https://arxiv.org/abs/1512.02325.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (tuple[int]): Number of channels in each input feature
            map.
        stacked_convs (int): Number of conv layers in cls and reg tower.
            Default: 0.
        feat_channels (int): Number of hidden channels when stacked_convs
            > 0. Default: 256.
        use_depthwise (bool): Whether to use DepthwiseSeparableConv.
            Default: False.
        conv_cfg (dict): Dictionary to construct and config conv layer.
            Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: None.
        act_cfg (dict): Dictionary to construct and config activation layer.
            Default: None.
        anchor_generator (dict): Config dict for anchor generator.
        bbox_coder (dict): Config of bounding box coder.
        reg_decoded_bbox (bool): If true, the regression loss would be
            applied directly on decoded bounding boxes, converting both
            the predicted boxes and regression targets to absolute
            coordinates format. Default False. It should be `True` when
            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_classes=80,
                 in_channels=(512, 1024, 512, 256, 256, 256),
                 stacked_convs=0,
                 feat_channels=256,
                 use_depthwise=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 anchor_generator=dict(
                     type='SSDAnchorGenerator',
                     scale_major=False,
                     input_size=300,
                     strides=[8, 16, 32, 64, 100, 300],
                     ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
                     basesize_ratio_range=(0.1, 0.9)),
                 bbox_coder=dict(
                     type='DeltaXYWHBBoxCoder',
                     clip_border=True,
                     target_means=[0.0, 0.0, 0.0, 0.0],
                     target_stds=[1.0, 1.0, 1.0, 1.0]),
                 reg_decoded_bbox=False,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=dict(
                     type='Xavier',
                     layer='Conv2d',
                     distribution='uniform',
                     bias=0)):
        # NOTE: deliberately skips AnchorHead.__init__ (calls the grandparent
        # BaseDenseHead ctor) because SSD builds its own layers/targets.
        super(AnchorHead, self).__init__(init_cfg)
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.stacked_convs = stacked_convs
        self.feat_channels = feat_channels
        self.use_depthwise = use_depthwise
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # SSD uses softmax with an explicit background class.
        self.cls_out_channels = (num_classes + 1)
        self.prior_generator = build_prior_generator(anchor_generator)
        # list[int]: number of base priors per location, one entry per level.
        self.num_base_priors = self.prior_generator.num_base_priors
        self._init_layers()
        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.reg_decoded_bbox = reg_decoded_bbox
        self.use_sigmoid_cls = False
        self.cls_focal_loss = False
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        # SSD sampling=False means use all anchors (hard negative mining in
        # ``loss_single`` instead of a sampler).
        self.sampling = False
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        self.fp16_enabled = False

    @property
    def num_anchors(self):
        """
        Returns:
            list[int]: Number of base_anchors on each point of each level.
        """
        # Deprecated alias kept for backward compatibility.
        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, please use "num_base_priors" instead')
        return self.num_base_priors

    def _init_layers(self):
        """Initialize layers of the head."""
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        conv = (DepthwiseSeparableConvModule
                if self.use_depthwise else ConvModule)
        # One (cls, reg) tower per input feature level; each level may have a
        # different channel count and prior count.
        for (channel, num_base_priors) in zip(self.in_channels,
                                              self.num_base_priors):
            cls_layers = []
            reg_layers = []
            in_channel = channel
            for i in range(self.stacked_convs):
                cls_layers.append(
                    conv(
                        in_channel,
                        self.feat_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                reg_layers.append(
                    conv(
                        in_channel,
                        self.feat_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                in_channel = self.feat_channels
            if self.use_depthwise:
                # Extra depthwise 3x3 before the 1x1 prediction conv.
                cls_layers.append(
                    ConvModule(
                        in_channel,
                        in_channel,
                        3,
                        padding=1,
                        groups=in_channel,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                reg_layers.append(
                    ConvModule(
                        in_channel,
                        in_channel,
                        3,
                        padding=1,
                        groups=in_channel,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            cls_layers.append(
                nn.Conv2d(
                    in_channel,
                    (num_base_priors * self.cls_out_channels),
                    kernel_size=(1 if self.use_depthwise else 3),
                    padding=(0 if self.use_depthwise else 1)))
            reg_layers.append(
                nn.Conv2d(
                    in_channel,
                    (num_base_priors * 4),
                    kernel_size=(1 if self.use_depthwise else 3),
                    padding=(0 if self.use_depthwise else 1)))
            self.cls_convs.append(nn.Sequential(*cls_layers))
            self.reg_convs.append(nn.Sequential(*reg_layers))

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple:
                cls_scores (list[Tensor]): Classification scores for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for (feat, reg_conv, cls_conv) in zip(feats, self.reg_convs,
                                              self.cls_convs):
            cls_scores.append(cls_conv(feat))
            bbox_preds.append(reg_conv(feat))
        return (cls_scores, bbox_preds)

    def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
                    bbox_targets, bbox_weights, num_total_samples):
        """Compute loss of a single image.

        Args:
            cls_score (Tensor): Box scores for each image,
                shape (num_total_anchors, num_classes).
            bbox_pred (Tensor): Box energies / deltas for each image,
                shape (num_total_anchors, 4).
            anchor (Tensor): Box reference for each scale level with shape
                (num_total_anchors, 4).
            labels (Tensor): Labels of each anchor with shape
                (num_total_anchors,).
            label_weights (Tensor): Label weights of each anchor with shape
                (num_total_anchors,).
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (num_total_anchors, 4).
            bbox_weights (Tensor): BBox regression loss weights of each anchor
                with shape (num_total_anchors, 4).
            num_total_samples (int): If sampling, num total samples equal to
                the number of total anchors; Otherwise, it is the number of
                positive anchors.

        Returns:
            tuple[Tensor, Tensor]: Classification loss (1-element tensor) and
            regression loss.
        """
        loss_cls_all = (F.cross_entropy(cls_score, labels, reduction='none') *
                        label_weights)
        # Foreground: 0 <= label < num_classes; background label equals
        # num_classes.
        pos_inds = ((labels >= 0) &
                    (labels < self.num_classes)).nonzero(
                        as_tuple=False).reshape((- 1))
        neg_inds = (labels == self.num_classes).nonzero(
            as_tuple=False).view((- 1))
        # Online hard negative mining: keep only the neg_pos_ratio hardest
        # negatives (highest classification loss).
        num_pos_samples = pos_inds.size(0)
        num_neg_samples = (self.train_cfg.neg_pos_ratio * num_pos_samples)
        if (num_neg_samples > neg_inds.size(0)):
            num_neg_samples = neg_inds.size(0)
        (topk_loss_cls_neg, _) = loss_cls_all[neg_inds].topk(num_neg_samples)
        loss_cls_pos = loss_cls_all[pos_inds].sum()
        loss_cls_neg = topk_loss_cls_neg.sum()
        loss_cls = ((loss_cls_pos + loss_cls_neg) / num_total_samples)
        if self.reg_decoded_bbox:
            # Decode deltas to absolute boxes so IoU-style losses apply.
            bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
        loss_bbox = smooth_l1_loss(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            beta=self.train_cfg.smoothl1_beta,
            avg_factor=num_total_samples)
        # loss_cls[None] keeps a 1-dim tensor so multi_apply can collect it.
        return (loss_cls[None], loss_bbox)

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                with shape (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            gt_bboxes (list[Tensor]): each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=1,
            unmap_outputs=False)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg) = cls_reg_targets

        num_images = len(img_metas)
        # Flatten all levels into per-image tensors; ``loss_single`` then runs
        # once per image (hard negative mining is per-image).
        all_cls_scores = torch.cat([
            s.permute(0, 2, 3, 1).reshape(num_images, (- 1),
                                          self.cls_out_channels)
            for s in cls_scores
        ], 1)
        all_labels = torch.cat(labels_list, (- 1)).view(num_images, (- 1))
        all_label_weights = torch.cat(label_weights_list,
                                      (- 1)).view(num_images, (- 1))
        all_bbox_preds = torch.cat([
            b.permute(0, 2, 3, 1).reshape(num_images, (- 1), 4)
            for b in bbox_preds
        ], (- 2))
        all_bbox_targets = torch.cat(bbox_targets_list,
                                     (- 2)).view(num_images, (- 1), 4)
        all_bbox_weights = torch.cat(bbox_weights_list,
                                     (- 2)).view(num_images, (- 1), 4)
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))
        (losses_cls, losses_bbox) = multi_apply(
            self.loss_single,
            all_cls_scores,
            all_bbox_preds,
            all_anchors,
            all_labels,
            all_label_weights,
            all_bbox_targets,
            all_bbox_weights,
            num_total_samples=num_total_pos)
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
@DETECTORS.register_module()
class ATSS(SingleStageDetector):
    """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Pure configuration wrapper: everything is forwarded to the generic
        # single-stage pipeline.
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class AutoAssign(SingleStageDetector):
    """Implementation of `AutoAssign: Differentiable Label Assignment for Dense
    Object Detection <https://arxiv.org/abs/2007.03496>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Consistency fix: accept and forward ``init_cfg`` like the sibling
        # single-stage detectors (ATSS, FCOS, CenterNet). The default of
        # ``None`` keeps the old call signature backward compatible.
        super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg,
                                         test_cfg, pretrained, init_cfg)
class BaseDetector(BaseModule, metaclass=ABCMeta):
    """Base class for detectors.

    Provides the shared train/test dispatch (``forward`` routes to
    ``forward_train`` / ``forward_test``), loss aggregation/logging
    (``_parse_losses``), runner hooks (``train_step`` / ``val_step``) and
    result visualization (``show_result``). Concrete detectors must implement
    ``extract_feat``, ``simple_test`` and ``aug_test``.
    """

    def __init__(self, init_cfg=None):
        super(BaseDetector, self).__init__(init_cfg)
        # Flipped by mmcv's fp16 utilities when mixed precision is enabled.
        self.fp16_enabled = False

    @property
    def with_neck(self):
        """bool: whether the detector has a neck"""
        return (hasattr(self, 'neck') and (self.neck is not None))

    @property
    def with_shared_head(self):
        """bool: whether the detector has a shared head in the RoI Head"""
        return (hasattr(self, 'roi_head') and self.roi_head.with_shared_head)

    @property
    def with_bbox(self):
        """bool: whether the detector has a bbox head"""
        return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) or
                (hasattr(self, 'bbox_head') and (self.bbox_head is not None)))

    @property
    def with_mask(self):
        """bool: whether the detector has a mask head"""
        return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) or
                (hasattr(self, 'mask_head') and (self.mask_head is not None)))

    @abstractmethod
    def extract_feat(self, imgs):
        """Extract features from images."""
        pass

    def extract_feats(self, imgs):
        """Extract features from multiple images.

        Args:
            imgs (list[torch.Tensor]): A list of images. The images are
                augmented from the same image but in different ways.

        Returns:
            list[torch.Tensor]: Features of different images
        """
        assert isinstance(imgs, list)
        return [self.extract_feat(img) for img in imgs]

    def forward_train(self, imgs, img_metas, **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys, see
                :class:`mmdet.datasets.pipelines.Collect`.
            kwargs (keyword arguments): Specific to concrete implementation.
        """
        # Record the padded batch shape so heads can recover per-image
        # valid regions; subclasses extend this with the actual training.
        batch_input_shape = tuple(imgs[0].size()[(- 2):])
        for img_meta in img_metas:
            img_meta['batch_input_shape'] = batch_input_shape

    async def async_simple_test(self, img, img_metas, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def simple_test(self, img, img_metas, **kwargs):
        pass

    @abstractmethod
    def aug_test(self, imgs, img_metas, **kwargs):
        """Test function with test time augmentation."""
        pass

    async def aforward_test(self, *, img, img_metas, **kwargs):
        # Async analogue of ``forward_test``; only single-aug, batch-size-1
        # inference is supported.
        for (var, name) in [(img, 'img'), (img_metas, 'img_metas')]:
            if (not isinstance(var, list)):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(img)
        if (num_augs != len(img_metas)):
            raise ValueError(f'num of augmentations ({len(img)}) != num of image metas ({len(img_metas)})')
        samples_per_gpu = img[0].size(0)
        assert (samples_per_gpu == 1)
        if (num_augs == 1):
            return (await self.async_simple_test(img[0], img_metas[0], **kwargs))
        else:
            raise NotImplementedError

    def forward_test(self, imgs, img_metas, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
        """
        for (var, name) in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if (not isinstance(var, list)):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(imgs)
        if (num_augs != len(img_metas)):
            raise ValueError(f'num of augmentations ({len(imgs)}) != num of image meta ({len(img_metas)})')
        # Stamp the padded shape per augmentation, mirroring forward_train.
        for (img, img_meta) in zip(imgs, img_metas):
            batch_size = len(img_meta)
            for img_id in range(batch_size):
                img_meta[img_id]['batch_input_shape'] = tuple(img.size()[(- 2):])
        if (num_augs == 1):
            # proposals (List[List[Tensor]]): unwrap the single-aug level.
            if ('proposals' in kwargs):
                kwargs['proposals'] = kwargs['proposals'][0]
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            assert (imgs[0].size(0) == 1), f'aug test does not support inference with batch size {imgs[0].size(0)}'
            assert ('proposals' not in kwargs)
            return self.aug_test(imgs, img_metas, **kwargs)

    @auto_fp16(apply_to=('img',))
    def forward(self, img, img_metas, return_loss=True, **kwargs):
        """Calls either :func:`forward_train` or :func:`forward_test` depending
        on whether ``return_loss`` is ``True``.

        Note this setting will change the expected inputs. When
        ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``resturn_loss=False``, img and img_meta
        should be double nested (i.e. List[Tensor], List[List[dict]]), with
        the outer list indicating test time augmentations.
        """
        if torch.onnx.is_in_onnx_export():
            assert (len(img_metas) == 1)
            return self.onnx_export(img[0], img_metas[0])
        if return_loss:
            return self.forward_train(img, img_metas, **kwargs)
        else:
            return self.forward_test(img, img_metas, **kwargs)

    def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
            which may be a weighted sum of all losses, log_vars contains
            all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for (loss_name, loss_value) in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum((_loss.mean() for _loss in loss_value))
            else:
                raise TypeError(f'{loss_name} is not a tensor or list of tensors')
        # Total loss = sum of every entry whose key contains 'loss'.
        loss = sum((_value for (_key, _value) in log_vars.items() if ('loss' in _key)))
        if (dist.is_available() and dist.is_initialized()):
            # Sanity check: all ranks must log the same set of variables,
            # otherwise the all_reduce below would silently mix values.
            log_var_length = torch.tensor(len(log_vars), device=loss.device)
            dist.all_reduce(log_var_length)
            message = (((f'rank {dist.get_rank()}' + f' len(log_vars): {len(log_vars)}') + ' keys: ') + ','.join(log_vars.keys()))
            assert (log_var_length == (len(log_vars) * dist.get_world_size())), ('loss log variables are different across GPUs!\n' + message)
        log_vars['loss'] = loss
        for (loss_name, loss_value) in log_vars.items():
            # Reduce loss when distributed training so every rank logs the
            # same averaged numbers.
            if (dist.is_available() and dist.is_initialized()):
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()
        return (loss, log_vars)

    def train_step(self, data, optimizer):
        """The iteration step during training.

        This method defines an iteration step during training, except for the
        back propagation and optimizer updating, which are done in an optimizer
        hook. Note that in some complicated cases or models, the whole process
        including back propagation and optimizer updating is also defined in
        this method, such as GAN.

        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
            ``num_samples``.

            - ``loss`` is a tensor for back propagation, which can be a
              weighted sum of multiple losses.
            - ``log_vars`` contains all the variables to be sent to the
              logger.
            - ``num_samples`` indicates the batch size (when the model is
              DDP, it means the batch size on each GPU), which is used for
              averaging the logs.
        """
        losses = self(**data)
        (loss, log_vars) = self._parse_losses(losses)
        outputs = dict(loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
        return outputs

    def val_step(self, data, optimizer=None):
        """The iteration step during validation.

        This method shares the same signature as :func:`train_step`, but used
        during val epochs. Note that the evaluation after training epochs is
        not implemented with this method, but an evaluation hook.
        """
        losses = self(**data)
        (loss, log_vars) = self._parse_losses(losses)
        outputs = dict(loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
        return outputs

    def show_result(self,
                    img,
                    result,
                    score_thr=0.3,
                    bbox_color=(72, 101, 241),
                    text_color=(72, 101, 241),
                    mask_color=None,
                    thickness=2,
                    font_size=13,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor or tuple): The results to draw over `img`
                bbox_result or (bbox_result, segm_result).
            score_thr (float, optional): Minimum score of bboxes to be shown.
                Default: 0.3.
            bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox
                lines. The tuple of color should be in BGR order.
            text_color (str or tuple(int) or :obj:`Color`): Color of texts.
                The tuple of color should be in BGR order.
            mask_color (None or str or tuple(int) or :obj:`Color`):
                Color of masks. The tuple of color should be in BGR order.
                Default: None
            thickness (int): Thickness of lines. Default: 2
            font_size (int): Font size of texts. Default: 13
            win_name (str): The window name. Default: ''
            wait_time (float): Value of waitKey param. Default: 0.
            show (bool): Whether to show the image. Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.

        Returns:
            img (Tensor): Only if not `show` or `out_file`
        """
        img = mmcv.imread(img)
        img = img.copy()
        if isinstance(result, tuple):
            (bbox_result, segm_result) = result
            if isinstance(segm_result, tuple):
                # some models return (segm_results, segm_cls_score)
                segm_result = segm_result[0]
        else:
            (bbox_result, segm_result) = (result, None)
        bboxes = np.vstack(bbox_result)
        # One label per bbox, derived from the per-class list index.
        labels = [np.full(bbox.shape[0], i, dtype=np.int32) for (i, bbox) in enumerate(bbox_result)]
        labels = np.concatenate(labels)
        segms = None
        if ((segm_result is not None) and (len(labels) > 0)):
            segms = mmcv.concat_list(segm_result)
            if isinstance(segms[0], torch.Tensor):
                segms = torch.stack(segms, dim=0).detach().cpu().numpy()
            else:
                segms = np.stack(segms, axis=0)
        # if out_file specified, do not show image in window
        if (out_file is not None):
            show = False
        img = imshow_det_bboxes(
            img,
            bboxes,
            labels,
            segms,
            class_names=self.CLASSES,
            score_thr=score_thr,
            bbox_color=bbox_color,
            text_color=text_color,
            mask_color=mask_color,
            thickness=thickness,
            font_size=font_size,
            win_name=win_name,
            show=show,
            wait_time=wait_time,
            out_file=out_file)
        if (not (show or out_file)):
            return img

    def onnx_export(self, img, img_metas):
        raise NotImplementedError(f'{self.__class__.__name__} does not support ONNX EXPORT')
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
    """Implementation of `Cascade R-CNN: Delving into High Quality Object
    Detection <https://arxiv.org/abs/1906.09756>`_"""

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Configuration-only wrapper around the generic two-stage pipeline.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)

    def show_result(self, data, result, **kwargs):
        """Show prediction results of the detector.

        Args:
            data (str or np.ndarray): Image filename or loaded image.
            result (Tensor or tuple): The results to draw over `img`
                bbox_result or (bbox_result, segm_result).

        Returns:
            np.ndarray: The image with bboxes drawn on it.
        """
        # Cascade results may come as per-stage dicts; unwrap the ensembled
        # entry before delegating to the base visualizer.
        if self.with_mask:
            ms_bbox_result, ms_segm_result = result
            if isinstance(ms_bbox_result, dict):
                result = (ms_bbox_result['ensemble'],
                          ms_segm_result['ensemble'])
        else:
            if isinstance(result, dict):
                result = result['ensemble']
        return super().show_result(data, result, **kwargs)
@DETECTORS.register_module()
class CenterNet(SingleStageDetector):
    """Implementation of CenterNet(Objects as Points)

    <https://arxiv.org/abs/1904.07850>.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg,
                                        test_cfg, pretrained, init_cfg)

    def merge_aug_results(self, aug_results, with_nms):
        """Merge augmented detection bboxes and score.

        Args:
            aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
                image.
            with_nms (bool): If True, do nms before return boxes.

        Returns:
            tuple: (out_bboxes, out_labels)
        """
        (recovered_bboxes, aug_labels) = ([], [])
        # Each entry is a one-image bbox_list: [(det_bboxes, det_labels)].
        for single_result in aug_results:
            recovered_bboxes.append(single_result[0][0])
            aug_labels.append(single_result[0][1])
        bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()
        labels = torch.cat(aug_labels).contiguous()
        if with_nms:
            (out_bboxes, out_labels) = self.bbox_head._bboxes_nms(
                bboxes, labels, self.bbox_head.test_cfg)
        else:
            (out_bboxes, out_labels) = (bboxes, labels)
        return (out_bboxes, out_labels)

    def aug_test(self, imgs, img_metas, rescale=True):
        """Augment testing of CenterNet. Aug test must have flipped image pair,
        and unlike CornerNet, it will perform an averaging operation on the
        feature map instead of detecting bbox.

        Args:
            imgs (list[Tensor]): Augmented images.
            img_metas (list[list[dict]]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            rescale (bool): If True, return boxes in original image space.
                Default: True.

        Note:
            ``imgs`` must including flipped image pairs.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        img_inds = list(range(len(imgs)))
        # Sums two flip flags: passes when at least one of the pair is
        # flipped (a truthy sum), i.e. imgs come in (orig, flipped) pairs.
        assert (img_metas[0][0]['flip'] + img_metas[1][0]['flip']), 'aug test must have flipped image pair'
        aug_results = []
        for (ind, flip_ind) in zip(img_inds[0::2], img_inds[1::2]):
            flip_direction = img_metas[flip_ind][0]['flip_direction']
            # Run original and flipped image through the network as one batch.
            img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
            x = self.extract_feat(img_pair)
            (center_heatmap_preds, wh_preds, offset_preds) = self.bbox_head(x)
            assert (len(center_heatmap_preds) == len(wh_preds) == len(offset_preds) == 1)
            # Average heatmap/wh predictions with their un-flipped
            # counterparts; offsets are taken from the original image only.
            center_heatmap_preds[0] = ((center_heatmap_preds[0][0:1] + flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2)
            wh_preds[0] = ((wh_preds[0][0:1] + flip_tensor(wh_preds[0][1:2], flip_direction)) / 2)
            bbox_list = self.bbox_head.get_bboxes(
                center_heatmap_preds,
                wh_preds, [offset_preds[0][0:1]],
                img_metas[ind],
                rescale=rescale,
                with_nms=False)
            aug_results.append(bbox_list)
        nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)
        if (nms_cfg is None):
            with_nms = False
        else:
            with_nms = True
        bbox_list = [self.merge_aug_results(aug_results, with_nms)]
        bbox_results = [
            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
            for (det_bboxes, det_labels) in bbox_list
        ]
        return bbox_results
@DETECTORS.register_module()
class DeformableDETR(DETR):
    """Deformable DETR detector (registered variant of :class:`DETR`)."""

    def __init__(self, *args, **kwargs):
        # NOTE: ``super(DETR, self)`` is deliberate, not a typo. It skips
        # ``DETR.__init__`` (which hard-codes ``neck=None``) and calls
        # ``SingleStageDetector.__init__`` directly, so callers can pass a
        # neck through ``*args`` / ``**kwargs``.
        super(DETR, self).__init__(*args, **kwargs)
@DETECTORS.register_module()
class DETR(SingleStageDetector):
    """Implementation of `DETR: End-to-End Object Detection with
    Transformers <https://arxiv.org/pdf/2005.12872>`_"""

    def __init__(self,
                 backbone,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # DETR feeds backbone features straight to the transformer head,
        # hence the neck slot is fixed to None.
        super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
                                   test_cfg, pretrained, init_cfg)

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        warnings.warn('Warning! MultiheadAttention in DETR does not support flops computation! Do not use the results in your papers!')
        batch_size, _, height, width = img.shape
        # The head needs per-image metas even for a dummy pass.
        dummy_img_metas = []
        for _ in range(batch_size):
            dummy_img_metas.append(
                dict(
                    batch_input_shape=(height, width),
                    img_shape=(height, width, 3)))
        feats = self.extract_feat(img)
        return self.bbox_head(feats, dummy_img_metas)

    def onnx_export(self, img, img_metas):
        """Test function for exporting to ONNX, without test time augmentation.

        Args:
            img (torch.Tensor): input images.
            img_metas (list[dict]): List of image information.

        Returns:
            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
                and class labels of shape [N, num_det].
        """
        feats = self.extract_feat(img)
        outs = self.bbox_head.forward_onnx(feats, img_metas)
        # Record the dynamic input shape for the head's ONNX post-processing.
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
        det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
        return det_bboxes, det_labels
@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
    """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""

    def __init__(self,
                 backbone,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        # Fast R-CNN consumes precomputed proposals, so no rpn_head is built.
        super(FastRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
            proposals (List[List[Tensor]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. The Tensor should have a shape Px4, where
                P is the number of proposals.

        Raises:
            TypeError: if ``imgs`` or ``img_metas`` is not a list.
            ValueError: if the number of augmentations mismatches.
            NotImplementedError: for multi-augmentation (aug) testing.
        """
        for (var, name) in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if (not isinstance(var, list)):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(imgs)
        if (num_augs != len(img_metas)):
            raise ValueError(f'num of augmentations ({len(imgs)}) != num of image meta ({len(img_metas)})')
        if (num_augs == 1):
            return self.simple_test(imgs[0], img_metas[0], proposals[0],
                                    **kwargs)
        else:
            # BUGFIX: the original line was ``assert NotImplementedError``,
            # which always passes (the exception class is truthy) and then
            # silently returned None. Raise explicitly instead.
            raise NotImplementedError(
                'aug_test is not implemented for FastRCNN')
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
    """Faster R-CNN detector (`paper <https://arxiv.org/abs/1506.01497>`_).

    Thin wrapper that forwards its configuration to
    :class:`TwoStageDetector` unchanged.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class FCOS(SingleStageDetector):
    """FCOS detector (`paper <https://arxiv.org/abs/1904.01355>`_).

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class FOVEA(SingleStageDetector):
    """FoveaBox detector (`paper <https://arxiv.org/abs/1904.03797>`_).

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class FSAF(SingleStageDetector):
    """FSAF detector (`paper <https://arxiv.org/abs/1903.00621>`_).

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class GFL(SingleStageDetector):
    """Single-stage detector built around a GFL bbox head.

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class GridRCNN(TwoStageDetector):
    """Grid R-CNN detector.

    Implements:
    - Grid R-CNN (https://arxiv.org/abs/1811.12030)
    - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)

    Thin wrapper that forwards its configuration to
    :class:`TwoStageDetector` unchanged.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class HybridTaskCascade(CascadeRCNN):
    """HTC detector (`paper <https://arxiv.org/abs/1901.07518>`_).

    Identical in behavior to :class:`CascadeRCNN` apart from exposing
    whether the RoI head owns a semantic branch.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def with_semantic(self):
        """bool: True when the RoI head carries a semantic head."""
        return self.roi_head.with_semantic
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
    """Implementation of `Distilling the Knowledge in a Neural Network.
    <https://arxiv.org/abs/1503.02531>`_.

    Args:
        teacher_config (str | dict): Config file path
            or the config object of teacher model.
        teacher_ckpt (str, optional): Checkpoint path of teacher model.
            If left as None, the model will not load any weights.
        eval_teacher (bool): Keep the teacher permanently in eval mode
            when True; otherwise it follows the student's train mode.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 teacher_config,
                 teacher_ckpt=None,
                 eval_teacher=True,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
        self.eval_teacher = eval_teacher
        # Build the teacher model from its own config; a path is loaded
        # into a Config object first.
        if isinstance(teacher_config, str):
            teacher_config = mmcv.Config.fromfile(teacher_config)
        self.teacher_model = build_detector(teacher_config['model'])
        if (teacher_ckpt is not None):
            load_checkpoint(self.teacher_model, teacher_ckpt,
                            map_location='cpu')

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels,
                      gt_bboxes_ignore=None):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.extract_feat(img)
        # Teacher forward runs without gradients; its raw head outputs are
        # fed to the student head as distillation targets.
        with torch.no_grad():
            teacher_x = self.teacher_model.extract_feat(img)
            out_teacher = self.teacher_model.bbox_head(teacher_x)
        losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
                                              gt_bboxes, gt_labels,
                                              gt_bboxes_ignore)
        return losses

    def cuda(self, device=None):
        """Since teacher_model is registered as a plain object, it is necessary
        to put the teacher model to cuda when calling cuda function."""
        self.teacher_model.cuda(device=device)
        return super().cuda(device=device)

    def train(self, mode=True):
        """Set the same train mode for teacher and student model."""
        # With eval_teacher the teacher is pinned to eval() regardless of
        # the student's mode (keeps BN/dropout deterministic).
        if self.eval_teacher:
            self.teacher_model.train(False)
        else:
            self.teacher_model.train(mode)
        super().train(mode)

    def __setattr__(self, name, value):
        """Set attribute, i.e. self.name = value

        This reloading prevent the teacher model from being registered as a
        nn.Module. The teacher module is registered as a plain object, so that
        the teacher parameters will not show up when calling
        ``self.parameters``, ``self.modules``, ``self.children`` methods.
        """
        if (name == 'teacher_model'):
            # Bypass nn.Module.__setattr__ so the teacher is not tracked
            # as a submodule (and thus excluded from optimization/saving).
            object.__setattr__(self, name, value)
        else:
            super().__setattr__(name, value)
@DETECTORS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
    """Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 teacher_backbone,
                 teacher_neck,
                 teacher_bbox_head,
                 teacher_ckpt,
                 eval_teacher=True,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        # NOTE: super(KnowledgeDistillationSingleStageDetector, self)
        # deliberately skips the KD-detector __init__ (which builds its
        # teacher from a config file) and calls SingleStageDetector's
        # constructor directly; the teacher is assembled piecewise below.
        super(KnowledgeDistillationSingleStageDetector,
              self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                             pretrained)
        self.eval_teacher = eval_teacher
        # The teacher lives in a bare nn.Module container; assignment goes
        # through the inherited __setattr__ override, keeping the teacher's
        # parameters out of self.parameters().
        self.teacher_model = nn.Module()
        self.teacher_model.backbone = build_backbone(teacher_backbone)
        if (teacher_neck is not None):
            self.teacher_model.neck = build_neck(teacher_neck)
        teacher_bbox_head.update(train_cfg=train_cfg)
        teacher_bbox_head.update(test_cfg=test_cfg)
        self.teacher_model.bbox_head = build_head(teacher_bbox_head)
        if (teacher_ckpt is not None):
            load_checkpoint(self.teacher_model, teacher_ckpt,
                            map_location='cpu')

    @property
    def with_teacher_neck(self):
        """bool: whether the detector has a teacher_neck"""
        return (hasattr(self.teacher_model, 'neck')
                and (self.teacher_model.neck is not None))

    def extract_teacher_feat(self, img):
        """Directly extract teacher features from the backbone+neck."""
        x = self.teacher_model.backbone(img)
        if self.with_teacher_neck:
            x = self.teacher_model.neck(x)
        return x

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels,
                      gt_bboxes_ignore=None):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # The teacher performs label assignment only (no gradients); the
        # student trains against the teacher's assignment result.
        with torch.no_grad():
            x_teacher = self.extract_teacher_feat(img)
            outs_teacher = self.teacher_model.bbox_head(x_teacher)
            label_assignment_results = self.teacher_model.bbox_head.get_label_assignment(
                *outs_teacher, gt_bboxes, gt_labels, img_metas,
                gt_bboxes_ignore)
        x = self.extract_feat(img)
        losses = self.bbox_head.forward_train(x, label_assignment_results,
                                              img_metas, gt_bboxes, gt_labels,
                                              gt_bboxes_ignore)
        return losses
@DETECTORS.register_module()
class MaskRCNN(TwoStageDetector):
    """Mask R-CNN detector (`paper <https://arxiv.org/abs/1703.06870>`_).

    Thin wrapper that forwards its configuration to
    :class:`TwoStageDetector` unchanged.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class MaskScoringRCNN(TwoStageDetector):
    """Mask Scoring R-CNN detector.

    https://arxiv.org/abs/1903.00241

    Thin wrapper that forwards its configuration to
    :class:`TwoStageDetector` unchanged.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
    """NAS-FCOS: Fast Neural Architecture Search for Object Detection.

    https://arxiv.org/abs/1906.0442

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class PAA(SingleStageDetector):
    """PAA detector (`paper <https://arxiv.org/pdf/2007.08103.pdf>`_).

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class PanopticFPN(TwoStagePanopticSegmentor):
    """Panoptic FPN (`paper <https://arxiv.org/pdf/1901.02446>`_).

    Thin wrapper that forwards its configuration — including the semantic
    and panoptic-fusion heads — to :class:`TwoStagePanopticSegmentor`.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None,
                 semantic_head=None,
                 panoptic_fusion_head=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg,
            semantic_head=semantic_head,
            panoptic_fusion_head=panoptic_fusion_head)
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
    """PointRend: Image Segmentation as Rendering.

    Implementation of `PointRend <https://arxiv.org/abs/1912.08193>`_.

    Thin wrapper that forwards its configuration to
    :class:`TwoStageDetector` unchanged.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
    """QueryInst (`Instances as Queries <http://arxiv.org/abs/2105.01928>`_).

    Thin wrapper that forwards its configuration to :class:`SparseRCNN`
    unchanged.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class RepPointsDetector(SingleStageDetector):
    """RepPoints: Point Set Representation for Object Detection.

    Implements the RepPoints detector (https://arxiv.org/pdf/1904.11490).

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class RetinaNet(SingleStageDetector):
    """RetinaNet detector (`paper <https://arxiv.org/abs/1708.02002>`_).

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
    """SCNet detector (`paper <https://arxiv.org/abs/2012.10150>`_).

    Behaviorally identical to :class:`CascadeRCNN`; registered under its
    own name so configs can select it.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
    """Base class for single-stage detectors.

    Single-stage detectors directly and densely predict bounding boxes on the
    output features of the backbone+neck.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 bbox_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(SingleStageDetector, self).__init__(init_cfg)
        if pretrained:
            # Legacy path: fold the deprecated ``pretrained`` argument into
            # the backbone config instead of init_cfg.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            backbone.pretrained = pretrained
        self.backbone = build_backbone(backbone)
        if (neck is not None):
            self.neck = build_neck(neck)
        # The head config is mutated in place to carry train/test settings.
        bbox_head.update(train_cfg=train_cfg)
        bbox_head.update(test_cfg=test_cfg)
        self.bbox_head = build_head(bbox_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        super(SingleStageDetector, self).forward_train(img, img_metas)
        x = self.extract_feat(img)
        losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
                                              gt_labels, gt_bboxes_ignore)
        return losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test-time augmentation.

        Args:
            img (torch.Tensor): Images with shape (N, C, H, W).
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        feat = self.extract_feat(img)
        results_list = self.bbox_head.simple_test(feat, img_metas,
                                                  rescale=rescale)
        # Convert per-image (bboxes, labels) pairs to the per-class
        # numpy-array format used throughout the codebase.
        bbox_results = [bbox2result(det_bboxes, det_labels,
                                    self.bbox_head.num_classes)
                        for (det_bboxes, det_labels) in results_list]
        return bbox_results

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            imgs (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        assert hasattr(self.bbox_head, 'aug_test'), \
            f'{self.bbox_head.__class__.__name__} does not support test-time augmentation'
        feats = self.extract_feats(imgs)
        results_list = self.bbox_head.aug_test(feats, img_metas,
                                               rescale=rescale)
        bbox_results = [bbox2result(det_bboxes, det_labels,
                                    self.bbox_head.num_classes)
                        for (det_bboxes, det_labels) in results_list]
        return bbox_results

    def onnx_export(self, img, img_metas, with_nms=True):
        """Test function without test time augmentation.

        Args:
            img (torch.Tensor): input images.
            img_metas (list[dict]): List of image information.

        Returns:
            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
                and class labels of shape [N, num_det].
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        # get origin input shape to support onnx dynamic shape
        # NOTE(review): torch._shape_as_tensor is a private API.
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
        # get pad input shape to support onnx dynamic shape for exporting
        img_metas[0]['pad_shape_for_onnx'] = img_shape
        if (len(outs) == 2):
            # Heads returning only (scores, preds) get a placeholder third
            # output so onnx_export sees a uniform tuple.
            outs = (*outs, None)
        (det_bboxes, det_labels) = self.bbox_head.onnx_export(
            *outs, img_metas, with_nms=with_nms)
        return (det_bboxes, det_labels)
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
    """SOLO: Segmenting Objects by Locations
    (https://arxiv.org/abs/1912.04488).

    Thin wrapper that forwards its configuration to
    :class:`SingleStageInstanceSegmentor` unchanged.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 bbox_head=None,
                 mask_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=None,
                 pretrained=None):
        # Gather the full configuration and hand it to the parent as-is.
        parent_kwargs = dict(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            mask_head=mask_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            init_cfg=init_cfg,
            pretrained=pretrained)
        super().__init__(**parent_kwargs)
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
    """Implementation of `Sparse R-CNN: End-to-End Object Detection with
    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""

    def __init__(self, *args, **kwargs):
        super(SparseRCNN, self).__init__(*args, **kwargs)
        # The "rpn_head" here is the learnable-proposal embedding head, so
        # external proposals are unsupported by design.
        assert self.with_rpn, 'Sparse R-CNN and QueryInst do not support external proposals'

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """Forward function of SparseR-CNN and QueryInst in train stage.

        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (List[Tensor], optional) : Segmentation masks for
                each box. This is required to train QueryInst.
            proposals (List[Tensor], optional): override rpn proposals with
                custom proposals. Use when `with_rpn` is False.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        assert (proposals is None), 'Sparse R-CNN and QueryInst do not support external proposals'
        x = self.extract_feat(img)
        # The proposal head also returns imgs_whwh (per-image [w,h,w,h]
        # scale tensor), which the RoI head needs for normalization.
        (proposal_boxes, proposal_features, imgs_whwh) = \
            self.rpn_head.forward_train(x, img_metas)
        roi_losses = self.roi_head.forward_train(
            x,
            proposal_boxes,
            proposal_features,
            img_metas,
            gt_bboxes,
            gt_labels,
            gt_bboxes_ignore=gt_bboxes_ignore,
            gt_masks=gt_masks,
            imgs_whwh=imgs_whwh)
        return roi_losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test time augmentation.

        Args:
            imgs (list[torch.Tensor]): List of multiple images
            img_metas (list[dict]): List of image information.
            rescale (bool): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        x = self.extract_feat(img)
        (proposal_boxes, proposal_features, imgs_whwh) = \
            self.rpn_head.simple_test_rpn(x, img_metas)
        results = self.roi_head.simple_test(
            x,
            proposal_boxes,
            proposal_features,
            img_metas,
            imgs_whwh=imgs_whwh,
            rescale=rescale)
        return results

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        x = self.extract_feat(img)
        num_imgs = len(img)
        # A fixed dummy shape is enough for flops computation.
        dummy_img_metas = [dict(img_shape=(800, 1333, 3))
                           for _ in range(num_imgs)]
        (proposal_boxes, proposal_features, imgs_whwh) = \
            self.rpn_head.simple_test_rpn(x, dummy_img_metas)
        roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
                                               proposal_features,
                                               dummy_img_metas)
        return roi_outs
@DETECTORS.register_module()
class TOOD(SingleStageDetector):
    """TOOD: Task-aligned One-stage Object Detection
    (https://arxiv.org/abs/2108.07755).

    Pure configuration wrapper around :class:`SingleStageDetector` that
    additionally lets training hooks publish the current epoch to the head.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)

    def set_epoch(self, epoch):
        """Record the current training epoch on the bbox head."""
        self.bbox_head.epoch = epoch
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
    """Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_

    The backbone and RoI head run multiple parallel branches; ground truths
    and image metas are replicated across branches during training, and a
    single branch (or all of them when ``test_branch_idx == -1``) is used
    at test time.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        super(TridentFasterRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
        # Backbone and RoI head must agree on the branch layout.
        assert (self.backbone.num_branch == self.roi_head.num_branch)
        assert (self.backbone.test_branch_idx == self.roi_head.test_branch_idx)
        self.num_branch = self.backbone.num_branch
        self.test_branch_idx = self.backbone.test_branch_idx

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation.

        Args:
            img (Tensor): input images.
            img_metas (list[dict]): per-image meta info.
            proposals (list[Tensor], optional): external proposals; when
                given, the RPN is skipped.
            rescale (bool): whether to rescale results to the original
                image scale.
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        # All branches are active when test_branch_idx == -1; otherwise a
        # single branch runs and metas need no replication.
        num_branch = (self.num_branch if (self.test_branch_idx == (- 1)) else 1)
        # BUGFIX: compute trident_img_metas unconditionally. It was
        # previously assigned only inside the ``proposals is None`` branch,
        # so passing external proposals raised NameError at the
        # roi_head.simple_test call below.
        trident_img_metas = (img_metas * num_branch)
        if (proposals is None):
            proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
        else:
            proposal_list = proposals
        return self.roi_head.simple_test(
            x, proposal_list, trident_img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        num_branch = (self.num_branch if (self.test_branch_idx == (- 1)) else 1)
        # Replicate each augmentation's metas across branches. (Loop
        # variable renamed from the original, which shadowed ``img_metas``.)
        trident_img_metas = [(meta * num_branch) for meta in img_metas]
        proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
        """make copies of img and gts to fit multi-branch."""
        trident_gt_bboxes = tuple((gt_bboxes * self.num_branch))
        trident_gt_labels = tuple((gt_labels * self.num_branch))
        trident_img_metas = tuple((img_metas * self.num_branch))
        return super(TridentFasterRCNN, self).forward_train(
            img, trident_img_metas, trident_gt_bboxes, trident_gt_labels)
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
    """Base class for two-stage detectors.

    Two-stage detectors typically consisting of a region proposal network and a
    task-specific regression head.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(TwoStageDetector, self).__init__(init_cfg)
        if pretrained:
            # Legacy path: fold the deprecated ``pretrained`` argument into
            # the backbone config instead of init_cfg.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            backbone.pretrained = pretrained
        self.backbone = build_backbone(backbone)
        if (neck is not None):
            self.neck = build_neck(neck)
        if (rpn_head is not None):
            rpn_train_cfg = (train_cfg.rpn if (train_cfg is not None) else None)
            # Copy before mutating so the caller's rpn_head config is
            # untouched (note: roi_head below is mutated in place).
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)
        if (roi_head is not None):
            # update train and test cfg here for now
            rcnn_train_cfg = (train_cfg.rcnn if (train_cfg is not None) else None)
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            roi_head.pretrained = pretrained
            self.roi_head = build_head(roi_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    @property
    def with_rpn(self):
        """bool: whether the detector has RPN"""
        return (hasattr(self, 'rpn_head') and (self.rpn_head is not None))

    @property
    def with_roi_head(self):
        """bool: whether the detector has a RoI head"""
        return (hasattr(self, 'roi_head') and (self.roi_head is not None))

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        outs = ()
        x = self.extract_feat(img)
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = (outs + (rpn_outs,))
        # Random proposals suffice for a flops-only forward pass.
        proposals = torch.randn(1000, 4).to(img.device)
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = (outs + (roi_outs,))
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.

            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.

            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.

            gt_labels (list[Tensor]): class indices corresponding to each box

            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.

            proposals : override rpn proposals with custom proposals. Use when
                `with_rpn` is False.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        x = self.extract_feat(img)
        losses = dict()
        # RPN forward and loss
        if self.with_rpn:
            proposal_cfg = self.train_cfg.get('rpn_proposal',
                                              self.test_cfg.rpn)
            # RPN is class-agnostic, hence gt_labels=None.
            (rpn_losses, proposal_list) = self.rpn_head.forward_train(
                x,
                img_metas,
                gt_bboxes,
                gt_labels=None,
                gt_bboxes_ignore=gt_bboxes_ignore,
                proposal_cfg=proposal_cfg,
                **kwargs)
            losses.update(rpn_losses)
        else:
            proposal_list = proposals
        roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
                                                 gt_bboxes, gt_labels,
                                                 gt_bboxes_ignore, gt_masks,
                                                 **kwargs)
        losses.update(roi_losses)
        return losses

    async def async_simple_test(self,
                                img,
                                img_meta,
                                proposals=None,
                                rescale=False):
        """Async test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            proposal_list = (await self.rpn_head.async_simple_test_rpn(
                x, img_meta))
        else:
            proposal_list = proposals
        return (await self.roi_head.async_simple_test(
            x, proposal_list, img_meta, rescale=rescale))

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals
        return self.roi_head.simple_test(
            x, proposal_list, img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)

    def onnx_export(self, img, img_metas):
        """Export the two-stage pipeline to ONNX (requires the RoI head to
        implement ``onnx_export``)."""
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
        x = self.extract_feat(img)
        proposals = self.rpn_head.onnx_export(x, img_metas)
        if hasattr(self.roi_head, 'onnx_export'):
            return self.roi_head.onnx_export(x, proposals, img_metas)
        else:
            raise NotImplementedError(f'{self.__class__.__name__} can not be exported to ONNX. Please refer to the list of supported models,https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx')
@DETECTORS.register_module()
class VFNet(SingleStageDetector):
    """VarifocalNet (VFNet) detector
    (`paper <https://arxiv.org/abs/2008.13367>`_).

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
    """Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 segm_head,
                 mask_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # The bbox branch is owned by the single-stage base class; the
        # segmentation and mask branches are YOLACT-specific additions.
        super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
                                     test_cfg, pretrained, init_cfg)
        self.segm_head = build_head(segm_head)
        self.mask_head = build_head(mask_head)

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        feats = self.extract_feat(img)
        bbox_outs = self.bbox_head(feats)
        # Prototype masks are predicted from the highest-resolution level.
        prototypes = self.mask_head.forward_dummy(feats[0])
        return bbox_outs, prototypes

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None):
        """Compute losses of the bbox, segm and mask heads.

        Args:
            img (Tensor): Input images of shape (N, C, H, W), typically
                mean centered and std scaled.
            img_metas (list[dict]): Image info dicts with keys such as
                'img_shape', 'scale_factor' and 'flip'. See
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image,
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            gt_bboxes_ignore (None | list[Tensor]): Boxes to be ignored when
                computing the loss.
            gt_masks (None | Tensor): True segmentation masks for each box.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # Convert the mask containers to uint8 tensors on the image device.
        gt_masks = [
            mask.to_tensor(dtype=torch.uint8, device=img.device)
            for mask in gt_masks
        ]

        feats = self.extract_feat(img)
        cls_score, bbox_pred, coeff_pred = self.bbox_head(feats)

        losses, sampling_results = self.bbox_head.loss(
            cls_score,
            bbox_pred,
            gt_bboxes,
            gt_labels,
            img_metas,
            gt_bboxes_ignore=gt_bboxes_ignore)

        segm_head_outs = self.segm_head(feats[0])
        losses.update(self.segm_head.loss(segm_head_outs, gt_masks, gt_labels))

        mask_pred = self.mask_head(feats[0], coeff_pred, gt_bboxes, img_metas,
                                   sampling_results)
        losses.update(
            self.mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas,
                                sampling_results))

        # Fail fast if training has diverged.
        for loss_name in losses.keys():
            assert torch.isfinite(torch.stack(
                losses[loss_name])).all().item(), \
                '{} becomes infinite or NaN!'.format(loss_name)
        return losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test-time augmentation."""
        feats = self.extract_feat(img)
        det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
            feats, img_metas, rescale=rescale)
        bbox_results = [
            bbox2result(bboxes, labels, self.bbox_head.num_classes)
            for bboxes, labels in zip(det_bboxes, det_labels)
        ]
        segm_results = self.mask_head.simple_test(
            feats,
            det_bboxes,
            det_labels,
            det_coeffs,
            img_metas,
            rescale=rescale)
        return list(zip(bbox_results, segm_results))

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations."""
        raise NotImplementedError(
            'YOLACT does not support test-time augmentation')
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
    """YOLOv3 detector; training and testing are inherited from the
    single-stage base class."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
                                     test_cfg, pretrained, init_cfg)

    def onnx_export(self, img, img_metas):
        """Test function for exporting to ONNX, without test time
        augmentation.

        Args:
            img (torch.Tensor): Input images.
            img_metas (list[dict]): List of image information.

        Returns:
            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
                and class labels of shape [N, num_det].
        """
        feats = self.extract_feat(img)
        head_outs = self.bbox_head.forward(feats)
        # Record the dynamic input shape so the head can export
        # shape-dependent ops correctly.
        img_metas[0]['img_shape_for_onnx'] = torch._shape_as_tensor(img)[2:]
        return self.bbox_head.onnx_export(*head_outs, img_metas)
@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
    """Implementation of `You Only Look One-level Feature
    <https://arxiv.org/abs/2103.09460>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # `init_cfg` added (default None) for consistency with the sibling
        # single-stage detectors in this file (VFNet, YOLOV3), which forward
        # it to the same base class; existing callers are unaffected.
        super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
                                    test_cfg, pretrained, init_cfg)
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
    """Calculate accuracy according to the prediction and target.

    Args:
        pred (torch.Tensor): The model prediction, shape (N, num_class).
        target (torch.Tensor): The target of each prediction, shape (N, ).
        topk (int | tuple[int], optional): If the predictions in ``topk``
            matches the target, the predictions will be regarded as correct
            ones. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Defaults to None.

    Returns:
        float | tuple[float]: A single accuracy if ``topk`` is an int, or a
            tuple of accuracies, one per entry of ``topk``.
    """
    assert isinstance(topk, (int, tuple))
    return_single = isinstance(topk, int)
    if return_single:
        topk = (topk, )

    maxk = max(topk)
    if pred.size(0) == 0:
        # No samples: report 0% for every requested k.
        zeros = [pred.new_tensor(0.) for _ in topk]
        return zeros[0] if return_single else zeros
    assert pred.ndim == 2 and target.ndim == 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'

    scores, labels = pred.topk(maxk, dim=1)
    labels = labels.t()  # shape (maxk, N)
    hits = labels.eq(target.view(1, -1).expand_as(labels))
    if thresh is not None:
        # A prediction only counts as correct if its score clears `thresh`.
        hits = hits & (scores > thresh).t()

    results = []
    for k in topk:
        num_hits_k = hits[:k].reshape(-1).float().sum(0, keepdim=True)
        results.append(num_hits_k.mul_(100.0 / pred.size(0)))
    return results[0] if return_single else results
class Accuracy(nn.Module):
    """Module wrapper around :func:`accuracy`.

    Args:
        topk (tuple, optional): The criterion used to calculate the
            accuracy. Defaults to (1, ).
        thresh (float, optional): If not None, predictions with scores
            under this threshold are considered incorrect. Defaults to None.
    """

    def __init__(self, topk=(1, ), thresh=None):
        super().__init__()
        self.topk = topk
        self.thresh = thresh

    def forward(self, pred, target):
        """Forward function to calculate accuracy.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            tuple[float]: The accuracies under different topk criterions.
        """
        return accuracy(pred, target, self.topk, self.thresh)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Calculate balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target of the prediction with
            shape (N, 4).
        beta (float): Threshold between the smooth (log) part and the
            linear part of the loss. Defaults to 1.0.
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss.
            Defaults to 1.5.
        reduction (str, optional): "none", "mean" or "sum".

    Returns:
        torch.Tensor: The calculated loss.
    """
    assert beta > 0
    if target.numel() == 0:
        # Keep the graph connected while contributing zero loss.
        return pred.sum() * 0
    assert pred.size() == target.size()

    abs_err = torch.abs(pred - target)
    # `b` is chosen so the two pieces of the loss meet smoothly at `beta`.
    b = np.e**(gamma / alpha) - 1
    return torch.where(
        abs_err < beta,
        alpha / b * (b * abs_err + 1) * torch.log(b * abs_err / beta + 1) -
        alpha * abs_err,
        gamma * abs_err + gamma / b - alpha * beta)
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
        beta (float, optional): Threshold between the smooth and linear
            parts of the loss. Defaults to 1.0.
        reduction (str, optional): "none", "mean" or "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0.
    """

    def __init__(self,
                 alpha=0.5,
                 gamma=1.5,
                 beta=1.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, 4).
            target (torch.Tensor): The learning target with shape (N, 4).
            weight (torch.Tensor, optional): Sample-wise loss weight,
                shape (N, ).
            avg_factor (int, optional): Average factor used to average the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction; one of "none", "mean", "sum".

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  class_weight=None,
                  ignore_index=-100):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor used to average the loss.
            Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # `None` means "fall back to the PyTorch default ignore index".
    if ignore_index is None:
        ignore_index = -100

    # Element-wise loss first; weighting and reduction are applied after.
    loss = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)

    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): 'Expand onehot labels to match the size of prediction.' bin_labels = labels.new_full((labels.size(0), label_channels), 0) valid_mask = ((labels >= 0) & (labels != ignore_index)) inds = torch.nonzero((valid_mask & (labels < label_channels)), as_tuple=False) if (inds.numel() > 0): bin_labels[(inds, labels[inds])] = 1 valid_mask = valid_mask.view((- 1), 1).expand(labels.size(0), label_channels).float() if (label_weights is None): bin_label_weights = valid_mask else: bin_label_weights = label_weights.view((- 1), 1).repeat(1, label_channels) bin_label_weights *= valid_mask return (bin_labels, bin_label_weights)
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None,
                         ignore_index=-100):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): "none", "mean" or "sum".
        avg_factor (int, optional): Average factor used to average the loss.
            Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # `None` means "fall back to the default ignore index".
    if ignore_index is None:
        ignore_index = -100

    # Class-index labels are expanded to per-channel binary targets.
    if pred.dim() != label.dim():
        label, weight = _expand_onehot_labels(label, weight, pred.size(-1),
                                              ignore_index)

    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), pos_weight=class_weight, reduction='none')
    return weight_reduce_loss(
        loss, weight, reduction=reduction, avg_factor=avg_factor)
def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None,
                       ignore_index=None):
    """Calculate the CrossEntropy loss for masks.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C, *), C is the
            number of classes. The trailing * indicates arbitrary shape.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask
            corresponding object. It selects each ROI's mask channel when the
            mask prediction is not class-agnostic.
        reduction (str, optional): Only "mean" is supported.
        avg_factor (int, optional): Must be None; kept for API consistency.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (None): Placeholder, to be consistent with other losses.
            Default: None.

    Returns:
        torch.Tensor: The calculated loss.

    Example:
        >>> N, C = 3, 11
        >>> H, W = 2, 2
        >>> pred = torch.randn(N, C, H, W) * 1000
        >>> target = torch.rand(N, H, W)
        >>> label = torch.randint(0, C, size=(N,))
        >>> loss = mask_cross_entropy(pred, target, label)
        >>> assert loss.shape == (1,)
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    assert reduction == 'mean' and avg_factor is None

    # Pick, for every ROI, the mask channel of its own class.
    num_rois = pred.size(0)
    row_inds = torch.arange(
        0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[row_inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
    """CrossEntropyLoss.

    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            instead of softmax. Defaults to False.
        use_mask (bool, optional): Whether to use mask cross entropy loss.
            Defaults to False.
        reduction (str, optional): "none", "mean" or "sum".
            Defaults to 'mean'.
        class_weight (list[float], optional): Weight of each class.
            Defaults to None.
        ignore_index (int | None): The label index to be ignored.
            Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
    """

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 ignore_index=None,
                 loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        # Sigmoid and mask modes are mutually exclusive.
        assert use_sigmoid is False or use_mask is False
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.ignore_index = ignore_index
        # Bind the concrete criterion once at construction time.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                ignore_index=None,
                **kwargs):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor used to average the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction; one of "none", "mean", "sum".
            ignore_index (int | None): The label index to be ignored.
                If not None, it overrides the configured value.
                Default: None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if ignore_index is None:
            ignore_index = self.ignore_index
        # Materialize class weights on the same device as the scores.
        class_weight = (
            cls_score.new_tensor(self.class_weight, device=cls_score.device)
            if self.class_weight is not None else None)
        return self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            ignore_index=ignore_index,
            **kwargs)
def dice_loss(pred,
              target,
              weight=None,
              eps=1e-3,
              reduction='mean',
              naive_dice=False,
              avg_factor=None):
    """Calculate dice loss; two formulations are supported:

    - the one proposed in `V-Net: Fully Convolutional Neural
      Networks for Volumetric Medical Image Segmentation
      <https://arxiv.org/abs/1606.04797>`_.
    - the "naive" dice loss, where the denominator uses first powers
      instead of squares.

    Args:
        pred (torch.Tensor): The prediction, has a shape (n, *).
        target (torch.Tensor): The learning label, shape (n, *), same as pred.
        weight (torch.Tensor, optional): Per-prediction loss weight,
            shape (n,). Defaults to None.
        eps (float): Avoid dividing by zero. Default: 1e-3.
        reduction (str, optional): "none", "mean" or "sum".
            Defaults to 'mean'.
        naive_dice (bool, optional): If False, use the V-Net form,
            otherwise the naive form. Defaults to False.
        avg_factor (int, optional): Average factor used to average the loss.
            Defaults to None.
    """
    flat_pred = pred.flatten(1)
    flat_target = target.flatten(1).float()
    intersection = torch.sum(flat_pred * flat_target, 1)
    if naive_dice:
        pred_sum = torch.sum(flat_pred, 1)
        target_sum = torch.sum(flat_target, 1)
        dice = (2 * intersection + eps) / (pred_sum + target_sum + eps)
    else:
        pred_sq = torch.sum(flat_pred * flat_pred, 1) + eps
        target_sq = torch.sum(flat_target * flat_target, 1) + eps
        dice = 2 * intersection / (pred_sq + target_sq)

    loss = 1 - dice
    if weight is not None:
        assert weight.ndim == loss.ndim
        assert len(weight) == len(pred)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
@LOSSES.register_module()
class DiceLoss(nn.Module):
    """Compute dice loss.

    Args:
        use_sigmoid (bool, optional): Whether sigmoid (rather than softmax)
            activation is applied to the prediction. Defaults to True.
        activate (bool): Whether to activate the predictions inside this
            loss; if False, the caller is expected to have done it.
            Defaults to True.
        reduction (str, optional): "none", "mean" or "sum".
            Defaults to 'mean'.
        naive_dice (bool, optional): If False, use the V-Net dice loss,
            otherwise the naive form with first-power denominator.
            Defaults to False.
        loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        eps (float): Avoid dividing by zero. Defaults to 1e-3.
    """

    def __init__(self,
                 use_sigmoid=True,
                 activate=True,
                 reduction='mean',
                 naive_dice=False,
                 loss_weight=1.0,
                 eps=1e-3):
        super(DiceLoss, self).__init__()
        self.use_sigmoid = use_sigmoid
        self.reduction = reduction
        self.naive_dice = naive_dice
        self.loss_weight = loss_weight
        self.eps = eps
        self.activate = activate

    def forward(self,
                pred,
                target,
                weight=None,
                reduction_override=None,
                avg_factor=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction, has a shape (n, *).
            target (torch.Tensor): The label, shape (n, *), same as pred.
            weight (torch.Tensor, optional): Per-prediction loss weight,
                shape (n,). Defaults to None.
            avg_factor (int, optional): Average factor used to average the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction; one of "none", "mean", "sum".

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)

        if self.activate:
            # Only sigmoid activation is implemented.
            if not self.use_sigmoid:
                raise NotImplementedError
            pred = pred.sigmoid()

        return self.loss_weight * dice_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            naive_dice=self.naive_dice,
            avg_factor=avg_factor)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the
            prediction in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12  # guards the log against exactly-0/1 predictions
    # Only heatmap peaks (== 1) are treated as positives; everything else
    # is a negative, down-weighted by its closeness to a peak.
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_term = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_term = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_term + neg_term
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Note that the target here is a gaussian heatmap, not a 0/1 binary
    target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean',
                 loss_weight=1.0):
        super(GaussianFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction
                in gaussian distribution.
            weight (torch.Tensor, optional): Per-prediction loss weight.
                Defaults to None.
            avg_factor (int, optional): Average factor used to average the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * gaussian_focal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            reduction=reduction,
            avg_factor=avg_factor)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def quality_focal_loss(pred, target, beta=2.0):
    """Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Joint representation of classification and
            quality (IoU) estimation with shape (N, C), C is the number of
            classes.
        target (tuple([torch.Tensor])): Target category label with shape (N,)
            and target quality label with shape (N,).
        beta (float): The beta parameter for calculating the modulating
            factor. Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, 'target for QFL must be a tuple of two elements,\n including category label and quality label, respectively'
    label, score = target

    # Negatives are first supervised with 0 quality scores everywhere.
    pred_sigmoid = pred.sigmoid()
    scale_factor = pred_sigmoid
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = F.binary_cross_entropy_with_logits(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)

    # Positives (FG labels in [0, num_classes)) are then supervised by
    # their bbox quality (IoU) score on their own class channel.
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
        pred[pos, pos_label], score[pos],
        reduction='none') * scale_factor.abs().pow(beta)

    return loss.sum(dim=1, keepdim=False)
@weighted_loss
def quality_focal_loss_with_prob(pred, target, beta=2.0):
    """Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Different from `quality_focal_loss`, this function accepts probability
    as input.

    Args:
        pred (torch.Tensor): Joint representation of classification and
            quality (IoU) estimation with shape (N, C), C is the number of
            classes.
        target (tuple([torch.Tensor])): Target category label with shape (N,)
            and target quality label with shape (N,).
        beta (float): The beta parameter for calculating the modulating
            factor. Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, 'target for QFL must be a tuple of two elements,\n including category label and quality label, respectively'
    label, score = target

    # `pred` is already a probability, so no sigmoid here.
    pred_sigmoid = pred
    scale_factor = pred_sigmoid
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = F.binary_cross_entropy(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)

    # Positives are supervised by their bbox quality (IoU) score.
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy(
        pred[pos, pos_label], score[pos],
        reduction='none') * scale_factor.abs().pow(beta)

    return loss.sum(dim=1, keepdim=False)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def distribution_focal_loss(pred, label):
    """Distribution Focal Loss (DFL) is from `Generalized Focal Loss:
    Learning Qualified and Distributed Bounding Boxes for Dense Object
    Detection <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted general distribution of bounding
            boxes (before softmax) with shape (N, n+1), n is the max value
            of the integral set `{0, ..., n}` in paper.
        label (torch.Tensor): Target distance label for bounding boxes with
            shape (N,).

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    # Each continuous label is distributed over its two neighbouring
    # integers, weighted by proximity.
    left = label.long()
    right = left + 1
    weight_left = right.float() - label
    weight_right = label - left.float()
    return (F.cross_entropy(pred, left, reduction='none') * weight_left +
            F.cross_entropy(pred, right, reduction='none') * weight_right)
@LOSSES.register_module()
class QualityFocalLoss(nn.Module):
    """Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
    Learning Qualified and Distributed Bounding Boxes for Dense Object
    Detection <https://arxiv.org/abs/2006.04388>`_.

    Args:
        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
            Defaults to True.
        beta (float): The beta parameter for calculating the modulating
            factor. Defaults to 2.0.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
        activated (bool, optional): Whether the input is activated.
            If True, the input has already been activated and can be
            treated as probabilities; otherwise it is treated as logits.
            Defaults to False.
    """

    def __init__(self,
                 use_sigmoid=True,
                 beta=2.0,
                 reduction='mean',
                 loss_weight=1.0,
                 activated=False):
        super(QualityFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.activated = activated

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Joint representation of classification and
                quality (IoU) estimation with shape (N, C), C is the number
                of classes.
            target (tuple([torch.Tensor])): Target category label with shape
                (N,) and target quality label with shape (N,).
            weight (torch.Tensor, optional): Per-prediction loss weight.
                Defaults to None.
            avg_factor (int, optional): Average factor used to average the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if not self.use_sigmoid:
            raise NotImplementedError
        # Probability inputs use the sigmoid-free implementation.
        loss_fn = (quality_focal_loss_with_prob
                   if self.activated else quality_focal_loss)
        return self.loss_weight * loss_fn(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor)
@LOSSES.register_module()
class DistributionFocalLoss(nn.Module):
    """Distribution Focal Loss (DFL) is a variant of `Generalized Focal
    Loss: Learning Qualified and Distributed Bounding Boxes for Dense
    Object Detection <https://arxiv.org/abs/2006.04388>`_.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(DistributionFocalLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Predicted general distribution of bounding
                boxes (before softmax) with shape (N, n+1), n is the max
                value of the integral set `{0, ..., n}` in paper.
            target (torch.Tensor): Target distance label for bounding boxes
                with shape (N,).
            weight (torch.Tensor, optional): Per-prediction loss weight.
                Defaults to None.
            avg_factor (int, optional): Average factor used to average the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * distribution_focal_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(((labels >= 0) & (labels < label_channels)), as_tuple=False).squeeze() if (inds.numel() > 0): bin_labels[(inds, labels[inds])] = 1 bin_label_weights = label_weights.view((- 1), 1).expand(label_weights.size(0), label_channels) return (bin_labels, bin_label_weights)
@LOSSES.register_module()
class GHMC(nn.Module):
    """GHM Classification Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        use_sigmoid (bool): Can only be true for BCE based loss now.
        loss_weight (float): The weight of the total GHM-C loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0, reduction='mean'):
        super(GHMC, self).__init__()
        self.bins = bins
        self.momentum = momentum
        # Evenly spaced bin edges over the gradient-norm range [0, 1].
        edges = (torch.arange((bins + 1)).float() / bins)
        self.register_buffer('edges', edges)
        # Nudge the last edge past 1 so g == 1 falls inside the last bin
        # (the bin test below uses g < edges[i + 1]).
        self.edges[(- 1)] += 1e-06
        if (momentum > 0):
            # Running (EMA) count of samples per bin; persisted as a buffer.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.use_sigmoid = use_sigmoid
        if (not self.use_sigmoid):
            # Only the sigmoid/BCE formulation is implemented.
            raise NotImplementedError
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, pred, target, label_weight, reduction_override=None, **kwargs):
        """Calculate the GHM-C loss.

        Args:
            pred (float tensor of size [batch_num, class_num]):
                The direct prediction of classification fc layer (logits).
            target (float tensor of size [batch_num, class_num]):
                Binary class target for each sample. May also be an index
                tensor of shape [batch_num]; it is then expanded to one-hot.
            label_weight (float tensor of size [batch_num, class_num]):
                the value is 1 if the sample is valid and 0 if ignored.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
            **kwargs: Accepted for interface compatibility; unused here.

        Returns:
            The gradient harmonized loss.
        """
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        if (pred.dim() != target.dim()):
            # Index labels were passed in; expand them to one-hot form.
            (target, label_weight) = _expand_onehot_labels(target, label_weight, pred.size((- 1)))
        (target, label_weight) = (target.float(), label_weight.float())
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(pred)
        # Gradient norm of sigmoid BCE w.r.t. the logits: |sigmoid(x) - t|.
        g = torch.abs((pred.sigmoid().detach() - target))
        valid = (label_weight > 0)
        # Total number of valid samples; floor of 1 avoids division by zero.
        tot = max(valid.float().sum().item(), 1.0)
        n = 0  # number of non-empty bins
        for i in range(self.bins):
            inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
            num_in_bin = inds.sum().item()
            if (num_in_bin > 0):
                if (mmt > 0):
                    # EMA of the bin population; samples in dense bins get
                    # proportionally smaller weights (gradient harmonizing).
                    self.acc_sum[i] = ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin))
                    weights[inds] = (tot / self.acc_sum[i])
                else:
                    weights[inds] = (tot / num_in_bin)
                n += 1
        if (n > 0):
            # Normalize by the number of occupied bins.
            weights = (weights / n)
        loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
        loss = weight_reduce_loss(loss, weights, reduction=reduction, avg_factor=tot)
        return (loss * self.loss_weight)
@LOSSES.register_module()
class GHMR(nn.Module):
    """GHM Regression Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        mu (float): The parameter for the Authentic Smooth L1 loss.
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        loss_weight (float): The weight of the total GHM-R loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0, reduction='mean'):
        super(GHMR, self).__init__()
        self.mu = mu
        self.bins = bins
        # Evenly spaced bin edges over the gradient-norm range [0, 1].
        edges = (torch.arange((bins + 1)).float() / bins)
        self.register_buffer('edges', edges)
        # Open up the last bin so any g >= last edge still lands in it.
        self.edges[(- 1)] = 1000.0
        self.momentum = momentum
        if (momentum > 0):
            # Running (EMA) count of samples per bin; persisted as a buffer.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, pred, target, label_weight, avg_factor=None, reduction_override=None):
        """Calculate the GHM-R loss.

        Args:
            pred (float tensor of size [batch_num, 4 (* class_num)]):
                The prediction of box regression layer. Channel number can
                be 4 or 4 * class_num depending on whether it is
                class-agnostic.
            target (float tensor of size [batch_num, 4 (* class_num)]):
                The target regression values with the same size of pred.
            label_weight (float tensor of size [batch_num, 4 (* class_num)]):
                The weight of each sample, 0 if ignored.
            avg_factor (int, optional): Accepted for interface
                compatibility; not used in this implementation (the loss is
                averaged by the valid-sample count ``tot`` instead).
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            The gradient harmonized loss.
        """
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        mu = self.mu
        edges = self.edges
        mmt = self.momentum
        diff = (pred - target)
        # "Authentic Smooth L1": sqrt(d^2 + mu^2) - mu, a smooth L1 variant.
        loss = (torch.sqrt(((diff * diff) + (mu * mu))) - mu)
        # Gradient norm of the loss above: |d| / sqrt(d^2 + mu^2), in [0, 1).
        g = torch.abs((diff / torch.sqrt(((mu * mu) + (diff * diff))))).detach()
        weights = torch.zeros_like(g)
        valid = (label_weight > 0)
        # Total valid weight; floor of 1 avoids division by zero.
        tot = max(label_weight.float().sum().item(), 1.0)
        n = 0  # number of non-empty bins
        for i in range(self.bins):
            inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
            num_in_bin = inds.sum().item()
            if (num_in_bin > 0):
                n += 1
                if (mmt > 0):
                    # EMA of the bin population; dense bins are down-weighted.
                    self.acc_sum[i] = ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin))
                    weights[inds] = (tot / self.acc_sum[i])
                else:
                    weights[inds] = (tot / num_in_bin)
        if (n > 0):
            # Normalize by the number of occupied bins.
            weights /= n
        loss = weight_reduce_loss(loss, weights, reduction=reduction, avg_factor=tot)
        return (loss * self.loss_weight)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred, soft_label, T, detach_target=True):
    """Loss function for knowledge distilling using KL divergence.

    Args:
        pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation. Defaults to True.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert pred.size() == soft_label.size()
    # Teacher distribution at temperature T.
    soft_target = F.softmax(soft_label / T, dim=1)
    if detach_target:
        soft_target = soft_target.detach()
    # Student log-distribution at the same temperature.
    student_log_prob = F.log_softmax(pred / T, dim=1)
    # Per-sample KL, rescaled by T^2 to keep gradient magnitudes comparable
    # across temperatures (standard distillation scaling).
    per_sample_kl = F.kl_div(
        student_log_prob, soft_target, reduction='none').mean(1)
    return per_sample_kl * (T * T)
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
    """Loss function for knowledge distilling using KL divergence.

    Args:
        reduction (str): Options are ``'none'``, ``'mean'`` and ``'sum'``.
        loss_weight (float): Loss weight of current loss.
        T (int): Temperature for distillation. Must be >= 1.
    """

    def __init__(self, reduction='mean', loss_weight=1.0, T=10):
        super(KnowledgeDistillationKLDivLoss, self).__init__()
        assert T >= 1
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.T = T

    def forward(self,
                pred,
                soft_label,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Per-call override wins over the configured default reduction.
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = knowledge_distillation_kl_div_loss(
            pred,
            soft_label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            T=self.T)
        return self.loss_weight * raw_loss
@weighted_loss
def mse_loss(pred, target):
    """Wrapper of mse loss.

    Computes the element-wise mean squared error without reduction; the
    ``@weighted_loss`` decorator adds weighting/reduction arguments.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Element-wise MSE loss with the same shape as ``pred``.
    """
    # Fixed docstring typo ("Warpper") and documented params/returns.
    return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module()
class MSELoss(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss.
            Defaults to 1.0
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Weight of the loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Per-call override wins over the configured default reduction.
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = mse_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * raw_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth L1 loss.

    Quadratic for absolute errors below ``beta``, linear above it.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.

    Returns:
        torch.Tensor: Calculated element-wise loss.
    """
    assert beta > 0
    # Empty targets: return a zero that stays attached to pred's graph.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()
    abs_err = torch.abs(pred - target)
    quadratic = 0.5 * abs_err * abs_err / beta
    linear = abs_err - 0.5 * beta
    return torch.where(abs_err < beta, quadratic, linear)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
    """L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Calculated element-wise absolute error.
    """
    # Empty targets: return a zero that stays attached to pred's graph.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()
    return (pred - target).abs()
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
    """Smooth L1 loss.

    Args:
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum". Defaults to "mean".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Per-call override wins over the configured default reduction.
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = smooth_l1_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return self.loss_weight * raw_loss
@LOSSES.register_module()
class L1Loss(nn.Module):
    """L1 loss.

    Args:
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Per-call override wins over the configured default reduction.
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = l1_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * raw_loss
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # Map the string onto PyTorch's internal enum: 0=none, 1=mean, 2=sum.
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 2:
        return loss.sum()
    if reduction_enum == 1:
        return loss.mean()
    # reduction_enum == 0 ('none'): leave the loss untouched.
    return loss
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    # Without an explicit averaging factor, defer to the plain reduction.
    if avg_factor is None:
        return reduce_loss(loss, reduction)
    # With avg_factor, 'mean' divides the sum by it instead of numel().
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    # Only 'sum' remains, which conflicts with avg_factor.
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    ``loss_func(pred, target, **kwargs)``. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like ``loss_func(pred, target, weight=None,
    reduction='mean', avg_factor=None, **kwargs)``.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # Compute the raw element-wise loss, then delegate all weighting
        # and reduction behavior to weight_reduce_loss.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
@mmcv.jit(derivate=True, coderize=True)
def varifocal_loss(pred,
                   target,
                   weight=None,
                   alpha=0.75,
                   gamma=2.0,
                   iou_weighted=True,
                   reduction='mean',
                   avg_factor=None):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of
            classes.
        weight (torch.Tensor, optional): The weight of loss for each
            prediction. Defaults to None.
        alpha (float, optional): A balance factor for the negative part of
            Varifocal Loss, which is different from the alpha of Focal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive example with the iou target. Defaults to True.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    assert pred.size() == target.size()
    scores = pred.sigmoid()
    target = target.type_as(pred)
    # Positives are entries with target > 0 (IoU-aware score targets);
    # everything else is treated as a negative.
    pos_mask = (target > 0.0).float()
    neg_mask = (target <= 0.0).float()
    # Negatives are down-weighted by alpha * |p - t|^gamma (focal term).
    neg_term = alpha * (scores - target).abs().pow(gamma) * neg_mask
    if iou_weighted:
        # Positives weighted by their IoU target.
        focal_weight = target * pos_mask + neg_term
    else:
        focal_weight = pos_mask + neg_term
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
@LOSSES.register_module()
class VarifocalLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=True,
                 alpha=0.75,
                 gamma=2.0,
                 iou_weighted=True,
                 reduction='mean',
                 loss_weight=1.0):
        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

        Args:
            use_sigmoid (bool, optional): Whether the prediction is
                used for sigmoid or softmax. Defaults to True.
            alpha (float, optional): A balance factor for the negative part
                of Varifocal Loss, which is different from the alpha of
                Focal Loss. Defaults to 0.75.
            gamma (float, optional): The gamma for calculating the
                modulating factor. Defaults to 2.0.
            iou_weighted (bool, optional): Whether to weight the loss of the
                positive examples with the iou target. Defaults to True.
            reduction (str, optional): The method used to reduce the loss
                into a scalar. Defaults to 'mean'. Options are "none",
                "mean" and "sum".
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super(VarifocalLoss, self).__init__()
        # Only the sigmoid formulation is implemented.
        assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.'
        assert alpha >= 0.0
        self.use_sigmoid = use_sigmoid
        self.alpha = alpha
        self.gamma = gamma
        self.iou_weighted = iou_weighted
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Per-call override wins over the configured default reduction.
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        if not self.use_sigmoid:
            raise NotImplementedError
        raw_loss = varifocal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            iou_weighted=self.iou_weighted,
            reduction=reduction,
            avg_factor=avg_factor)
        return self.loss_weight * raw_loss
@NECKS.register_module()
class BFP(BaseModule):
    """BFP (Balanced Feature Pyramids)

    BFP takes multi-level features as inputs and gather them into a single
    one, then refine the gathered feature and scatter the refined results to
    multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
    the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
    <https://arxiv.org/abs/1904.02701>`_ for details.

    Args:
        in_channels (int): Number of input channels (feature maps of all
            levels should have the same channels).
        num_levels (int): Number of input feature levels.
        conv_cfg (dict): The config dict for convolution layers.
        norm_cfg (dict): The config dict for normalization layers.
        refine_level (int): Index of integration and refine level of BSF in
            multi-level features from bottom to top.
        refine_type (str): Type of the refine op, currently support
            [None, 'conv', 'non_local'].
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self, in_channels, num_levels, refine_level=2, refine_type=None, conv_cfg=None, norm_cfg=None, init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):
        super(BFP, self).__init__(init_cfg)
        assert (refine_type in [None, 'conv', 'non_local'])
        self.in_channels = in_channels
        self.num_levels = num_levels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # All levels are gathered to (and scattered from) this level's size.
        self.refine_level = refine_level
        self.refine_type = refine_type
        assert (0 <= self.refine_level < self.num_levels)
        # Optional refinement op applied to the gathered feature; when
        # refine_type is None no `refine` attribute is created.
        if (self.refine_type == 'conv'):
            self.refine = ConvModule(self.in_channels, self.in_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        elif (self.refine_type == 'non_local'):
            self.refine = NonLocal2d(self.in_channels, reduction=1, use_scale=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)

    def forward(self, inputs):
        """Forward function.

        Gather -> (optionally) refine -> scatter with residual addition.
        """
        assert (len(inputs) == self.num_levels)
        # Step 1: resize every level to the refine level's spatial size.
        # Higher-resolution levels are pooled down; lower ones upsampled.
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if (i < self.refine_level):
                gathered = F.adaptive_max_pool2d(inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)
        # Step 2: balanced semantic feature = mean over all levels.
        bsf = (sum(feats) / len(feats))
        if (self.refine_type is not None):
            bsf = self.refine(bsf)
        # Step 3: scatter the refined feature back to each level's size and
        # add it to the original input as a residual.
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if (i < self.refine_level):
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append((residual + inputs[i]))
        return tuple(outs)
@NECKS.register_module()
class ChannelMapper(BaseModule):
    """Channel Mapper to reduce/increase channels of backbone features.

    This is used to reduce/increase channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        kernel_size (int, optional): kernel_size for reducing channels (used
            at each scale). Default: 3.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: None.
        act_cfg (dict, optional): Config dict for activation layer in
            ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There
            would be extra_convs when num_outs larger than the length
            of in_channels.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = ChannelMapper(in_channels, 11, 3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), num_outs=None, init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):
        super(ChannelMapper, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.extra_convs = None
        if (num_outs is None):
            num_outs = len(in_channels)
        # One conv per input scale, mapping its channels to out_channels.
        self.convs = nn.ModuleList()
        for in_channel in in_channels:
            self.convs.append(ConvModule(in_channel, out_channels, kernel_size, padding=((kernel_size - 1) // 2), conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
        if (num_outs > len(in_channels)):
            # Extra stride-2 convs create additional, coarser output levels.
            self.extra_convs = nn.ModuleList()
            for i in range(len(in_channels), num_outs):
                # The first extra conv reads the last backbone feature;
                # subsequent ones chain off the previous extra output.
                if (i == len(in_channels)):
                    in_channel = in_channels[(- 1)]
                else:
                    in_channel = out_channels
                self.extra_convs.append(ConvModule(in_channel, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))

    def forward(self, inputs):
        """Forward function.

        Maps each input scale through its conv, then appends extra
        downsampled outputs when configured.
        """
        assert (len(inputs) == len(self.convs))
        outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
        if self.extra_convs:
            for i in range(len(self.extra_convs)):
                if (i == 0):
                    outs.append(self.extra_convs[0](inputs[(- 1)]))
                else:
                    outs.append(self.extra_convs[i](outs[(- 1)]))
        return tuple(outs)
class Bottleneck(nn.Module):
    """Bottleneck block for DilatedEncoder used in `YOLOF.

    <https://arxiv.org/abs/2103.09460>`.

    The Bottleneck contains three ConvLayers and one residual connection.

    Args:
        in_channels (int): The number of input channels.
        mid_channels (int): The number of middle output channels.
        dilation (int): Dilation rate.
        norm_cfg (dict): Dictionary to construct and config norm layer.
    """

    def __init__(self,
                 in_channels,
                 mid_channels,
                 dilation,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(Bottleneck, self).__init__()
        # 1x1 reduce -> 3x3 dilated -> 1x1 expand back to in_channels.
        self.conv1 = ConvModule(
            in_channels, mid_channels, 1, norm_cfg=norm_cfg)
        self.conv2 = ConvModule(
            mid_channels,
            mid_channels,
            3,
            padding=dilation,
            dilation=dilation,
            norm_cfg=norm_cfg)
        self.conv3 = ConvModule(
            mid_channels, in_channels, 1, norm_cfg=norm_cfg)

    def forward(self, x):
        """Apply the three convs and add the residual connection."""
        return x + self.conv3(self.conv2(self.conv1(x)))
@NECKS.register_module()
class DilatedEncoder(nn.Module):
    """Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.

    This module contains two types of components:
        - the original FPN lateral convolution layer and fpn convolution
          layer, which are 1x1 conv + 3x3 conv
        - the dilated residual block

    Args:
        in_channels (int): The number of input channels.
        out_channels (int): The number of output channels.
        block_mid_channels (int): The number of middle block output channels
        num_residual_blocks (int): The number of residual blocks.
            NOTE(review): block_dilations is fixed to [2, 4, 6, 8], so
            values above 4 would raise IndexError in _init_layers.
    """

    def __init__(self, in_channels, out_channels, block_mid_channels, num_residual_blocks):
        super(DilatedEncoder, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.block_mid_channels = block_mid_channels
        self.num_residual_blocks = num_residual_blocks
        # Fixed dilation schedule for the residual blocks.
        self.block_dilations = [2, 4, 6, 8]
        self._init_layers()

    def _init_layers(self):
        # 1x1 lateral conv + BN, then 3x3 fpn conv + BN (norms are applied
        # manually in forward, so plain Conv2d/BatchNorm2d are used here).
        self.lateral_conv = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1)
        self.lateral_norm = BatchNorm2d(self.out_channels)
        self.fpn_conv = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, padding=1)
        self.fpn_norm = BatchNorm2d(self.out_channels)
        encoder_blocks = []
        for i in range(self.num_residual_blocks):
            dilation = self.block_dilations[i]
            encoder_blocks.append(Bottleneck(self.out_channels, self.block_mid_channels, dilation=dilation))
        self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)

    def init_weights(self):
        # Caffe2-style Xavier for the two projection convs, unit-constant
        # for their norms, and N(0, 0.01) for convs inside the blocks.
        caffe2_xavier_init(self.lateral_conv)
        caffe2_xavier_init(self.fpn_conv)
        for m in [self.lateral_norm, self.fpn_norm]:
            constant_init(m, 1)
        for m in self.dilated_encoder_blocks.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, mean=0, std=0.01)
            if is_norm(m):
                constant_init(m, 1)

    def forward(self, feature):
        # Only the last (coarsest) backbone feature is consumed; the output
        # is a single-level tuple, matching the neck interface.
        out = self.lateral_norm(self.lateral_conv(feature[(- 1)]))
        out = self.fpn_norm(self.fpn_conv(out))
        return (self.dilated_encoder_blocks(out),)
class Transition(BaseModule):
    """Base class for transition.

    Subclasses implement ``forward`` to transform a feature map between
    pyramid stages.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self, in_channels, out_channels, init_cfg=None):
        super().__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x):
        # Bug fix: the original stub was ``def forward(x)`` (missing
        # ``self``), which would raise TypeError if an nn.Module call ever
        # reached it. Subclasses are expected to override this method.
        pass
class UpInterpolationConv(Transition):
    """A transition used for up-sampling.

    Up-sample the input by interpolation then refines the feature by
    a convolution layer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Up-sampling factor. Default: 2.
        mode (int): Interpolation mode. Default: nearest.
        align_corners (bool): Whether align corners when interpolation.
            Default: None.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor=2,
                 mode='nearest',
                 align_corners=None,
                 kernel_size=3,
                 init_cfg=None,
                 **kwargs):
        super().__init__(in_channels, out_channels, init_cfg)
        self.mode = mode
        self.scale_factor = scale_factor
        self.align_corners = align_corners
        # "Same" padding so the spatial size is preserved by the conv.
        self.conv = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, x):
        """Up-sample by interpolation, then refine with the conv."""
        upsampled = F.interpolate(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)
        return self.conv(upsampled)
class LastConv(Transition):
    """A transition used for refining the output of the last stage.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        num_inputs (int): Number of inputs of the FPN features.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_inputs,
                 kernel_size=3,
                 init_cfg=None,
                 **kwargs):
        super().__init__(in_channels, out_channels, init_cfg)
        self.num_inputs = num_inputs
        # "Same" padding so the spatial size is preserved by the conv.
        self.conv_out = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, inputs):
        """Refine only the last input feature with the output conv."""
        assert len(inputs) == self.num_inputs
        return self.conv_out(inputs[-1])
@NECKS.register_module()
class FPG(BaseModule):
    """FPG (Feature Pyramid Grids).

    Implementation of `Feature Pyramid Grids
    <https://arxiv.org/abs/2004.03580>`_. Only the basic structure from the
    paper is provided; users can plug in different transition types.

    Args:
        in_channels (list[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        stack_times (int): How many times the pyramid architecture is
            stacked.
        paths (list[str]): Path direction per stack level, each either
            'bu' (bottom-up) or 'td' (top-down).
        inter_channels (int | list[int], optional): Intermediate channels.
            Defaults to ``out_channels`` at every level.
        same_down_trans (dict, optional): Same-pathway top-down transition.
        same_up_trans (dict, optional): Same-pathway bottom-up transition.
        across_lateral_trans (dict): Across-pathway same-stage transition.
        across_down_trans (dict): Across-pathway bottom-up connection.
        across_up_trans (dict, optional): Across-pathway top-down connection.
        across_skip_trans (dict): Across-pathway skip connection.
        output_trans (dict): Transition producing the final outputs.
        start_level (int): Index of the first backbone level used. Default: 0.
        end_level (int): Index of the last backbone level (exclusive), or -1
            for the last level. Default: -1.
        add_extra_convs (bool): Whether extra levels use strided convs
            instead of max-pooling. Default: False.
        norm_cfg (dict, optional): Config dict for normalization layer.
        skip_inds (list): Per-level sets of stage indices to skip. Required
            whenever ``across_skip_trans`` is not None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    transition_types = {
        'conv': ConvModule,
        'interpolation_conv': UpInterpolationConv,
        'last_conv': LastConv,
        # BUGFIX: 'identity' is the default ``across_skip_trans`` type but
        # had no entry here, so the default config raised a KeyError in
        # ``build_trans``. ``nn.Identity`` accepts and ignores the channel
        # arguments, which matches the intended no-op skip connection.
        'identity': nn.Identity,
    }

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 paths,
                 inter_channels=None,
                 same_down_trans=None,
                 same_up_trans=dict(
                     type='conv', kernel_size=3, stride=2, padding=1),
                 across_lateral_trans=dict(type='conv', kernel_size=1),
                 across_down_trans=dict(type='conv', kernel_size=3),
                 across_up_trans=None,
                 across_skip_trans=dict(type='identity'),
                 output_trans=dict(type='last_conv', kernel_size=3),
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 norm_cfg=None,
                 skip_inds=None,
                 init_cfg=[
                     dict(type='Caffe2Xavier', layer='Conv2d'),
                     dict(
                         type='Constant',
                         layer=[
                             '_BatchNorm', '_InstanceNorm', 'GroupNorm',
                             'LayerNorm'
                         ],
                         val=1.0)
                 ]):
        super(FPG, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        if inter_channels is None:
            self.inter_channels = [out_channels for _ in range(num_outs)]
        elif isinstance(inter_channels, int):
            self.inter_channels = [inter_channels for _ in range(num_outs)]
        else:
            assert isinstance(inter_channels, list)
            assert len(inter_channels) == num_outs
            self.inter_channels = inter_channels
        self.stack_times = stack_times
        self.paths = paths
        assert isinstance(paths, list) and len(paths) == stack_times
        for d in paths:
            assert d in ('bu', 'td')

        self.same_down_trans = same_down_trans
        self.same_up_trans = same_up_trans
        self.across_lateral_trans = across_lateral_trans
        self.across_down_trans = across_down_trans
        self.across_up_trans = across_up_trans
        self.output_trans = output_trans
        self.across_skip_trans = across_skip_trans
        self.with_bias = norm_cfg is None
        # BUGFIX: the original line read ``(skip_inds is not None)`` as a
        # bare expression, so the intended validation never ran and a
        # missing ``skip_inds`` crashed below with an opaque TypeError on
        # ``self.skip_inds[0]``.
        if self.across_skip_trans is not None:
            assert skip_inds is not None
        self.skip_inds = skip_inds
        assert len(self.skip_inds[0]) <= self.stack_times

        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs

        # 1x1 lateral convs to map backbone channels to inter_channels
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = nn.Conv2d(self.in_channels[i],
                               self.inter_channels[i - self.start_level], 1)
            self.lateral_convs.append(l_conv)

        # extra pyramid levels beyond the backbone outputs
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            if self.add_extra_convs:
                fpn_idx = self.backbone_end_level - self.start_level + i
                extra_conv = nn.Conv2d(
                    self.inter_channels[fpn_idx - 1],
                    self.inter_channels[fpn_idx],
                    3,
                    stride=2,
                    padding=1)
                self.extra_downsamples.append(extra_conv)
            else:
                self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))

        # one ModuleDict of transitions per (stage, level)
        self.fpn_transitions = nn.ModuleList()  # stack times
        for s in range(self.stack_times):
            stage_trans = nn.ModuleList()  # num of feature levels
            for i in range(self.num_outs):
                trans = nn.ModuleDict()
                if s in self.skip_inds[i]:
                    # this (stage, level) node is skipped entirely
                    stage_trans.append(trans)
                    continue
                # same-pathway bottom-up transition (from level i-1)
                if i == 0 or self.same_up_trans is None:
                    same_up_trans = None
                else:
                    same_up_trans = self.build_trans(
                        self.same_up_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['same_up'] = same_up_trans
                # same-pathway top-down transition (from level i+1)
                if i == self.num_outs - 1 or self.same_down_trans is None:
                    same_down_trans = None
                else:
                    same_down_trans = self.build_trans(
                        self.same_down_trans, self.inter_channels[i + 1],
                        self.inter_channels[i])
                trans['same_down'] = same_down_trans
                # across-pathway same-stage transition
                across_lateral_trans = self.build_trans(
                    self.across_lateral_trans, self.inter_channels[i],
                    self.inter_channels[i])
                trans['across_lateral'] = across_lateral_trans
                # across-pathway connection from level i+1
                if i == self.num_outs - 1 or self.across_down_trans is None:
                    across_down_trans = None
                else:
                    across_down_trans = self.build_trans(
                        self.across_down_trans, self.inter_channels[i + 1],
                        self.inter_channels[i])
                trans['across_down'] = across_down_trans
                # across-pathway connection from level i-1
                if i == 0 or self.across_up_trans is None:
                    across_up_trans = None
                else:
                    across_up_trans = self.build_trans(
                        self.across_up_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_up'] = across_up_trans
                if self.across_skip_trans is None:
                    across_skip_trans = None
                else:
                    # NOTE(review): the skip connection consumes
                    # ``outs[0][lvl]`` (channels inter_channels[lvl]) but is
                    # built with in_channels=inter_channels[i - 1]; this only
                    # matters for non-uniform inter_channels — verify.
                    across_skip_trans = self.build_trans(
                        self.across_skip_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_skip'] = across_skip_trans
                stage_trans.append(trans)
            self.fpn_transitions.append(stage_trans)

        # per-level output transitions fusing all stacked stages
        self.output_transition = nn.ModuleList()  # output levels
        for i in range(self.num_outs):
            trans = self.build_trans(
                self.output_trans,
                self.inter_channels[i],
                self.out_channels,
                num_inputs=self.stack_times + 1)
            self.output_transition.append(trans)

        self.relu = nn.ReLU(inplace=True)

    def build_trans(self, cfg, in_channels, out_channels, **extra_args):
        """Instantiate a transition module from its config dict."""
        cfg_ = cfg.copy()
        trans_type = cfg_.pop('type')
        trans_cls = self.transition_types[trans_type]
        return trans_cls(in_channels, out_channels, **cfg_, **extra_args)

    def fuse(self, fuse_dict):
        """Sum all non-None tensors in ``fuse_dict``; None if all missing."""
        out = None
        for item in fuse_dict.values():
            if item is not None:
                if out is None:
                    out = item
                else:
                    out = out + item
        return out

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple[Tensor]): Backbone features, one per input level.

        Returns:
            list[Tensor]: ``num_outs`` output feature maps.
        """
        assert len(inputs) == len(self.in_channels)

        # build all levels from original feature maps
        feats = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[-1]))

        outs = [feats]

        for i in range(self.stack_times):
            current_outs = outs[-1]
            next_outs = []
            direction = self.paths[i]
            for j in range(self.num_outs):
                if i in self.skip_inds[j]:
                    next_outs.append(outs[-1][j])
                    continue
                # feature level index: top-down paths walk levels in reverse
                if direction == 'td':
                    lvl = self.num_outs - j - 1
                else:
                    lvl = j
                # pick the same-pathway transition matching the direction
                if direction == 'td':
                    same_trans = self.fpn_transitions[i][lvl]['same_down']
                else:
                    same_trans = self.fpn_transitions[i][lvl]['same_up']
                across_lateral_trans = self.fpn_transitions[i][lvl][
                    'across_lateral']
                across_down_trans = self.fpn_transitions[i][lvl]['across_down']
                across_up_trans = self.fpn_transitions[i][lvl]['across_up']
                across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
                # gather every incoming edge, then sum the non-None ones
                to_fuse = dict(
                    same=None, lateral=None, across_up=None, across_down=None)
                # same-pathway update: the previously processed node in this
                # path
                if same_trans is not None:
                    to_fuse['same'] = same_trans(next_outs[-1])
                # across-pathway same-stage update
                if across_lateral_trans is not None:
                    to_fuse['lateral'] = across_lateral_trans(
                        current_outs[lvl])
                # across-pathway connections from neighbouring levels
                if lvl > 0 and across_up_trans is not None:
                    to_fuse['across_up'] = across_up_trans(
                        current_outs[lvl - 1])
                if lvl < self.num_outs - 1 and across_down_trans is not None:
                    to_fuse['across_down'] = across_down_trans(
                        current_outs[lvl + 1])
                # skip connection from the very first (lateral) stage
                if across_skip_trans is not None:
                    to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
                x = self.fuse(to_fuse)
                next_outs.append(x)

            # restore bottom-up level ordering after a top-down pass
            if direction == 'td':
                outs.append(next_outs[::-1])
            else:
                outs.append(next_outs)

        # output transitions fuse the per-level features from all stages
        final_outs = []
        for i in range(self.num_outs):
            lvl_out_list = []
            for s in range(len(outs)):
                lvl_out_list.append(outs[s][i])
            lvl_out = self.output_transition[i](lvl_out_list)
            final_outs.append(lvl_out)

        return final_outs
@NECKS.register_module()
class FPN(BaseModule):
    """Feature Pyramid Network.

    Implementation of `Feature Pyramid Networks for Object Detection
    <https://arxiv.org/abs/1612.03144>`_. Builds a top-down pathway with
    lateral 1x1 convs and 3x3 smoothing convs, optionally adding extra
    levels via max-pooling or strided convs.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last
            level.
        add_extra_convs (bool | str): If bool, whether to add conv layers on
            top of the original feature maps (True is equivalent to
            'on_input'). If str, the source of the extra convs, one of
            'on_input', 'on_lateral' or 'on_output'.
        relu_before_extra_convs (bool): Whether to apply relu before the
            extra conv. Default: False.
        no_norm_on_lateral (bool): Whether to skip norm on lateral convs.
            Default: False.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (str): Config dict for activation layer in ConvModule.
            Default: None.
        upsample_cfg (dict): Config dict for the top-down interpolation.
            Default: ``dict(mode='nearest')``.
        init_cfg (dict or list[dict], optional): Initialization config dict.

    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = FPN(in_channels, 11, len(in_channels)).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 upsample_cfg=dict(mode='nearest'),
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(FPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        # copy so later mutation of the caller's dict cannot change behavior
        self.upsample_cfg = upsample_cfg.copy()

        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < num_ins, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            # Extra_convs_source choices: 'on_input', 'on_lateral',
            # 'on_output'
            assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
        elif add_extra_convs:
            # legacy boolean True maps to 'on_input'
            self.add_extra_convs = 'on_input'

        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()

        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral conv, optionally without norm
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
                act_cfg=act_cfg,
                inplace=False)
            # 3x3 smoothing conv applied after top-down fusion
            fpn_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        # add extra conv layers (e.g., RetinaNet P6/P7)
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        if self.add_extra_convs and extra_levels >= 1:
            for i in range(extra_levels):
                # only the first extra conv may read from the raw backbone
                if i == 0 and self.add_extra_convs == 'on_input':
                    in_channels = self.in_channels[self.backbone_end_level - 1]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(
                    in_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    inplace=False)
                self.fpn_convs.append(extra_fpn_conv)

    @auto_fp16()
    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple[Tensor]): Backbone features, one per input level.

        Returns:
            tuple[Tensor]: ``num_outs`` pyramid feature maps.
        """
        assert len(inputs) == len(self.in_channels)

        # build laterals
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            # fixed scale_factor does not match shape of feature maps in some
            # cases, so prefer explicit size when no scale_factor is given
            if 'scale_factor' in self.upsample_cfg:
                laterals[i - 1] = laterals[i - 1] + F.interpolate(
                    laterals[i], **self.upsample_cfg)
            else:
                prev_shape = laterals[i - 1].shape[2:]
                laterals[i - 1] = laterals[i - 1] + F.interpolate(
                    laterals[i], size=prev_shape, **self.upsample_cfg)

        # build outputs: part 1, from original levels
        outs = [
            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
        ]
        # part 2: add extra levels
        if self.num_outs > len(outs):
            # use max pool to get more levels on top of outputs
            # (e.g., Faster R-CNN, Mask R-CNN)
            if not self.add_extra_convs:
                for i in range(self.num_outs - used_backbone_levels):
                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
            # add conv layers on top of original feature maps
            # (RetinaNet, etc.)
            else:
                if self.add_extra_convs == 'on_input':
                    extra_source = inputs[self.backbone_end_level - 1]
                elif self.add_extra_convs == 'on_lateral':
                    extra_source = laterals[-1]
                elif self.add_extra_convs == 'on_output':
                    extra_source = outs[-1]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                for i in range(used_backbone_levels + 1, self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
                    else:
                        outs.append(self.fpn_convs[i](outs[-1]))
        return tuple(outs)
@NECKS.register_module()
class HRFPN(BaseModule):
    """HRFPN (High Resolution Feature Pyramids).

    Paper: `High-Resolution Representations for Labeling Pixels and Regions
    <https://arxiv.org/abs/1904.04514>`_.

    All branch features are up-sampled to the highest resolution,
    concatenated, reduced with a 1x1 conv, then pooled into a pyramid with
    one 3x3 conv per output level.

    Args:
        in_channels (list): Number of channels for each branch.
        out_channels (int): Output channels of the feature pyramids.
        num_outs (int): Number of output stages.
        pooling_type (str): Pooling used to build the pyramid, 'MAX' or
            'AVG'.
        conv_cfg (dict): Config dict for convolution layers.
        norm_cfg (dict): Config dict for normalization layers.
        with_cp (bool): Use gradient checkpointing to trade compute for
            memory.
        stride (int): Stride of the 3x3 convolutional layers.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs=5,
                 pooling_type='AVG',
                 conv_cfg=None,
                 norm_cfg=None,
                 with_cp=False,
                 stride=1,
                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
        super(HRFPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # fuse the concatenated branches down to out_channels
        self.reduction_conv = ConvModule(
            sum(in_channels),
            out_channels,
            kernel_size=1,
            conv_cfg=self.conv_cfg,
            act_cfg=None)

        # one smoothing conv per pyramid level
        self.fpn_convs = nn.ModuleList(
            ConvModule(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                stride=stride,
                conv_cfg=self.conv_cfg,
                act_cfg=None) for _ in range(self.num_outs))

        # anything other than 'MAX' falls back to average pooling
        self.pooling = F.max_pool2d if pooling_type == 'MAX' else F.avg_pool2d

    def _apply(self, module, x):
        """Run ``module`` on ``x``, via checkpointing when enabled."""
        if x.requires_grad and self.with_cp:
            return checkpoint(module, x)
        return module(x)

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_ins
        # bring every branch to the resolution of the first (largest) one
        upsampled = [inputs[0]]
        for level in range(1, self.num_ins):
            upsampled.append(
                F.interpolate(
                    inputs[level], scale_factor=2**level, mode='bilinear'))
        fused = self._apply(self.reduction_conv, torch.cat(upsampled, dim=1))

        # pyramid by pooling the fused map with growing kernel/stride
        pyramid = [fused]
        for level in range(1, self.num_outs):
            pyramid.append(
                self.pooling(fused, kernel_size=2**level, stride=2**level))

        return tuple(
            self._apply(conv, feat)
            for conv, feat in zip(self.fpn_convs, pyramid))
@NECKS.register_module()
class NASFPN(BaseModule):
    """NAS-FPN.

    Implementation of `NAS-FPN: Learning Scalable Feature Pyramid
    Architecture for Object Detection <https://arxiv.org/abs/1904.07392>`_.
    The merging-cell wiring inside each stage reproduces the searched
    architecture from the paper, so the cell order in ``forward`` is
    load-bearing.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture will
            be stacked.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last
            level.
        add_extra_convs (bool): It decides whether to add conv layers on top
            of the original feature maps. Default to False.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 norm_cfg=None,
                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
        super(NASFPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)  # num of input feature levels
        self.num_outs = num_outs  # num of output feature levels
        self.stack_times = stack_times
        self.norm_cfg = norm_cfg

        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < num_ins, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs

        # add lateral connections (1x1 convs, no activation)
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                norm_cfg=norm_cfg,
                act_cfg=None)
            self.lateral_convs.append(l_conv)

        # add extra downsample layers (stride-2 pooling after 1x1 conv)
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_conv = ConvModule(
                out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.extra_downsamples.append(
                nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))

        # add NAS FPN connections; cell names encode input levels and the
        # output level, e.g. 'gp_64_4' = global pooling of (p6, p4) -> p4
        self.fpn_stages = ModuleList()
        for _ in range(self.stack_times):
            stage = nn.ModuleDict()
            # gp(p6, p4) -> p4_1
            stage['gp_64_4'] = GlobalPoolingCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p4_1, p4) -> p4_2
            stage['sum_44_4'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p4_2, p3) -> p3_out
            stage['sum_43_3'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p3_out, p4_2) -> p4_out
            stage['sum_34_4'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p5, gp(p4_out, p3_out)) -> p5_out
            stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
            stage['sum_55_5'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p7, gp(p5_out, p4_2)) -> p7_out
            stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
            stage['sum_77_7'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # gp(p7_out, p5_out) -> p6_out
            stage['gp_75_6'] = GlobalPoolingCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            self.fpn_stages.append(stage)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple[Tensor]): Backbone features, one per input level.

        Returns:
            tuple[Tensor]: Output features (p3, p4, p5, p6, p7).
        """
        # build P3-P5 from laterals, then P6/P7 by downsampling
        feats = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[-1]))

        # NOTE: assumes exactly 5 pyramid levels after extra downsamples
        p3, p4, p5, p6, p7 = feats

        for stage in self.fpn_stages:
            # gp(p6, p4) -> p4_1
            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])
            # sum(p4_1, p4) -> p4_2
            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])
            # sum(p4_2, p3) -> p3_out
            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])
            # sum(p3_out, p4_2) -> p4_out
            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])
            # sum(p5, gp(p4_out, p3_out)) -> p5_out
            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])
            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])
            # sum(p7, gp(p5_out, p4_2)) -> p7_out
            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])
            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])
            # gp(p7_out, p5_out) -> p6_out
            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])

        return p3, p4, p5, p6, p7
@NECKS.register_module()
class NASFCOS_FPN(BaseModule):
    """FPN structure in NASFPN.

    Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for
    Object Detection <https://arxiv.org/abs/1906.04423>`_.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 1.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last
            level.
        add_extra_convs (bool): It decides whether to add conv layers on top
            of the original feature maps. Default: False.
        conv_cfg (dict): Config dict for convolution layers.
        norm_cfg (dict): Config dict for normalization layers.
        init_cfg (dict or list[dict], optional): Must be None; weights are
            initialized in :meth:`init_weights` instead.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=1,
                 end_level=-1,
                 add_extra_convs=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None):
        assert init_cfg is None, 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super(NASFCOS_FPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg

        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < num_ins, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs

        # 1x1 adaptation convs mapping backbone channels to out_channels
        self.adapt_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            adapt_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                stride=1,
                padding=0,
                bias=False,
                norm_cfg=dict(type='BN'),
                act_cfg=dict(type='ReLU', inplace=False))
            self.adapt_convs.append(adapt_conv)

        extra_levels = num_outs - self.backbone_end_level + self.start_level

        def build_concat_cell(with_input1_conv, with_input2_conv):
            # searched merging cell; output conv is depthwise-style
            # (groups=out_channels) in norm-act-conv order
            cell_conv_cfg = dict(
                kernel_size=1, padding=0, bias=False, groups=out_channels)
            return ConcatCell(
                in_channels=out_channels,
                out_channels=out_channels,
                with_out_conv=True,
                out_conv_cfg=cell_conv_cfg,
                out_norm_cfg=dict(type='BN'),
                out_conv_order=('norm', 'act', 'conv'),
                with_input1_conv=with_input1_conv,
                with_input2_conv=with_input2_conv,
                input_conv_cfg=conv_cfg,
                input_norm_cfg=norm_cfg,
                upsample_mode='nearest')

        # Cell names encode the indices of the two fused features in the
        # running ``feats`` list (see forward).
        self.fpn = nn.ModuleDict()
        self.fpn['c22_1'] = build_concat_cell(True, True)
        self.fpn['c22_2'] = build_concat_cell(True, True)
        self.fpn['c32'] = build_concat_cell(True, False)
        self.fpn['c02'] = build_concat_cell(True, False)
        self.fpn['c42'] = build_concat_cell(True, True)
        self.fpn['c36'] = build_concat_cell(True, True)
        self.fpn['c61'] = build_concat_cell(True, True)

        # stride-2 convs producing the extra pyramid levels
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_act_cfg = None if i == 0 \
                else dict(type='ReLU', inplace=False)
            self.extra_downsamples.append(
                ConvModule(
                    out_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    act_cfg=extra_act_cfg,
                    order=('act', 'norm', 'conv')))

    def forward(self, inputs):
        """Forward function."""
        feats = [
            adapt_conv(inputs[i + self.start_level])
            for i, adapt_conv in enumerate(self.adapt_convs)
        ]

        # each cell fuses two earlier features (indices taken from its
        # name) and appends the result, growing ``feats``
        for (i, module_name) in enumerate(self.fpn):
            idx_1, idx_2 = int(module_name[1]), int(module_name[2])
            res = self.fpn[module_name](feats[idx_1], feats[idx_2])
            feats.append(res)

        ret = []
        for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]):  # add P3, P4, P5
            feats1, feats2 = feats[idx], feats[5]
            feats2_resize = F.interpolate(
                feats2,
                size=feats1.size()[2:],
                mode='bilinear',
                align_corners=False)

            feats_sum = feats1 + feats2_resize
            ret.append(
                F.interpolate(
                    feats_sum,
                    size=inputs[input_idx].size()[2:],
                    mode='bilinear',
                    align_corners=False))

        for submodule in self.extra_downsamples:
            ret.append(submodule(ret[-1]))

        return tuple(ret)

    def init_weights(self):
        """Initialize the weights of module."""
        super(NASFCOS_FPN, self).init_weights()
        for module in self.fpn.values():
            # BUGFIX: the guard previously tested ``hasattr(module,
            # 'conv_out')`` while the body accesses ``module.out_conv.conv``,
            # so the Caffe2-Xavier init of the cell output convs was
            # silently skipped. Test the attribute that is actually used.
            if hasattr(module, 'out_conv'):
                caffe2_xavier_init(module.out_conv.conv)
        for modules in [
                self.adapt_convs.modules(),
                self.extra_downsamples.modules()
        ]:
            for module in modules:
                if isinstance(module, nn.Conv2d):
                    caffe2_xavier_init(module)
@NECKS.register_module()
class PAFPN(FPN):
    """Path Aggregation Network for Instance Segmentation.

    This is an implementation of the `PAFPN in Path Aggregation Network
    <https://arxiv.org/abs/1803.01534>`_. Extends FPN with an extra
    bottom-up path (downsample convs) after the top-down pass.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last
            level.
        add_extra_convs (bool | str): If bool, whether to add conv layers on
            top of the original feature maps (True is equivalent to
            'on_input'). If str, one of 'on_input', 'on_lateral',
            'on_output'.
        relu_before_extra_convs (bool): Whether to apply relu before the
            extra conv. Default: False.
        no_norm_on_lateral (bool): Whether to skip norm on lateral convs.
            Default: False.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (str): Config dict for activation layer in ConvModule.
            Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(PAFPN, self).__init__(
            in_channels,
            out_channels,
            num_outs,
            start_level,
            end_level,
            add_extra_convs,
            relu_before_extra_convs,
            no_norm_on_lateral,
            conv_cfg,
            norm_cfg,
            act_cfg,
            init_cfg=init_cfg)
        # add extra bottom up pathway
        self.downsample_convs = nn.ModuleList()
        self.pafpn_convs = nn.ModuleList()
        for i in range(self.start_level + 1, self.backbone_end_level):
            # stride-2 conv feeding level i into level i+1
            d_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                stride=2,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            # smoothing conv applied after the bottom-up fusion
            pafpn_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            self.downsample_convs.append(d_conv)
            self.pafpn_convs.append(pafpn_conv)

    @auto_fp16()
    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == len(self.in_channels)

        # build laterals
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]

        # build top-down path (in-place += on the lateral tensors)
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] += F.interpolate(
                laterals[i], size=prev_shape, mode='nearest')

        # intermediate outputs after the FPN smoothing convs
        inter_outs = [
            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
        ]

        # build extra bottom-up path: each level absorbs the downsampled
        # level below it
        for i in range(0, used_backbone_levels - 1):
            inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i])

        outs = []
        # lowest level passes through unchanged; higher levels get the
        # PAFPN smoothing conv
        outs.append(inter_outs[0])
        outs.extend([
            self.pafpn_convs[i - 1](inter_outs[i])
            for i in range(1, used_backbone_levels)
        ])

        # add extra levels (same scheme as FPN)
        if self.num_outs > len(outs):
            # use max pool to get more levels on top of outputs
            # (e.g., Faster R-CNN, Mask R-CNN)
            if not self.add_extra_convs:
                for i in range(self.num_outs - used_backbone_levels):
                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
            # add conv layers on top of original feature maps
            # (RetinaNet, etc.)
            else:
                if self.add_extra_convs == 'on_input':
                    orig = inputs[self.backbone_end_level - 1]
                    outs.append(self.fpn_convs[used_backbone_levels](orig))
                elif self.add_extra_convs == 'on_lateral':
                    outs.append(self.fpn_convs[used_backbone_levels](
                        laterals[-1]))
                elif self.add_extra_convs == 'on_output':
                    outs.append(self.fpn_convs[used_backbone_levels](
                        outs[-1]))
                else:
                    raise NotImplementedError
                for i in range(used_backbone_levels + 1, self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
                    else:
                        outs.append(self.fpn_convs[i](outs[-1]))
        return tuple(outs)