code
stringlengths
17
6.64M
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
    """Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        # Pure configuration wrapper: all construction is delegated to the
        # two-stage base class.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class FCOS(SingleStageDetector):
    """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Thin wrapper: the single-stage base class wires backbone, neck and
        # head together from the given configs.
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class FOVEA(SingleStageDetector):
    """Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Everything is handled by the generic single-stage base class.
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class FSAF(SingleStageDetector):
    """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Construction is fully delegated to the single-stage base class.
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class GFL(SingleStageDetector):
    """Implementation of the GFL single-stage detector.

    Pure configuration wrapper around :class:`SingleStageDetector`.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class GridRCNN(TwoStageDetector):
    """Grid R-CNN.

    This detector is the implementation of:
    - Grid R-CNN (https://arxiv.org/abs/1811.12030)
    - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        # Configuration-only subclass; the two-stage base class builds all
        # components.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class HybridTaskCascade(CascadeRCNN):
    """Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def with_semantic(self):
        """bool: whether the detector has a semantic head"""
        # Forward the query to the RoI head, which owns the semantic branch.
        return self.roi_head.with_semantic
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
    """Implementation of `Distilling the Knowledge in a Neural Network.
    <https://arxiv.org/abs/1503.02531>`_.

    Args:
        teacher_config (str | dict): Config file path
            or the config object of teacher model.
        teacher_ckpt (str, optional): Checkpoint path of teacher model.
            If left as None, the model will not load any weights.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 teacher_config,
                 teacher_ckpt=None,
                 eval_teacher=True,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
        # If True, the teacher is always kept in eval mode (see ``train``);
        # otherwise it follows the student's train/eval mode.
        self.eval_teacher = eval_teacher
        # Build teacher model from a config file path or an already-parsed
        # config object.
        if isinstance(teacher_config, str):
            teacher_config = mmcv.Config.fromfile(teacher_config)
        self.teacher_model = build_detector(teacher_config['model'])
        if (teacher_ckpt is not None):
            load_checkpoint(
                self.teacher_model, teacher_ckpt, map_location='cpu')

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.extract_feat(img)
        # Teacher outputs serve only as distillation targets, so no gradient
        # is tracked through the teacher forward pass.
        with torch.no_grad():
            teacher_x = self.teacher_model.extract_feat(img)
            out_teacher = self.teacher_model.bbox_head(teacher_x)
        losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
                                              gt_bboxes, gt_labels,
                                              gt_bboxes_ignore)
        return losses

    def cuda(self, device=None):
        """Since teacher_model is registered as a plain object, it is necessary
        to put the teacher model to cuda when calling cuda function."""
        self.teacher_model.cuda(device=device)
        return super().cuda(device=device)

    def train(self, mode=True):
        """Set the same train mode for teacher and student model."""
        # An eval-only teacher is pinned to eval mode regardless of the
        # student's mode.
        if self.eval_teacher:
            self.teacher_model.train(False)
        else:
            self.teacher_model.train(mode)
        super().train(mode)

    def __setattr__(self, name, value):
        """Set attribute, i.e. self.name = value

        This reloading prevent the teacher model from being registered as a
        nn.Module. The teacher module is registered as a plain object, so that
        the teacher parameters will not show up when calling
        ``self.parameters``, ``self.modules``, ``self.children`` methods.
        """
        if (name == 'teacher_model'):
            # Bypass nn.Module.__setattr__ so the teacher is stored as a
            # plain attribute, not a registered sub-module.
            object.__setattr__(self, name, value)
        else:
            super().__setattr__(name, value)
@DETECTORS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
    """Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 teacher_backbone,
                 teacher_neck,
                 teacher_bbox_head,
                 teacher_ckpt,
                 eval_teacher=True,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        # Deliberately skip KnowledgeDistillationSingleStageDetector.__init__
        # (which builds the teacher from a whole detector config) and call
        # SingleStageDetector's constructor directly: the teacher here is
        # assembled from separate backbone/neck/head configs below.
        super(KnowledgeDistillationSingleStageDetector,
              self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                             pretrained)
        self.eval_teacher = eval_teacher
        # The inherited ``__setattr__`` stores ``teacher_model`` as a plain
        # attribute, so its parameters are hidden from ``self.parameters()``.
        self.teacher_model = nn.Module()
        self.teacher_model.backbone = build_backbone(teacher_backbone)
        if (teacher_neck is not None):
            self.teacher_model.neck = build_neck(teacher_neck)
        teacher_bbox_head.update(train_cfg=train_cfg)
        teacher_bbox_head.update(test_cfg=test_cfg)
        self.teacher_model.bbox_head = build_head(teacher_bbox_head)
        if (teacher_ckpt is not None):
            load_checkpoint(
                self.teacher_model, teacher_ckpt, map_location='cpu')

    @property
    def with_teacher_neck(self):
        """bool: whether the detector has a teacher_neck"""
        return (hasattr(self.teacher_model, 'neck')
                and (self.teacher_model.neck is not None))

    def extract_teacher_feat(self, img):
        """Directly extract teacher features from the backbone+neck."""
        x = self.teacher_model.backbone(img)
        if self.with_teacher_neck:
            x = self.teacher_model.neck(x)
        return x

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # The teacher only supplies label-assignment results, so its forward
        # pass needs no gradient tracking.
        with torch.no_grad():
            x_teacher = self.extract_teacher_feat(img)
            outs_teacher = self.teacher_model.bbox_head(x_teacher)
            label_assignment_results = \
                self.teacher_model.bbox_head.get_label_assignment(
                    *outs_teacher, gt_bboxes, gt_labels, img_metas,
                    gt_bboxes_ignore)
        # The student is trained against the teacher's label assignment.
        x = self.extract_feat(img)
        losses = self.bbox_head.forward_train(x, label_assignment_results,
                                              img_metas, gt_bboxes, gt_labels,
                                              gt_bboxes_ignore)
        return losses
@DETECTORS.register_module()
class MaskRCNN(TwoStageDetector):
    """Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        # Configuration-only subclass of the generic two-stage detector.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class MaskScoringRCNN(TwoStageDetector):
    """Mask Scoring RCNN.

    https://arxiv.org/abs/1903.00241
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        # All assembly happens in the two-stage base class.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
    """NAS-FCOS: Fast Neural Architecture Search for Object Detection.

    https://arxiv.org/abs/1906.0442
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Thin wrapper over the single-stage base class.
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class PAA(SingleStageDetector):
    """Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Configuration-only subclass; no PAA-specific logic lives here.
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class PanopticFPN(TwoStagePanopticSegmentor):
    """Implementation of `Panoptic feature pyramid
    networks <https://arxiv.org/pdf/1901.02446>`_"""

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None,
                 semantic_head=None,
                 panoptic_fusion_head=None):
        # Forward everything, including the panoptic-specific heads, to the
        # two-stage panoptic base class.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg,
            semantic_head=semantic_head,
            panoptic_fusion_head=panoptic_fusion_head)
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
    """PointRend: Image Segmentation as Rendering

    This detector is the implementation of
    `PointRend <https://arxiv.org/abs/1912.08193>`_.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        # Pure pass-through constructor.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
    """Implementation of
    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        # Shares the Sparse R-CNN training/testing flow entirely.
        super().__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
@DETECTORS.register_module()
class RepPointsDetector(SingleStageDetector):
    """RepPoints: Point Set Representation for Object Detection.

    This detector is the implementation of:
    - RepPoints detector (https://arxiv.org/pdf/1904.11490)
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Delegates construction to the single-stage base class.
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class RetinaNet(SingleStageDetector):
    """Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Thin wrapper: all behavior comes from SingleStageDetector.
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
    """Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""

    def __init__(self, **kwargs):
        # Everything is handled by the CascadeRCNN base class.
        super().__init__(**kwargs)
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
    """Base class for single-stage detectors.

    Single-stage detectors directly and densely predict bounding boxes on the
    output features of the backbone+neck.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 bbox_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(SingleStageDetector, self).__init__(init_cfg)
        if pretrained:
            # Legacy weight-loading path, kept for backward compatibility.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            backbone.pretrained = pretrained
        self.backbone = build_backbone(backbone)
        if (neck is not None):
            self.neck = build_neck(neck)
        # The head is built with the train/test configs folded into its own
        # config before construction.
        bbox_head.update(train_cfg=train_cfg)
        bbox_head.update(test_cfg=test_cfg)
        self.bbox_head = build_head(bbox_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # Let the base class perform its common forward_train bookkeeping
        # before the head computes losses.
        super(SingleStageDetector, self).forward_train(img, img_metas)
        x = self.extract_feat(img)
        losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
                                              gt_labels, gt_bboxes_ignore)
        return losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test-time augmentation.

        Args:
            img (torch.Tensor): Images with shape (N, C, H, W).
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        feat = self.extract_feat(img)
        results_list = self.bbox_head.simple_test(
            feat, img_metas, rescale=rescale)
        # Convert per-image (bboxes, labels) pairs into per-class ndarrays.
        bbox_results = [
            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
            for (det_bboxes, det_labels) in results_list
        ]
        return bbox_results

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            imgs (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        assert hasattr(self.bbox_head, 'aug_test'), \
            f'{self.bbox_head.__class__.__name__} does not support test-time augmentation'
        feats = self.extract_feats(imgs)
        results_list = self.bbox_head.aug_test(
            feats, img_metas, rescale=rescale)
        bbox_results = [
            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
            for (det_bboxes, det_labels) in results_list
        ]
        return bbox_results

    def onnx_export(self, img, img_metas, with_nms=True):
        """Test function without test time augmentation.

        Args:
            img (torch.Tensor): input images.
            img_metas (list[dict]): List of image information.

        Returns:
            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
                and class labels of shape [N, num_det].
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        # Record the traced input shape so the exported graph can support
        # dynamic shapes.
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
        img_metas[0]['pad_shape_for_onnx'] = img_shape
        # Pad a placeholder third element so the unpacking call below always
        # receives three outputs.
        if (len(outs) == 2):
            outs = (*outs, None)
        (det_bboxes, det_labels) = self.bbox_head.onnx_export(
            *outs, img_metas, with_nms=with_nms)
        return (det_bboxes, det_labels)
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
    """`SOLO: Segmenting Objects by Locations
    <https://arxiv.org/abs/1912.04488>`_"""

    def __init__(self,
                 backbone,
                 neck=None,
                 bbox_head=None,
                 mask_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=None,
                 pretrained=None):
        # Pure pass-through to the single-stage instance-segmentation base.
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            mask_head=mask_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            init_cfg=init_cfg,
            pretrained=pretrained)
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
    """Implementation of `Sparse R-CNN: End-to-End Object Detection with
    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""

    def __init__(self, *args, **kwargs):
        super(SparseRCNN, self).__init__(*args, **kwargs)
        assert self.with_rpn, 'Sparse R-CNN and QueryInst do not support external proposals'

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """Forward function of SparseR-CNN and QueryInst in train stage.

        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (List[Tensor], optional) : Segmentation masks for
                each box. This is required to train QueryInst.
            proposals (List[Tensor], optional): override rpn proposals with
                custom proposals. Use when `with_rpn` is False.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        assert (proposals is None), 'Sparse R-CNN and QueryInst do not support external proposals'
        x = self.extract_feat(img)
        # The "RPN" here produces learnable proposal boxes/features (plus the
        # per-image whwh normalizer) rather than anchor-based proposals.
        (proposal_boxes, proposal_features, imgs_whwh) = \
            self.rpn_head.forward_train(x, img_metas)
        roi_losses = self.roi_head.forward_train(
            x,
            proposal_boxes,
            proposal_features,
            img_metas,
            gt_bboxes,
            gt_labels,
            gt_bboxes_ignore=gt_bboxes_ignore,
            gt_masks=gt_masks,
            imgs_whwh=imgs_whwh)
        return roi_losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test time augmentation.

        Args:
            imgs (list[torch.Tensor]): List of multiple images
            img_metas (list[dict]): List of image information.
            rescale (bool): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        x = self.extract_feat(img)
        (proposal_boxes, proposal_features, imgs_whwh) = \
            self.rpn_head.simple_test_rpn(x, img_metas)
        results = self.roi_head.simple_test(
            x,
            proposal_boxes,
            proposal_features,
            img_metas,
            imgs_whwh=imgs_whwh,
            rescale=rescale)
        return results

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        x = self.extract_feat(img)
        num_imgs = len(img)
        # Fabricated metas: only 'img_shape' is needed by the dummy path.
        dummy_img_metas = [
            dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
        ]
        (proposal_boxes, proposal_features, imgs_whwh) = \
            self.rpn_head.simple_test_rpn(x, dummy_img_metas)
        roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
                                               proposal_features,
                                               dummy_img_metas)
        return roi_outs
@DETECTORS.register_module()
class TOOD(SingleStageDetector):
    """Implementation of `TOOD: Task-aligned One-stage Object Detection.
    <https://arxiv.org/abs/2108.07755>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)

    def set_epoch(self, epoch):
        """Expose the current epoch number to the bbox head."""
        self.bbox_head.epoch = epoch
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
    """Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 init_cfg=None):
        super(TridentFasterRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
        # Backbone and RoI head must agree on the branch layout.
        assert (self.backbone.num_branch == self.roi_head.num_branch)
        assert (self.backbone.test_branch_idx == self.roi_head.test_branch_idx)
        self.num_branch = self.backbone.num_branch
        self.test_branch_idx = self.backbone.test_branch_idx

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation.

        Args:
            img (torch.Tensor): Input images.
            img_metas (list[dict]): List of image information.
            proposals (list[Tensor], optional): External proposals; when
                given, the RPN is skipped.
            rescale (bool, optional): Whether to rescale the results.
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            # Either all branches (test_branch_idx == -1) or a single branch
            # produce outputs at test time, so the metas are replicated to
            # match the number of active branches.
            num_branch = (self.num_branch
                          if (self.test_branch_idx == (- 1)) else 1)
            trident_img_metas = (img_metas * num_branch)
            proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
        else:
            proposal_list = proposals
            # BUGFIX: previously ``trident_img_metas`` was only assigned in
            # the RPN branch above, so passing external proposals raised a
            # NameError at the return statement below.
            trident_img_metas = img_metas
        return self.roi_head.simple_test(
            x, proposal_list, trident_img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        num_branch = (self.num_branch
                      if (self.test_branch_idx == (- 1)) else 1)
        # Replicate metas per augmentation to match the branch count.
        trident_img_metas = [(img_metas * num_branch)
                             for img_metas in img_metas]
        proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
        """make copies of img and gts to fit multi-branch."""
        trident_gt_bboxes = tuple((gt_bboxes * self.num_branch))
        trident_gt_labels = tuple((gt_labels * self.num_branch))
        trident_img_metas = tuple((img_metas * self.num_branch))
        return super(TridentFasterRCNN,
                     self).forward_train(img, trident_img_metas,
                                         trident_gt_bboxes, trident_gt_labels)
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
    """Base class for two-stage detectors.

    Two-stage detectors typically consisting of a region proposal network and a
    task-specific regression head.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(TwoStageDetector, self).__init__(init_cfg)
        if pretrained:
            # Legacy weight-loading path, kept for backward compatibility.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            backbone.pretrained = pretrained
        self.backbone = build_backbone(backbone)
        if (neck is not None):
            self.neck = build_neck(neck)
        if (rpn_head is not None):
            rpn_train_cfg = (train_cfg.rpn if (train_cfg is not None) else None)
            # Copy before mutation so the caller's rpn_head config stays
            # untouched.
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)
        if (roi_head is not None):
            # Fold the rcnn train/test configs into the RoI head config
            # before building it. Note: roi_head is mutated in place here.
            rcnn_train_cfg = (train_cfg.rcnn if (train_cfg is not None) else None)
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            roi_head.pretrained = pretrained
            self.roi_head = build_head(roi_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    @property
    def with_rpn(self):
        """bool: whether the detector has RPN"""
        return (hasattr(self, 'rpn_head') and (self.rpn_head is not None))

    @property
    def with_roi_head(self):
        """bool: whether the detector has a RoI head"""
        return (hasattr(self, 'roi_head') and (self.roi_head is not None))

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        outs = ()
        # backbone (+ neck)
        x = self.extract_feat(img)
        # rpn
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = (outs + (rpn_outs,))
        # roi_head: random boxes stand in for real proposals.
        proposals = torch.randn(1000, 4).to(img.device)
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = (outs + (roi_outs,))
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.
            proposals : override rpn proposals with custom proposals. Use when
                `with_rpn` is False.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        x = self.extract_feat(img)
        losses = dict()
        # RPN forward and loss
        if self.with_rpn:
            proposal_cfg = self.train_cfg.get('rpn_proposal',
                                              self.test_cfg.rpn)
            # gt_labels is deliberately None: the RPN is class-agnostic.
            (rpn_losses, proposal_list) = self.rpn_head.forward_train(
                x,
                img_metas,
                gt_bboxes,
                gt_labels=None,
                gt_bboxes_ignore=gt_bboxes_ignore,
                proposal_cfg=proposal_cfg,
                **kwargs)
            losses.update(rpn_losses)
        else:
            proposal_list = proposals
        roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
                                                 gt_bboxes, gt_labels,
                                                 gt_bboxes_ignore, gt_masks,
                                                 **kwargs)
        losses.update(roi_losses)
        return losses

    async def async_simple_test(self,
                                img,
                                img_meta,
                                proposals=None,
                                rescale=False):
        """Async test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            proposal_list = (await self.rpn_head.async_simple_test_rpn(
                x, img_meta))
        else:
            proposal_list = proposals
        return (await self.roi_head.async_simple_test(
            x, proposal_list, img_meta, rescale=rescale))

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals
        return self.roi_head.simple_test(
            x, proposal_list, img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)

    def onnx_export(self, img, img_metas):
        """Export the two-stage pipeline for ONNX tracing.

        Raises:
            NotImplementedError: if the RoI head has no ``onnx_export``.
        """
        # Record the traced input shape to support dynamic shapes in ONNX.
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
        x = self.extract_feat(img)
        proposals = self.rpn_head.onnx_export(x, img_metas)
        if hasattr(self.roi_head, 'onnx_export'):
            return self.roi_head.onnx_export(x, proposals, img_metas)
        else:
            raise NotImplementedError(f'{self.__class__.__name__} can not be exported to ONNX. Please refer to the list of supported models,https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx')
@DETECTORS.register_module()
class VFNet(SingleStageDetector):
    """Implementation of `VarifocalNet
    (VFNet).<https://arxiv.org/abs/2008.13367>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Configuration-only subclass of the single-stage base.
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
    """Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 segm_head,
                 mask_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
                                     test_cfg, pretrained, init_cfg)
        # Extra heads on top of the single-stage skeleton: a semantic
        # segmentation head and a prototype/coefficient mask head.
        self.segm_head = build_head(segm_head)
        self.mask_head = build_head(mask_head)

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        feat = self.extract_feat(img)
        bbox_outs = self.bbox_head(feat)
        # The mask head only consumes the highest-resolution feature level.
        prototypes = self.mask_head.forward_dummy(feat[0])
        return (bbox_outs, prototypes)

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # convert Bitmap mask or Polygon Mask to Tensor here
        gt_masks = [
            gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
            for gt_mask in gt_masks
        ]
        x = self.extract_feat(img)
        (cls_score, bbox_pred, coeff_pred) = self.bbox_head(x)
        bbox_head_loss_inputs = ((cls_score, bbox_pred) +
                                 (gt_bboxes, gt_labels, img_metas))
        # The bbox head also returns its sampling results, which the mask
        # head reuses below.
        (losses, sampling_results) = self.bbox_head.loss(
            *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
        segm_head_outs = self.segm_head(x[0])
        loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
        losses.update(loss_segm)
        mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
                                   sampling_results)
        loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
                                        img_metas, sampling_results)
        losses.update(loss_mask)
        # check NaN and Inf
        for loss_name in losses.keys():
            assert torch.isfinite(torch.stack(losses[loss_name])) \
                .all().item(), '{} becomes infinite or NaN!'.format(loss_name)
        return losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test-time augmentation."""
        feat = self.extract_feat(img)
        (det_bboxes, det_labels, det_coeffs) = self.bbox_head.simple_test(
            feat, img_metas, rescale=rescale)
        bbox_results = [
            bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
            for (det_bbox, det_label) in zip(det_bboxes, det_labels)
        ]
        segm_results = self.mask_head.simple_test(
            feat, det_bboxes, det_labels, det_coeffs, img_metas,
            rescale=rescale)
        # Pair up per-image box results with their mask results.
        return list(zip(bbox_results, segm_results))

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations."""
        raise NotImplementedError('YOLACT does not support test-time augmentation')
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
    """YOLOV3 single-stage detector (configuration wrapper plus ONNX export
    support)."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)

    def onnx_export(self, img, img_metas):
        """Test function for exporting to ONNX, without test time augmentation.

        Args:
            img (torch.Tensor): input images.
            img_metas (list[dict]): List of image information.

        Returns:
            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
                and class labels of shape [N, num_det].
        """
        feats = self.extract_feat(img)
        outs = self.bbox_head.forward(feats)
        # Record the traced input shape for shape-aware export.
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
        det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
        return det_bboxes, det_labels
@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
    r"""Implementation of `You Only Look One-level Feature
    <https://arxiv.org/abs/2103.09460>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Accept and forward ``init_cfg`` like every other detector in this
        # file (FasterRCNN, FCOS, YOLOV3, ...); previously it was missing
        # here, so configs specifying ``init_cfg`` would fail for YOLOF.
        # The default of ``None`` keeps all existing callers working.
        super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
                                    test_cfg, pretrained, init_cfg)
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
    """Compute top-k accuracy of ``pred`` against ``target``.

    Args:
        pred (torch.Tensor): Model prediction, shape (N, num_class).
        target (torch.Tensor): Target of each prediction, shape (N, ).
        topk (int | tuple[int], optional): A prediction counts as correct
            if the target appears within its ``topk`` highest scores.
            Defaults to 1.
        thresh (float, optional): If not None, predictions whose top score
            is not above this threshold are counted as incorrect.
            Defaults to None.

    Returns:
        float | tuple[float]: A single accuracy if ``topk`` is an int,
            otherwise one accuracy per entry of ``topk``.
    """
    assert isinstance(topk, (int, tuple))
    return_single = isinstance(topk, int)
    if return_single:
        topk = (topk, )
    maxk = max(topk)

    # Empty batch: report 0% for every requested k.
    if pred.size(0) == 0:
        zeros = [pred.new_tensor(0.) for _ in range(len(topk))]
        return zeros[0] if return_single else zeros

    assert pred.ndim == 2 and target.ndim == 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'

    scores, pred_classes = pred.topk(maxk, dim=1)
    pred_classes = pred_classes.t()  # shape (maxk, N)
    hits = pred_classes.eq(target.view(1, -1).expand_as(pred_classes))
    if thresh is not None:
        # Only predictions scoring above the threshold may count as hits.
        hits = hits & (scores > thresh).t()

    results = []
    for k in topk:
        num_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
        results.append(num_correct.mul_(100.0 / pred.size(0)))
    return results[0] if return_single else results
class Accuracy(nn.Module):
    """Module wrapper around :func:`accuracy`.

    Args:
        topk (tuple, optional): The ``k`` values used to compute the
            accuracy. Defaults to (1, ).
        thresh (float, optional): If not None, predictions scoring under
            this threshold are considered incorrect. Defaults to None.
    """

    def __init__(self, topk=(1, ), thresh=None):
        super().__init__()
        self.topk = topk
        self.thresh = thresh

    def forward(self, pred, target):
        """Compute the accuracy of ``pred`` with respect to ``target``.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            tuple[float]: The accuracies under different topk criterions.
        """
        return accuracy(pred, target, self.topk, self.thresh)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
                     reduction='mean'):
    """Compute the balanced L1 loss from `Libra R-CNN
    <https://arxiv.org/pdf/1904.02701.pdf>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target of the prediction with
            shape (N, 4).
        beta (float): Threshold on |pred - target| that switches between the
            two pieces of the loss. Defaults to 1.0.
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss.
            Defaults to 1.5.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".

    Returns:
        torch.Tensor: The calculated loss
    """
    assert beta > 0
    # Empty target: return a zero that still carries pred's gradient graph.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()

    abs_diff = torch.abs(pred - target)
    b = np.e**(gamma / alpha) - 1
    # Smooth log branch for small residuals, linear branch for large ones.
    small_branch = (alpha / b) * (b * abs_diff + 1) * torch.log(
        b * abs_diff / beta + 1) - alpha * abs_diff
    large_branch = gamma * abs_diff + gamma / b - alpha * beta
    return torch.where(abs_diff < beta, small_branch, large_branch)
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
        beta (float, optional): Threshold for the difference between the
            prediction and target. Defaults to 1.0.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
    """

    def __init__(self,
                 alpha=0.5,
                 gamma=1.5,
                 beta=1.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted balanced L1 loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, 4).
            target (torch.Tensor): The learning target of the prediction with
                shape (N, 4).
            weight (torch.Tensor, optional): Sample-wise loss weight with
                shape (N, ).
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  class_weight=None,
                  ignore_index=-100):
    """Compute the (softmax) CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.

    Returns:
        torch.Tensor: The calculated loss
    """
    # Normalize a ``None`` ignore_index back to PyTorch's default.
    if ignore_index is None:
        ignore_index = -100

    # Element-wise loss first; weighting/reduction is applied afterwards.
    loss = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)

    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): 'Expand onehot labels to match the size of prediction.' bin_labels = labels.new_full((labels.size(0), label_channels), 0) valid_mask = ((labels >= 0) & (labels != ignore_index)) inds = torch.nonzero((valid_mask & (labels < label_channels)), as_tuple=False) if (inds.numel() > 0): bin_labels[(inds, labels[inds])] = 1 valid_mask = valid_mask.view((- 1), 1).expand(labels.size(0), label_channels).float() if (label_weights is None): bin_label_weights = valid_mask else: bin_label_weights = label_weights.view((- 1), 1).repeat(1, label_channels) bin_label_weights *= valid_mask return (bin_labels, bin_label_weights)
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None,
                         ignore_index=-100):
    """Compute the binary CrossEntropy loss (with logits).

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # Normalize a ``None`` ignore_index back to the default sentinel.
    if ignore_index is None:
        ignore_index = -100

    # Index labels are expanded to one-hot so BCE can be applied per channel;
    # ignored entries end up with zero weight.
    if pred.dim() != label.dim():
        label, weight = _expand_onehot_labels(label, weight, pred.size(-1),
                                              ignore_index)

    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), pos_weight=class_weight, reduction='none')
    return weight_reduce_loss(
        loss, weight, reduction=reduction, avg_factor=avg_factor)
def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None,
                       ignore_index=None):
    """Compute the binary CrossEntropy loss for class-specific masks.

    For each RoI, only the mask channel of its ground-truth class (given by
    ``label``) contributes to the loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C, *), C is the
            number of classes. The trailing * indicates arbitrary shape.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): Class label per RoI, used to select the mask
            channel when the prediction is not class-agnostic.
        reduction (str, optional): Only "mean" is supported here.
        avg_factor (int, optional): Must be None; kept for API consistency.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (None): Placeholder, to be consistent with other loss.
            Default: None.

    Returns:
        torch.Tensor: The calculated loss, shape (1,).
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    assert reduction == 'mean' and avg_factor is None

    num_rois = pred.size()[0]
    # One row index per RoI, paired with its class label below.
    row_inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    class_pred = pred[row_inds, label].squeeze(1)
    loss = F.binary_cross_entropy_with_logits(
        class_pred, target, weight=class_weight, reduction='mean')
    # Return as a 1-element tensor to match the other criteria.
    return loss[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
    """Cross entropy loss supporting softmax, sigmoid and mask variants.

    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            instead of softmax. Defaults to False.
        use_mask (bool, optional): Whether to use mask cross entropy loss.
            Defaults to False.
        reduction (str, optional): Options are "none", "mean" and "sum".
            Defaults to 'mean'.
        class_weight (list[float], optional): Weight of each class.
            Defaults to None.
        ignore_index (int | None): The label index to be ignored.
            Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
    """

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 ignore_index=None,
                 loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        # Sigmoid and mask modes are mutually exclusive.
        assert use_sigmoid is False or use_mask is False
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.ignore_index = ignore_index

        # Pick the concrete criterion once, at construction time.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                ignore_index=None,
                **kwargs):
        """Compute the weighted classification loss.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Options are "none", "mean" and "sum".
            ignore_index (int | None): If not None, overrides the configured
                ``ignore_index``. Default: None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if ignore_index is None:
            ignore_index = self.ignore_index
        class_weight = (
            None if self.class_weight is None else cls_score.new_tensor(
                self.class_weight, device=cls_score.device))
        return self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            ignore_index=ignore_index,
            **kwargs)
def dice_loss(pred,
              target,
              weight=None,
              eps=0.001,
              reduction='mean',
              naive_dice=False,
              avg_factor=None):
    """Compute dice loss in one of two forms.

    - ``naive_dice=False``: the V-Net formulation
      (`<https://arxiv.org/abs/1606.04797>`_) with squared terms in the
      denominator.
    - ``naive_dice=True``: first-power terms in the denominator.

    Args:
        pred (torch.Tensor): The prediction, has a shape (n, *)
        target (torch.Tensor): The learning label of the prediction,
            shape (n, *), same shape of pred.
        weight (torch.Tensor, optional): The weight of loss for each
            prediction, has a shape (n,). Defaults to None.
        eps (float): Avoid dividing by zero. Default: 1e-3.
        reduction (str, optional): Options are "none", "mean" and "sum".
            Defaults to 'mean'.
        naive_dice (bool, optional): Select the naive formulation.
            Defaults to False.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    flat_pred = pred.flatten(1)
    flat_target = target.flatten(1).float()
    intersection = torch.sum(flat_pred * flat_target, 1)
    if naive_dice:
        denom = torch.sum(flat_pred, 1) + torch.sum(flat_target, 1)
        dice = (2 * intersection + eps) / (denom + eps)
    else:
        # V-Net form: each squared sum is stabilized by eps separately.
        pred_sq = torch.sum(flat_pred * flat_pred, 1) + eps
        target_sq = torch.sum(flat_target * flat_target, 1) + eps
        dice = 2 * intersection / (pred_sq + target_sq)

    loss = 1 - dice
    if weight is not None:
        assert weight.ndim == loss.ndim
        assert len(weight) == len(pred)
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
@LOSSES.register_module()
class DiceLoss(nn.Module):
    """Dice loss module wrapping :func:`dice_loss`.

    Args:
        use_sigmoid (bool, optional): Whether the prediction is meant for
            sigmoid (vs softmax) activation. Defaults to True.
        activate (bool): Whether to apply the activation inside this module;
            False means inputs are already activated. Defaults to True.
        reduction (str, optional): Options are "none", "mean" and "sum".
            Defaults to 'mean'.
        naive_dice (bool, optional): If False, use the V-Net dice loss;
            otherwise use the first-power denominator form.
            Defaults to False.
        loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        eps (float): Avoid dividing by zero. Defaults to 1e-3.
    """

    def __init__(self,
                 use_sigmoid=True,
                 activate=True,
                 reduction='mean',
                 naive_dice=False,
                 loss_weight=1.0,
                 eps=0.001):
        super(DiceLoss, self).__init__()
        self.use_sigmoid = use_sigmoid
        self.reduction = reduction
        self.naive_dice = naive_dice
        self.loss_weight = loss_weight
        self.eps = eps
        self.activate = activate

    def forward(self,
                pred,
                target,
                weight=None,
                reduction_override=None,
                avg_factor=None):
        """Compute the weighted dice loss.

        Args:
            pred (torch.Tensor): The prediction, has a shape (n, *).
            target (torch.Tensor): The label of the prediction,
                shape (n, *), same shape of pred.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction, has a shape (n,). Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Options are "none", "mean" and "sum".
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)

        if self.activate:
            if not self.use_sigmoid:
                # Only sigmoid activation is implemented.
                raise NotImplementedError
            pred = pred.sigmoid()

        return self.loss_weight * dice_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            naive_dice=self.naive_dice,
            avg_factor=avg_factor)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in a
    gaussian heatmap.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12  # guards log() against exact 0/1 predictions
    # Only exact heatmap peaks count as positives; everything else is
    # down-weighted by its distance from a peak via (1 - target)^gamma.
    pos_mask = gaussian_target.eq(1)
    neg_mod = (1 - gaussian_target).pow(gamma)
    positive_term = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_mask
    negative_term = -(1 - pred + eps).log() * pred.pow(alpha) * neg_mod
    return positive_term + negative_term
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Note that the target here is a gaussian heatmap, not a 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean',
                 loss_weight=1.0):
        super(GaussianFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the weighted gaussian focal loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction
                in gaussian distribution.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * gaussian_focal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            reduction=reduction,
            avg_factor=avg_factor)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def quality_focal_loss(pred, target, beta=2.0):
    """Quality Focal Loss (QFL) from `Generalized Focal Loss
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted joint representation of classification
            and quality (IoU) estimation with shape (N, C), C is the number of
            classes.
        target (tuple([torch.Tensor])): Target category label with shape (N,)
            and target quality label with shape (N,).
        beta (float): The beta parameter for calculating the modulating factor.
            Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, 'target for QFL must be a tuple of two elements,\n including category label and quality label, respectively'
    label, score = target

    pred_sigmoid = pred.sigmoid()
    # Start by treating every (sample, class) pair as a negative with a
    # zero-quality target, modulated by the prediction itself.
    scale_factor = pred_sigmoid
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = F.binary_cross_entropy_with_logits(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)

    # Overwrite the entries of foreground samples at their own class with the
    # quality-aware target. Background is labelled as pred.size(1).
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
        pred[pos, pos_label], score[pos],
        reduction='none') * scale_factor.abs().pow(beta)

    return loss.sum(dim=1, keepdim=False)
@weighted_loss
def quality_focal_loss_with_prob(pred, target, beta=2.0):
    """Quality Focal Loss (QFL) from `Generalized Focal Loss
    <https://arxiv.org/abs/2006.04388>`_.

    Unlike :func:`quality_focal_loss`, ``pred`` is already a probability
    (sigmoid has been applied by the caller).

    Args:
        pred (torch.Tensor): Predicted joint representation of classification
            and quality (IoU) estimation with shape (N, C), C is the number of
            classes.
        target (tuple([torch.Tensor])): Target category label with shape (N,)
            and target quality label with shape (N,).
        beta (float): The beta parameter for calculating the modulating factor.
            Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, 'target for QFL must be a tuple of two elements,\n including category label and quality label, respectively'
    label, score = target

    # ``pred`` is a probability already, so no sigmoid here.
    pred_sigmoid = pred
    scale_factor = pred_sigmoid
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = F.binary_cross_entropy(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)

    # Replace the loss at (positive sample, its class) with the
    # quality-aware term. Background is labelled as pred.size(1).
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy(
        pred[pos, pos_label], score[pos],
        reduction='none') * scale_factor.abs().pow(beta)

    return loss.sum(dim=1, keepdim=False)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def distribution_focal_loss(pred, label):
    """Distribution Focal Loss (DFL) from `Generalized Focal Loss
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted general distribution of bounding boxes
            (before softmax) with shape (N, n+1), n is the max value of the
            integral set `{0, ..., n}` in paper.
        label (torch.Tensor): Target distance label for bounding boxes with
            shape (N,).

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    # The continuous label is split between its two neighbouring integers,
    # each weighted by its proximity to the label.
    lower_idx = label.long()
    upper_idx = lower_idx + 1
    lower_weight = upper_idx.float() - label
    upper_weight = label - lower_idx.float()
    lower_loss = F.cross_entropy(pred, lower_idx, reduction='none')
    upper_loss = F.cross_entropy(pred, upper_idx, reduction='none')
    return lower_loss * lower_weight + upper_loss * upper_weight
@LOSSES.register_module()
class QualityFocalLoss(nn.Module):
    """Quality Focal Loss (QFL), a variant of `Generalized Focal Loss
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
            Defaults to True.
        beta (float): The beta parameter for calculating the modulating factor.
            Defaults to 2.0.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
        activated (bool, optional): If True, the input is a probability
            (already activated); otherwise it is a logit. Defaults to False.
    """

    def __init__(self,
                 use_sigmoid=True,
                 beta=2.0,
                 reduction='mean',
                 loss_weight=1.0,
                 activated=False):
        super(QualityFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.activated = activated

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the weighted quality focal loss.

        Args:
            pred (torch.Tensor): Predicted joint representation of
                classification and quality (IoU) estimation with shape (N, C),
                C is the number of classes.
            target (tuple([torch.Tensor])): Target category label with shape
                (N,) and target quality label with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if not self.use_sigmoid:
            raise NotImplementedError
        # Probability inputs and logit inputs use different implementations.
        calculate_loss_func = (
            quality_focal_loss_with_prob
            if self.activated else quality_focal_loss)
        return self.loss_weight * calculate_loss_func(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor)
@LOSSES.register_module()
class DistributionFocalLoss(nn.Module):
    """Distribution Focal Loss (DFL), a variant of `Generalized Focal Loss
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(DistributionFocalLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the weighted distribution focal loss.

        Args:
            pred (torch.Tensor): Predicted general distribution of bounding
                boxes (before softmax) with shape (N, n+1), n is the max value
                of the integral set `{0, ..., n}` in paper.
            target (torch.Tensor): Target distance label for bounding boxes
                with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * distribution_focal_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(((labels >= 0) & (labels < label_channels)), as_tuple=False).squeeze() if (inds.numel() > 0): bin_labels[(inds, labels[inds])] = 1 bin_label_weights = label_weights.view((- 1), 1).expand(label_weights.size(0), label_channels) return (bin_labels, bin_label_weights)
@LOSSES.register_module()
class GHMC(nn.Module):
    """GHM Classification Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        use_sigmoid (bool): Can only be true for BCE based loss now.
        loss_weight (float): The weight of the total GHM-C loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0, reduction='mean'):
        super(GHMC, self).__init__()
        self.bins = bins
        self.momentum = momentum
        # Bin edges partition the gradient-norm range [0, 1] uniformly.
        edges = (torch.arange((bins + 1)).float() / bins)
        self.register_buffer('edges', edges)
        # Nudge the last edge past 1 so g == 1 falls inside the final bin.
        self.edges[(- 1)] += 1e-06
        if (momentum > 0):
            # Running (EMA) count of samples per bin, updated in forward().
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.use_sigmoid = use_sigmoid
        if (not self.use_sigmoid):
            raise NotImplementedError
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, pred, target, label_weight, reduction_override=None, **kwargs):
        """Calculate the GHM-C loss.

        Args:
            pred (float tensor of size [batch_num, class_num]):
                The direct prediction of classification fc layer.
            target (float tensor of size [batch_num, class_num]):
                Binary class target for each sample.
            label_weight (float tensor of size [batch_num, class_num]):
                the value is 1 if the sample is valid and 0 if ignored.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        Returns:
            The gradient harmonized loss.
        """
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        # Expand index targets to one-hot when needed (module-level helper).
        if (pred.dim() != target.dim()):
            (target, label_weight) = _expand_onehot_labels(target, label_weight, pred.size((- 1)))
        (target, label_weight) = (target.float(), label_weight.float())
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(pred)

        # Gradient norm of the sigmoid-BCE loss w.r.t. the logit; detached so
        # the weighting itself is not differentiated through.
        g = torch.abs((pred.sigmoid().detach() - target))

        valid = (label_weight > 0)
        tot = max(valid.float().sum().item(), 1.0)
        n = 0  # number of non-empty bins
        for i in range(self.bins):
            # Samples whose gradient norm falls into bin i.
            inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
            num_in_bin = inds.sum().item()
            if (num_in_bin > 0):
                if (mmt > 0):
                    # EMA of the bin population; mutates the registered
                    # buffer in place, so this module is stateful.
                    self.acc_sum[i] = ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin))
                    weights[inds] = (tot / self.acc_sum[i])
                else:
                    weights[inds] = (tot / num_in_bin)
                n += 1
        if (n > 0):
            # Normalize so weights average to tot over occupied bins.
            weights = (weights / n)

        loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
        loss = weight_reduce_loss(loss, weights, reduction=reduction, avg_factor=tot)
        return (loss * self.loss_weight)
@LOSSES.register_module()
class GHMR(nn.Module):
    """GHM Regression Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        mu (float): The parameter for the Authentic Smooth L1 loss.
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        loss_weight (float): The weight of the total GHM-R loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0, reduction='mean'):
        super(GHMR, self).__init__()
        self.mu = mu
        self.bins = bins
        # Bin edges partition the gradient-norm range [0, 1] uniformly.
        edges = (torch.arange((bins + 1)).float() / bins)
        self.register_buffer('edges', edges)
        # Make the last bin open-ended so every gradient norm is captured.
        self.edges[(- 1)] = 1000.0
        self.momentum = momentum
        if (momentum > 0):
            # Running (EMA) count of samples per bin, updated in forward().
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, pred, target, label_weight, avg_factor=None, reduction_override=None):
        """Calculate the GHM-R loss.

        Args:
            pred (float tensor of size [batch_num, 4 (* class_num)]):
                The prediction of box regression layer. Channel number can be
                4 or 4 * class_num depending on whether it is class-agnostic.
            target (float tensor of size [batch_num, 4 (* class_num)]):
                The target regression values with the same size of pred.
            label_weight (float tensor of size [batch_num, 4 (* class_num)]):
                The weight of each sample, 0 if ignored.
            avg_factor (int, optional): Accepted for API consistency but not
                used; the loss is averaged by the valid-sample total instead.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        Returns:
            The gradient harmonized loss.
        """
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        mu = self.mu
        edges = self.edges
        mmt = self.momentum

        # Authentic Smooth L1: sqrt(d^2 + mu^2) - mu, smooth everywhere.
        diff = (pred - target)
        loss = (torch.sqrt(((diff * diff) + (mu * mu))) - mu)

        # Gradient norm d/sqrt(d^2 + mu^2), in [0, 1); detached so the
        # weighting itself is not differentiated through.
        g = torch.abs((diff / torch.sqrt(((mu * mu) + (diff * diff))))).detach()
        weights = torch.zeros_like(g)

        valid = (label_weight > 0)
        tot = max(label_weight.float().sum().item(), 1.0)
        n = 0  # number of non-empty bins
        for i in range(self.bins):
            # Samples whose gradient norm falls into bin i.
            inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
            num_in_bin = inds.sum().item()
            if (num_in_bin > 0):
                n += 1
                if (mmt > 0):
                    # EMA of the bin population; mutates the registered
                    # buffer in place, so this module is stateful.
                    self.acc_sum[i] = ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin))
                    weights[inds] = (tot / self.acc_sum[i])
                else:
                    weights[inds] = (tot / num_in_bin)
        if (n > 0):
            # Normalize so weights average to tot over occupied bins.
            weights /= n
        loss = weight_reduce_loss(loss, weights, reduction=reduction, avg_factor=tot)
        return (loss * self.loss_weight)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred, soft_label, T, detach_target=True):
    """Loss function for knowledge distilling using KL divergence.

    Args:
        pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert pred.size() == soft_label.size()
    # Teacher distribution at temperature T.
    soft_target = F.softmax(soft_label / T, dim=1)
    if detach_target:
        soft_target = soft_target.detach()
    # Student log-distribution at the same temperature.
    log_pred = F.log_softmax(pred / T, dim=1)
    per_sample = F.kl_div(log_pred, soft_target, reduction='none').mean(1)
    # Scale by T^2 to keep gradient magnitudes comparable across T.
    return per_sample * (T * T)
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
    """Loss function for knowledge distilling using KL divergence.

    Args:
        reduction (str): Options are ``'none'``, ``'mean'`` and ``'sum'``.
        loss_weight (float): Loss weight of current loss.
        T (int): Temperature for distillation.
    """

    def __init__(self, reduction='mean', loss_weight=1.0, T=10):
        super(KnowledgeDistillationKLDivLoss, self).__init__()
        assert T >= 1
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.T = T

    def forward(self, pred, soft_label, weight=None, avg_factor=None, reduction_override=None):
        """Forward function.

        Args:
            pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Per-call reduction override falls back to the configured one.
        reduction = reduction_override or self.reduction
        raw = knowledge_distillation_kl_div_loss(
            pred,
            soft_label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            T=self.T)
        return self.loss_weight * raw
@weighted_loss
def mse_loss(pred, target):
    """Wrapper of element-wise MSE loss (no reduction applied here)."""
    elementwise = F.mse_loss(pred, target, reduction='none')
    return elementwise
@LOSSES.register_module()
class MSELoss(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss.
            Defaults to 1.0.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Weight of the loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        raw = mse_loss(pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * raw
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    # Empty target: return a zero that stays connected to the graph.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()
    abs_err = (pred - target).abs()
    # Quadratic inside the beta band, linear outside; the two pieces meet
    # with matching value and slope at |err| == beta.
    quadratic = 0.5 * abs_err * abs_err / beta
    linear = abs_err - 0.5 * beta
    return torch.where(abs_err < beta, quadratic, linear)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
    """L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Calculated loss
    """
    # Empty target: return a zero that stays connected to the graph.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()
    return (pred - target).abs()
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
    """Smooth L1 loss.

    Args:
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum". Defaults to "mean".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        raw = smooth_l1_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return self.loss_weight * raw
@LOSSES.register_module()
class L1Loss(nn.Module):
    """L1 loss.

    Args:
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        raw = l1_loss(pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * raw
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # get_enum validates the string and maps: "none" -> 0, "mean" -> 1,
    # "sum" -> 2 (raises ValueError for anything else).
    reduction_enum = F._Reduction.get_enum(reduction)
    reducers = {
        0: lambda t: t,
        1: lambda t: t.mean(),
        2: lambda t: t.sum(),
    }
    return reducers[reduction_enum](loss)
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    # Without an explicit averaging factor, fall back to plain reduction.
    if avg_factor is None:
        return reduce_loss(loss, reduction)
    # With avg_factor, only "mean" (custom denominator) and "none" make sense.
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    ``loss_func(pred, target, **kwargs)``. The function only needs to compute
    element-wise loss without any reduction. This decorator adds ``weight``,
    ``reduction`` and ``avg_factor`` arguments, giving the decorated function
    the signature ``loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)``.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs):
        # Element-wise loss first, then shared weighting/reduction logic.
        raw = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(raw, weight, reduction, avg_factor)

    return wrapper
@mmcv.jit(derivate=True, coderize=True)
def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C).
        weight (torch.Tensor, optional): The weight of loss for each
            prediction. Defaults to None.
        alpha (float, optional): A balance factor for the negative part of
            Varifocal Loss, which is different from the alpha of Focal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive examples with the iou target. Defaults to True.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Options are "none", "mean" and "sum".
            Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    assert pred.size() == target.size()
    prob = pred.sigmoid()
    target = target.type_as(pred)
    pos_mask = (target > 0.0).float()
    neg_mask = (target <= 0.0).float()
    # Negatives are down-weighted focal-style by alpha * |p - q|^gamma.
    neg_term = alpha * (prob - target).abs().pow(gamma) * neg_mask
    # Positives are optionally scaled by their IoU-aware target score.
    pos_term = (target * pos_mask) if iou_weighted else pos_mask
    focal_weight = pos_term + neg_term
    bce = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
    return weight_reduce_loss(bce * focal_weight, weight, reduction, avg_factor)
@LOSSES.register_module()
class VarifocalLoss(nn.Module):

    def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0):
        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

        Args:
            use_sigmoid (bool, optional): Whether the prediction is used for
                sigmoid or softmax. Defaults to True.
            alpha (float, optional): A balance factor for the negative part
                of Varifocal Loss, which is different from the alpha of
                Focal Loss. Defaults to 0.75.
            gamma (float, optional): The gamma for calculating the
                modulating factor. Defaults to 2.0.
            iou_weighted (bool, optional): Whether to weight the loss of the
                positive examples with the iou target. Defaults to True.
            reduction (str, optional): The method used to reduce the loss
                into a scalar. Options are "none", "mean" and "sum".
                Defaults to 'mean'.
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super(VarifocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.'
        assert alpha >= 0.0
        self.use_sigmoid = use_sigmoid
        self.alpha = alpha
        self.gamma = gamma
        self.iou_weighted = iou_weighted
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        if not self.use_sigmoid:
            # Softmax variant is not implemented.
            raise NotImplementedError
        raw = varifocal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            iou_weighted=self.iou_weighted,
            reduction=reduction,
            avg_factor=avg_factor)
        return self.loss_weight * raw
@NECKS.register_module()
class BFP(BaseModule):
    """BFP (Balanced Feature Pyramids).

    BFP gathers multi-level features to one resolution, refines the fused
    map, then scatters the refined result back onto every level as a
    residual. Used in Libra R-CNN (CVPR 2019); see `Libra R-CNN: Towards
    Balanced Learning for Object Detection
    <https://arxiv.org/abs/1904.02701>`_ for details.

    Args:
        in_channels (int): Number of input channels (feature maps of all
            levels should have the same channels).
        num_levels (int): Number of input feature levels.
        refine_level (int): Index of integration and refine level of BSF in
            multi-level features from bottom to top.
        refine_type (str): Type of the refine op, currently support
            [None, 'conv', 'non_local'].
        conv_cfg (dict): The config dict for convolution layers.
        norm_cfg (dict): The config dict for normalization layers.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self, in_channels, num_levels, refine_level=2, refine_type=None, conv_cfg=None, norm_cfg=None, init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):
        super(BFP, self).__init__(init_cfg)
        assert refine_type in [None, 'conv', 'non_local']
        self.in_channels = in_channels
        self.num_levels = num_levels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.refine_level = refine_level
        self.refine_type = refine_type
        assert 0 <= self.refine_level < self.num_levels
        # Optional refinement op applied to the fused (balanced) feature.
        if self.refine_type == 'conv':
            self.refine = ConvModule(
                self.in_channels,
                self.in_channels,
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
        elif self.refine_type == 'non_local':
            self.refine = NonLocal2d(
                self.in_channels,
                reduction=1,
                use_scale=False,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_levels
        # Step 1: gather — resize every level to the refine level's size
        # (max-pool for finer levels, nearest upsample for coarser ones).
        target_size = inputs[self.refine_level].size()[2:]
        resized = []
        for lvl, feat in enumerate(inputs):
            if lvl < self.refine_level:
                resized.append(F.adaptive_max_pool2d(feat, output_size=target_size))
            else:
                resized.append(F.interpolate(feat, size=target_size, mode='nearest'))
        # Step 2: average to balance, then optionally refine.
        fused = sum(resized) / len(resized)
        if self.refine_type is not None:
            fused = self.refine(fused)
        # Step 3: scatter — add the refined map back to each level as a
        # residual at that level's resolution.
        outs = []
        for lvl, feat in enumerate(inputs):
            level_size = feat.size()[2:]
            if lvl < self.refine_level:
                residual = F.interpolate(fused, size=level_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(fused, output_size=level_size)
            outs.append(residual + feat)
        return tuple(outs)
@NECKS.register_module()
class ChannelMapper(BaseModule):
    """Channel Mapper to reduce/increase channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        kernel_size (int, optional): kernel_size for reducing channels (used
            at each scale). Default: 3.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: None.
        act_cfg (dict, optional): Config dict for activation layer in
            ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There
            would be extra_convs when num_outs is larger than the length
            of in_channels.
        init_cfg (dict or list[dict], optional): Initialization config dict.

    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = ChannelMapper(in_channels, 11, 3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), num_outs=None, init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):
        super(ChannelMapper, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.extra_convs = None
        if num_outs is None:
            num_outs = len(in_channels)
        # One mapping conv per input scale.
        self.convs = nn.ModuleList([
            ConvModule(
                channels,
                out_channels,
                kernel_size,
                padding=(kernel_size - 1) // 2,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for channels in in_channels
        ])
        if num_outs > len(in_channels):
            # Stride-2 convs generate the extra (coarser) output levels; the
            # first one reads the last backbone map, the rest chain outputs.
            self.extra_convs = nn.ModuleList()
            for idx in range(len(in_channels), num_outs):
                src_channels = in_channels[-1] if idx == len(in_channels) else out_channels
                self.extra_convs.append(
                    ConvModule(
                        src_channels,
                        out_channels,
                        3,
                        stride=2,
                        padding=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg))

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == len(self.convs)
        outs = [conv(feat) for conv, feat in zip(self.convs, inputs)]
        if self.extra_convs:
            for idx, extra_conv in enumerate(self.extra_convs):
                source = inputs[-1] if idx == 0 else outs[-1]
                outs.append(extra_conv(source))
        return tuple(outs)
class Bottleneck(nn.Module):
    """Bottleneck block for DilatedEncoder used in `YOLOF.

    <https://arxiv.org/abs/2103.09460>`.

    A 1x1 reduce conv, a 3x3 dilated conv, and a 1x1 expand conv, wrapped
    by an identity shortcut.

    Args:
        in_channels (int): The number of input channels.
        mid_channels (int): The number of middle output channels.
        dilation (int): Dilation rate.
        norm_cfg (dict): Dictionary to construct and config norm layer.
    """

    def __init__(self, in_channels, mid_channels, dilation, norm_cfg=dict(type='BN', requires_grad=True)):
        super(Bottleneck, self).__init__()
        self.conv1 = ConvModule(in_channels, mid_channels, 1, norm_cfg=norm_cfg)
        self.conv2 = ConvModule(
            mid_channels,
            mid_channels,
            3,
            padding=dilation,
            dilation=dilation,
            norm_cfg=norm_cfg)
        self.conv3 = ConvModule(mid_channels, in_channels, 1, norm_cfg=norm_cfg)

    def forward(self, x):
        # Residual branch: reduce -> dilated 3x3 -> expand, then shortcut.
        branch = self.conv3(self.conv2(self.conv1(x)))
        return x + branch
@NECKS.register_module()
class DilatedEncoder(nn.Module):
    """Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.

    This module contains two types of components:
        - the original FPN lateral convolution layer and fpn convolution
          layer, which are 1x1 conv + 3x3 conv
        - the dilated residual block

    Args:
        in_channels (int): The number of input channels.
        out_channels (int): The number of output channels.
        block_mid_channels (int): The number of middle block output channels.
        num_residual_blocks (int): The number of residual blocks.
        block_dilations (Sequence[int]): Dilation rate of each residual
            block; must supply at least ``num_residual_blocks`` entries.
            Defaults to (2, 4, 6, 8), which matches the previous hard-coded
            behavior.
    """

    def __init__(self, in_channels, out_channels, block_mid_channels, num_residual_blocks, block_dilations=(2, 4, 6, 8)):
        super(DilatedEncoder, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.block_mid_channels = block_mid_channels
        self.num_residual_blocks = num_residual_blocks
        # block_dilations used to be hard-coded to [2, 4, 6, 8]; with the
        # parameter (same default), num_residual_blocks != 4 no longer fails
        # with an IndexError inside _init_layers().
        assert len(block_dilations) >= num_residual_blocks, \
            'block_dilations must provide a rate for every residual block'
        self.block_dilations = list(block_dilations)
        self._init_layers()

    def _init_layers(self):
        # 1x1 lateral projection + 3x3 fpn conv, each followed by BN.
        self.lateral_conv = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1)
        self.lateral_norm = BatchNorm2d(self.out_channels)
        self.fpn_conv = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, padding=1)
        self.fpn_norm = BatchNorm2d(self.out_channels)
        # Stack of dilated residual bottlenecks with increasing dilation.
        encoder_blocks = []
        for i in range(self.num_residual_blocks):
            dilation = self.block_dilations[i]
            encoder_blocks.append(
                Bottleneck(
                    self.out_channels,
                    self.block_mid_channels,
                    dilation=dilation))
        self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)

    def init_weights(self):
        caffe2_xavier_init(self.lateral_conv)
        caffe2_xavier_init(self.fpn_conv)
        for m in [self.lateral_norm, self.fpn_norm]:
            constant_init(m, 1)
        for m in self.dilated_encoder_blocks.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, mean=0, std=0.01)
            if is_norm(m):
                constant_init(m, 1)

    def forward(self, feature):
        # Only the last (coarsest) backbone level is consumed.
        out = self.lateral_norm(self.lateral_conv(feature[(- 1)]))
        out = self.fpn_norm(self.fpn_conv(out))
        return (self.dilated_encoder_blocks(out),)
class Transition(BaseModule):
    """Base class for transition.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
    """

    def __init__(self, in_channels, out_channels, init_cfg=None):
        super().__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x):
        # Fix: the stub previously lacked ``self`` (``def forward(x)``), so
        # calling it on an instance bound the instance to ``x``. Subclasses
        # must override this with the real transition.
        pass
class UpInterpolationConv(Transition):
    """A transition used for up-sampling.

    Up-sample the input by interpolation then refine the feature with a
    convolution layer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Up-sampling factor. Default: 2.
        mode (int): Interpolation mode. Default: nearest.
        align_corners (bool): Whether align corners when interpolation.
            Default: None.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self, in_channels, out_channels, scale_factor=2, mode='nearest', align_corners=None, kernel_size=3, init_cfg=None, **kwargs):
        super().__init__(in_channels, out_channels, init_cfg)
        self.mode = mode
        self.scale_factor = scale_factor
        self.align_corners = align_corners
        self.conv = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, x):
        # Interpolate first, then refine with the conv.
        upsampled = F.interpolate(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)
        return self.conv(upsampled)
class LastConv(Transition):
    """A transition used for refining the output of the last stage.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        num_inputs (int): Number of inputs of the FPN features.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self, in_channels, out_channels, num_inputs, kernel_size=3, init_cfg=None, **kwargs):
        super().__init__(in_channels, out_channels, init_cfg)
        self.num_inputs = num_inputs
        self.conv_out = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, inputs):
        assert len(inputs) == self.num_inputs
        # Only the feature from the last stage is refined.
        last_feat = inputs[-1]
        return self.conv_out(last_feat)
@NECKS.register_module()
class FPG(BaseModule):
    """FPG.

    Implementation of `Feature Pyramid Grids (FPG)
    <https://arxiv.org/abs/2004.03580>`_.
    This implementation only gives the basic structure stated in the paper.
    But users can implement different types of transitions to fully explore
    the potential power of the structure of FPG.

    Args:
        in_channels (int): Number of input channels (feature maps of all
            levels should have the same channels).
        out_channels (int): Number of output channels (used at each scale)
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture will
            be stacked.
        paths (list[str]): Specify the path order of each stack level.
            Each element in the list should be either 'bu' (bottom-up) or
            'td' (top-down).
        inter_channels (int): Number of inter channels.
        same_up_trans (dict): Transition that goes down at the same stage.
        same_down_trans (dict): Transition that goes up at the same stage.
        across_lateral_trans (dict): Across-pathway same-stage
        across_down_trans (dict): Across-pathway bottom-up connection.
        across_up_trans (dict): Across-pathway top-down connection.
        across_skip_trans (dict): Across-pathway skip connection.
        output_trans (dict): Transition that trans the output of the
            last stage.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last
            level.
        add_extra_convs (bool): It decides whether to add conv layers on top
            of the original feature maps. Default to False.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        skip_inds (list[tuple[int]]): Per output level, the stack stages to
            skip. Required when ``across_skip_trans`` is not None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    transition_types = {
        'conv': ConvModule,
        'interpolation_conv': UpInterpolationConv,
        'last_conv': LastConv,
    }

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 paths,
                 inter_channels=None,
                 same_down_trans=None,
                 same_up_trans=dict(type='conv', kernel_size=3, stride=2, padding=1),
                 across_lateral_trans=dict(type='conv', kernel_size=1),
                 across_down_trans=dict(type='conv', kernel_size=3),
                 across_up_trans=None,
                 across_skip_trans=dict(type='identity'),
                 output_trans=dict(type='last_conv', kernel_size=3),
                 start_level=0,
                 end_level=(- 1),
                 add_extra_convs=False,
                 norm_cfg=None,
                 skip_inds=None,
                 init_cfg=[
                     dict(type='Caffe2Xavier', layer='Conv2d'),
                     dict(
                         type='Constant',
                         layer=['_BatchNorm', '_InstanceNorm', 'GroupNorm', 'LayerNorm'],
                         val=1.0)
                 ]):
        super(FPG, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        if inter_channels is None:
            self.inter_channels = [out_channels for _ in range(num_outs)]
        elif isinstance(inter_channels, int):
            self.inter_channels = [inter_channels for _ in range(num_outs)]
        else:
            assert isinstance(inter_channels, list)
            assert len(inter_channels) == num_outs
            self.inter_channels = inter_channels
        self.stack_times = stack_times
        self.paths = paths
        assert isinstance(paths, list) and len(paths) == stack_times
        for d in paths:
            assert d in ('bu', 'td')
        self.same_down_trans = same_down_trans
        self.same_up_trans = same_up_trans
        self.across_lateral_trans = across_lateral_trans
        self.across_down_trans = across_down_trans
        self.across_up_trans = across_up_trans
        self.output_trans = output_trans
        self.across_skip_trans = across_skip_trans
        self.with_bias = norm_cfg is None
        # Fix: this check was previously the bare expression
        # ``skip_inds is not None`` (a no-op); skip_inds is mandatory
        # whenever across-skip transitions are configured, since it is
        # subscripted right below and in forward().
        if self.across_skip_trans is not None:
            assert skip_inds is not None
        self.skip_inds = skip_inds
        assert len(self.skip_inds[0]) <= self.stack_times
        if end_level == (- 1):
            self.backbone_end_level = self.num_ins
            assert num_outs >= (self.num_ins - start_level)
        else:
            # If end_level < inputs, no extra level is allowed.
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == (end_level - start_level)
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # 1x1 lateral convs map each used backbone level to inter channels.
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = nn.Conv2d(self.in_channels[i],
                               self.inter_channels[(i - self.start_level)], 1)
            self.lateral_convs.append(l_conv)
        # Extra pyramid levels beyond the backbone: stride-2 conv or pool.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            if self.add_extra_convs:
                fpn_idx = ((self.backbone_end_level - self.start_level) + i)
                extra_conv = nn.Conv2d(
                    self.inter_channels[(fpn_idx - 1)],
                    self.inter_channels[fpn_idx],
                    3,
                    stride=2,
                    padding=1)
                self.extra_downsamples.append(extra_conv)
            else:
                self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))
        # Build the grid of transitions: one ModuleDict per (stage, level).
        self.fpn_transitions = nn.ModuleList()
        for s in range(self.stack_times):
            stage_trans = nn.ModuleList()
            for i in range(self.num_outs):
                trans = nn.ModuleDict()
                if s in self.skip_inds[i]:
                    # This (stage, level) cell is skipped entirely.
                    stage_trans.append(trans)
                    continue
                # Same-pathway connection from the level below (bottom-up).
                if (i == 0) or (self.same_up_trans is None):
                    same_up_trans = None
                else:
                    same_up_trans = self.build_trans(
                        self.same_up_trans, self.inter_channels[(i - 1)],
                        self.inter_channels[i])
                trans['same_up'] = same_up_trans
                # Same-pathway connection from the level above (top-down).
                if (i == (self.num_outs - 1)) or (self.same_down_trans is None):
                    same_down_trans = None
                else:
                    same_down_trans = self.build_trans(
                        self.same_down_trans, self.inter_channels[(i + 1)],
                        self.inter_channels[i])
                trans['same_down'] = same_down_trans
                # Across-pathway same-level connection.
                across_lateral_trans = self.build_trans(
                    self.across_lateral_trans, self.inter_channels[i],
                    self.inter_channels[i])
                trans['across_lateral'] = across_lateral_trans
                # Across-pathway connection from the level above.
                if (i == (self.num_outs - 1)) or (self.across_down_trans is None):
                    across_down_trans = None
                else:
                    across_down_trans = self.build_trans(
                        self.across_down_trans, self.inter_channels[(i + 1)],
                        self.inter_channels[i])
                trans['across_down'] = across_down_trans
                # Across-pathway connection from the level below.
                if (i == 0) or (self.across_up_trans is None):
                    across_up_trans = None
                else:
                    across_up_trans = self.build_trans(
                        self.across_up_trans, self.inter_channels[(i - 1)],
                        self.inter_channels[i])
                trans['across_up'] = across_up_trans
                # Skip connection from the stage-0 (lateral) features.
                if self.across_skip_trans is None:
                    across_skip_trans = None
                else:
                    across_skip_trans = self.build_trans(
                        self.across_skip_trans, self.inter_channels[(i - 1)],
                        self.inter_channels[i])
                trans['across_skip'] = across_skip_trans
                stage_trans.append(trans)
            self.fpn_transitions.append(stage_trans)
        # Output transitions fuse one feature per stage into the final map.
        self.output_transition = nn.ModuleList()
        for i in range(self.num_outs):
            trans = self.build_trans(
                self.output_trans,
                self.inter_channels[i],
                self.out_channels,
                num_inputs=(self.stack_times + 1))
            self.output_transition.append(trans)
        self.relu = nn.ReLU(inplace=True)

    def build_trans(self, cfg, in_channels, out_channels, **extra_args):
        """Instantiate a transition module from its config dict."""
        cfg_ = cfg.copy()
        trans_type = cfg_.pop('type')
        trans_cls = self.transition_types[trans_type]
        return trans_cls(in_channels, out_channels, **cfg_, **extra_args)

    def fuse(self, fuse_dict):
        """Sum all non-None entries of ``fuse_dict``."""
        out = None
        for item in fuse_dict.values():
            if item is not None:
                if out is None:
                    out = item
                else:
                    out = out + item
        return out

    def forward(self, inputs):
        assert len(inputs) == len(self.in_channels)
        # Stage 0: lateral projections (+ extra downsampled levels).
        feats = [
            lateral_conv(inputs[(i + self.start_level)])
            for (i, lateral_conv) in enumerate(self.lateral_convs)
        ]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[(- 1)]))
        outs = [feats]
        for i in range(self.stack_times):
            current_outs = outs[(- 1)]
            next_outs = []
            direction = self.paths[i]
            for j in range(self.num_outs):
                if i in self.skip_inds[j]:
                    # Skipped cell: carry the previous stage's feature over.
                    next_outs.append(outs[(- 1)][j])
                    continue
                # 'td' walks levels top-down, 'bu' bottom-up.
                if direction == 'td':
                    lvl = ((self.num_outs - j) - 1)
                else:
                    lvl = j
                if direction == 'td':
                    same_trans = self.fpn_transitions[i][lvl]['same_down']
                else:
                    same_trans = self.fpn_transitions[i][lvl]['same_up']
                across_lateral_trans = self.fpn_transitions[i][lvl]['across_lateral']
                across_down_trans = self.fpn_transitions[i][lvl]['across_down']
                across_up_trans = self.fpn_transitions[i][lvl]['across_up']
                across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
                to_fuse = dict(
                    same=None, lateral=None, across_up=None, across_down=None)
                # Same-pathway contribution from the previously computed
                # level of this stage.
                if same_trans is not None:
                    to_fuse['same'] = same_trans(next_outs[(- 1)])
                # Across-pathway same-level contribution.
                if across_lateral_trans is not None:
                    to_fuse['lateral'] = across_lateral_trans(current_outs[lvl])
                # Across-pathway contributions from adjacent levels.
                if (lvl > 0) and (across_up_trans is not None):
                    to_fuse['across_up'] = across_up_trans(current_outs[(lvl - 1)])
                if (lvl < (self.num_outs - 1)) and (across_down_trans is not None):
                    to_fuse['across_down'] = across_down_trans(current_outs[(lvl + 1)])
                # Skip contribution from the stage-0 features.
                if across_skip_trans is not None:
                    to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
                x = self.fuse(to_fuse)
                next_outs.append(x)
            if direction == 'td':
                # Re-order back to bottom-up before storing the stage.
                outs.append(next_outs[::(- 1)])
            else:
                outs.append(next_outs)
        # Output stage: per level, fuse the per-stage features.
        final_outs = []
        for i in range(self.num_outs):
            lvl_out_list = []
            for s in range(len(outs)):
                lvl_out_list.append(outs[s][i])
            lvl_out = self.output_transition[i](lvl_out_list)
            final_outs.append(lvl_out)
        return final_outs
@NECKS.register_module()
class FPN(BaseModule):
    """Feature Pyramid Network.

    This is an implementation of paper `Feature Pyramid Networks for Object
    Detection <https://arxiv.org/abs/1612.03144>`_.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool | str): If bool, it decides whether to add conv
            layers on top of the original feature maps. Default to False.
            If True, it is equivalent to `add_extra_convs='on_input'`.
            If str, it specifies the source feature map of the extra convs.
            Only the following options are allowed

            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
            - 'on_lateral': Last feature map after lateral convs.
            - 'on_output': The last output feature map after fpn convs.
        relu_before_extra_convs (bool): Whether to apply relu before the extra
            conv. Default: False.
        no_norm_on_lateral (bool): Whether to apply norm on lateral.
            Default: False.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer in ConvModule.
            Default: None.
        upsample_cfg (dict): Config dict for interpolate layer.
            Default: `dict(mode='nearest')`.
        init_cfg (dict or list[dict], optional): Initialization config dict.

    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = FPN(in_channels, 11, len(in_channels)).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=(- 1),
                 add_extra_convs=False,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 upsample_cfg=dict(mode='nearest'),
                 init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):
        super(FPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        # Copy so mutations of the caller's dict do not leak into this module.
        self.upsample_cfg = upsample_cfg.copy()
        if (end_level == (- 1)):
            # Use all backbone levels from start_level up to the last one.
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # A fixed end level is given; extra output levels are not allowed
            # in this configuration (num_outs must match the backbone span).
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            assert (add_extra_convs in ('on_input', 'on_lateral', 'on_output'))
        elif add_extra_convs:
            # `True` is shorthand for the 'on_input' behavior.
            self.add_extra_convs = 'on_input'
        # One 1x1 lateral conv plus one 3x3 output conv per used backbone
        # level; list order defines state_dict keys, so it must not change.
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                # Optionally skip normalization on the lateral branch.
                norm_cfg=(norm_cfg if (not self.no_norm_on_lateral) else None),
                act_cfg=act_cfg,
                inplace=False)
            fpn_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # Add extra stride-2 conv levels (e.g. RetinaNet P6/P7) when requested.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (self.add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                if ((i == 0) and (self.add_extra_convs == 'on_input')):
                    # The first extra conv reads the raw backbone feature.
                    in_channels = self.in_channels[(self.backbone_end_level - 1)]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(
                    in_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    inplace=False)
                self.fpn_convs.append(extra_fpn_conv)

    @auto_fp16()
    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple[Tensor]): Backbone features, one per input level.

        Returns:
            tuple[Tensor]: FPN features with `num_outs` levels.
        """
        assert (len(inputs) == len(self.in_channels))
        # Step 1: 1x1 lateral convs on the used backbone levels.
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        # Step 2: top-down pathway — upsample each level and add it to the
        # next lower lateral (processed from coarsest to finest).
        used_backbone_levels = len(laterals)
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            # When a fixed scale_factor is configured we cannot also pass
            # `size`, hence the two interpolate branches.
            if ('scale_factor' in self.upsample_cfg):
                laterals[(i - 1)] = (laterals[(i - 1)] + F.interpolate(laterals[i], **self.upsample_cfg))
            else:
                prev_shape = laterals[(i - 1)].shape[2:]
                laterals[(i - 1)] = (laterals[(i - 1)] + F.interpolate(laterals[i], size=prev_shape, **self.upsample_cfg))
        # Step 3: 3x3 output convs smooth the merged features.
        outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        # Step 4: produce extra output levels when num_outs exceeds the
        # number of backbone levels used.
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # No extra convs configured: downsample by max-pooling
                # (kernel 1, stride 2), as in the original FPN paper.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # Pick the source of the first extra conv per configuration.
                if (self.add_extra_convs == 'on_input'):
                    extra_source = inputs[(self.backbone_end_level - 1)]
                elif (self.add_extra_convs == 'on_lateral'):
                    extra_source = laterals[(- 1)]
                elif (self.add_extra_convs == 'on_output'):
                    extra_source = outs[(- 1)]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                # Remaining extra levels are chained on the previous output,
                # optionally with a ReLU in between (RetinaNet-style P7).
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
@NECKS.register_module()
class HRFPN(BaseModule):
    """HRFPN (High Resolution Feature Pyramids).

    paper: `High-Resolution Representations for Labeling Pixels and Regions
    <https://arxiv.org/abs/1904.04514>`_.

    Args:
        in_channels (list): number of channels for each branch.
        out_channels (int): output channels of feature pyramids.
        num_outs (int): number of output stages.
        pooling_type (str): pooling for generating feature pyramids
            from {MAX, AVG}.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        stride (int): stride of 3x3 convolutional layers.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs=5,
                 pooling_type='AVG',
                 conv_cfg=None,
                 norm_cfg=None,
                 with_cp=False,
                 stride=1,
                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
        super(HRFPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Fuses the channel-concatenated multi-branch features into
        # `out_channels` with a single 1x1 conv (no activation).
        self.reduction_conv = ConvModule(
            sum(in_channels),
            out_channels,
            kernel_size=1,
            conv_cfg=self.conv_cfg,
            act_cfg=None)
        # One 3x3 conv per output pyramid level.
        self.fpn_convs = nn.ModuleList()
        for i in range(self.num_outs):
            self.fpn_convs.append(
                ConvModule(
                    out_channels,
                    out_channels,
                    kernel_size=3,
                    padding=1,
                    stride=stride,
                    conv_cfg=self.conv_cfg,
                    act_cfg=None))
        # Functional pooling op used to derive the coarser pyramid levels.
        if (pooling_type == 'MAX'):
            self.pooling = F.max_pool2d
        else:
            self.pooling = F.avg_pool2d

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple[Tensor]): `num_ins` branch features, finest first.

        Returns:
            tuple[Tensor]: `num_outs` pyramid features.
        """
        assert (len(inputs) == self.num_ins)
        # Upsample every branch to the resolution of the finest branch
        # (branch i is 2**i times smaller) and concatenate on channels.
        outs = [inputs[0]]
        for i in range(1, self.num_ins):
            outs.append(F.interpolate(inputs[i], scale_factor=(2 ** i), mode='bilinear'))
        out = torch.cat(outs, dim=1)
        # Optionally trade compute for memory via activation checkpointing;
        # checkpoint only helps (and only works) when grads are required.
        if (out.requires_grad and self.with_cp):
            out = checkpoint(self.reduction_conv, out)
        else:
            out = self.reduction_conv(out)
        # Build the pyramid by pooling the fused map with growing strides.
        outs = [out]
        for i in range(1, self.num_outs):
            outs.append(self.pooling(out, kernel_size=(2 ** i), stride=(2 ** i)))
        outputs = []
        for i in range(self.num_outs):
            if (outs[i].requires_grad and self.with_cp):
                tmp_out = checkpoint(self.fpn_convs[i], outs[i])
            else:
                tmp_out = self.fpn_convs[i](outs[i])
            outputs.append(tmp_out)
        return tuple(outputs)
@NECKS.register_module()
class NASFPN(BaseModule):
    """NAS-FPN.

    Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture
    for Object Detection <https://arxiv.org/abs/1904.07392>`_

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture will
            be stacked.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): It decides whether to add conv
            layers on top of the original feature maps. Default to False.
            If True, its actual mode is specified by `extra_convs_on_inputs`.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 start_level=0,
                 end_level=(- 1),
                 add_extra_convs=False,
                 norm_cfg=None,
                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
        super(NASFPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)  # number of input scales
        self.num_outs = num_outs  # number of output scales
        self.stack_times = stack_times
        self.norm_cfg = norm_cfg
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # A fixed end level disallows extra output levels.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # 1x1 convs that map each backbone level to `out_channels`.
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(in_channels[i], out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.lateral_convs.append(l_conv)
        # Extra coarse levels: 1x1 conv followed by 2x max-pool downsampling.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_conv = ConvModule(out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.extra_downsamples.append(nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
        # Stacked NAS-discovered merging stages. Cell names encode the
        # searched wiring: e.g. 'gp_64_4' = global-pooling cell combining
        # P6 and P4 at P4's resolution; 'sum_44_4' = sum cell at P4; etc.
        self.fpn_stages = ModuleList()
        for _ in range(self.stack_times):
            stage = nn.ModuleDict()
            # gp(p6, p4) -> p4_1
            stage['gp_64_4'] = GlobalPoolingCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            # sum(p4_1, p4) -> p4_2
            stage['sum_44_4'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            # sum(p4_2, p3) -> p3_out
            stage['sum_43_3'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            # sum(p3_out, p4_2) -> p4_out
            stage['sum_34_4'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            # gp(p4_out, p3_out) -> p5_tmp (no output conv)
            stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
            # sum(p5, p5_tmp) -> p5_out
            stage['sum_55_5'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            # gp(p5_out, p4_2) -> p7_tmp (no output conv)
            stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
            # sum(p7, p7_tmp) -> p7_out
            stage['sum_77_7'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            # gp(p7_out, p5_out) -> p6_out
            stage['gp_75_6'] = GlobalPoolingCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            self.fpn_stages.append(stage)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple[Tensor]): Backbone features.

        Returns:
            tuple[Tensor]: Five pyramid levels (p3, p4, p5, p6, p7).
        """
        # Lateral 1x1 convs, then append the extra downsampled levels so
        # that `feats` holds exactly five levels p3..p7.
        feats = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[(- 1)]))
        (p3, p4, p5, p6, p7) = feats
        # Apply the searched merging pattern `stack_times` times.
        # NOTE: the exact order of these cell calls is the NAS-discovered
        # architecture; it must not be reordered.
        for stage in self.fpn_stages:
            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[(- 2):])
            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[(- 2):])
            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[(- 2):])
            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[(- 2):])
            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[(- 2):])
            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[(- 2):])
            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[(- 2):])
            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[(- 2):])
            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[(- 2):])
        return (p3, p4, p5, p6, p7)
@NECKS.register_module()
class NASFCOS_FPN(BaseModule):
    """FPN structure in NASFPN.

    Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for
    Object Detection <https://arxiv.org/abs/1906.04423>`_

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 1.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): It decides whether to add conv
            layers on top of the original feature maps. Default to False.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=1,
                 end_level=(- 1),
                 add_extra_convs=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None):
        # This module relies on its custom `init_weights`; a user-supplied
        # init_cfg would conflict with it.
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super(NASFCOS_FPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # Fixed end level: no extra output levels allowed.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # 1x1 adapters mapping each used backbone level to `out_channels`.
        self.adapt_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            adapt_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                stride=1,
                padding=0,
                bias=False,
                norm_cfg=dict(type='BN'),
                act_cfg=dict(type='ReLU', inplace=False))
            self.adapt_convs.append(adapt_conv)
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)

        def build_concat_cell(with_input1_conv, with_input2_conv):
            # Merge cell with a grouped 1x1 output conv in norm-act-conv
            # order; input convs are enabled per the searched wiring.
            cell_conv_cfg = dict(kernel_size=1, padding=0, bias=False, groups=out_channels)
            return ConcatCell(
                in_channels=out_channels,
                out_channels=out_channels,
                with_out_conv=True,
                out_conv_cfg=cell_conv_cfg,
                out_norm_cfg=dict(type='BN'),
                out_conv_order=('norm', 'act', 'conv'),
                with_input1_conv=with_input1_conv,
                with_input2_conv=with_input2_conv,
                input_conv_cfg=conv_cfg,
                input_norm_cfg=norm_cfg,
                upsample_mode='nearest')

        # Searched merge topology; key 'cXY' means cell combining feats[X]
        # and feats[Y] (results are appended to `feats`, so later cells can
        # consume earlier cells' outputs).
        self.fpn = nn.ModuleDict()
        self.fpn['c22_1'] = build_concat_cell(True, True)
        self.fpn['c22_2'] = build_concat_cell(True, True)
        self.fpn['c32'] = build_concat_cell(True, False)
        self.fpn['c02'] = build_concat_cell(True, False)
        self.fpn['c42'] = build_concat_cell(True, True)
        self.fpn['c36'] = build_concat_cell(True, True)
        self.fpn['c61'] = build_concat_cell(True, True)
        # Stride-2 convs producing the extra coarse outputs (act-norm-conv
        # order; the first one has no activation).
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_act_cfg = (None if (i == 0) else dict(type='ReLU', inplace=False))
            self.extra_downsamples.append(
                ConvModule(
                    out_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    act_cfg=extra_act_cfg,
                    order=('act', 'norm', 'conv')))

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple[Tensor]): Backbone features.

        Returns:
            tuple[Tensor]: `num_outs` pyramid features.
        """
        feats = [adapt_conv(inputs[(i + self.start_level)]) for (i, adapt_conv) in enumerate(self.adapt_convs)]
        # Run the searched cells; the two digits in the key name index the
        # cell's operands within the growing `feats` list.
        for module_name in self.fpn:
            (idx_1, idx_2) = (int(module_name[1]), int(module_name[2]))
            res = self.fpn[module_name](feats[idx_1], feats[idx_2])
            feats.append(res)
        ret = []
        # Fuse cell outputs 9/8/7 with cell output 5 and resize each result
        # back to the resolution of the matching backbone input.
        for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]):
            (feats1, feats2) = (feats[idx], feats[5])
            feats2_resize = F.interpolate(feats2, size=feats1.size()[2:], mode='bilinear', align_corners=False)
            feats_sum = (feats1 + feats2_resize)
            ret.append(F.interpolate(feats_sum, size=inputs[input_idx].size()[2:], mode='bilinear', align_corners=False))
        for submodule in self.extra_downsamples:
            ret.append(submodule(ret[(- 1)]))
        return tuple(ret)

    def init_weights(self):
        """Initialize the weights of module."""
        super(NASFCOS_FPN, self).init_weights()
        for module in self.fpn.values():
            # FIX: the merge cells expose their output conv as `out_conv`
            # (set when with_out_conv=True). The original guard tested the
            # misspelled `conv_out`, which never exists, so the Caffe2-Xavier
            # init of the cells' output convs silently never ran.
            if hasattr(module, 'out_conv'):
                caffe2_xavier_init(module.out_conv.conv)
        for modules in [self.adapt_convs.modules(), self.extra_downsamples.modules()]:
            for module in modules:
                if isinstance(module, nn.Conv2d):
                    caffe2_xavier_init(module)
@NECKS.register_module()
class PAFPN(FPN):
    """Path Aggregation Network for Instance Segmentation.

    This is an implementation of the `PAFPN in Path Aggregation Network
    <https://arxiv.org/abs/1803.01534>`_.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool | str): If bool, it decides whether to add conv
            layers on top of the original feature maps. Default to False.
            If True, it is equivalent to `add_extra_convs='on_input'`.
            If str, it specifies the source feature map of the extra convs.
            Only the following options are allowed

            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
            - 'on_lateral': Last feature map after lateral convs.
            - 'on_output': The last output feature map after fpn convs.
        relu_before_extra_convs (bool): Whether to apply relu before the extra
            conv. Default: False.
        no_norm_on_lateral (bool): Whether to apply norm on lateral.
            Default: False.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer in ConvModule.
            Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=(- 1),
                 add_extra_convs=False,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):
        # FPN builds the lateral and top-down machinery; PAFPN adds the
        # bottom-up path-aggregation layers below.
        super(PAFPN, self).__init__(in_channels, out_channels, num_outs, start_level, end_level, add_extra_convs, relu_before_extra_convs, no_norm_on_lateral, conv_cfg, norm_cfg, act_cfg, init_cfg=init_cfg)
        # One stride-2 downsample conv and one smoothing conv per level
        # above the start level (list order defines state_dict keys).
        self.downsample_convs = nn.ModuleList()
        self.pafpn_convs = nn.ModuleList()
        for i in range((self.start_level + 1), self.backbone_end_level):
            d_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                stride=2,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            pafpn_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            self.downsample_convs.append(d_conv)
            self.pafpn_convs.append(pafpn_conv)

    @auto_fp16()
    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple[Tensor]): Backbone features.

        Returns:
            tuple[Tensor]: `num_outs` pyramid features.
        """
        assert (len(inputs) == len(self.in_channels))
        # Part 1: standard FPN — laterals plus top-down accumulation.
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        used_backbone_levels = len(laterals)
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            prev_shape = laterals[(i - 1)].shape[2:]
            # In-place accumulation from coarse to fine.
            laterals[(i - 1)] += F.interpolate(laterals[i], size=prev_shape, mode='nearest')
        inter_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        # Part 2: PAFPN's extra bottom-up path — each finer level is
        # downsampled and added into the next coarser one.
        for i in range(0, (used_backbone_levels - 1)):
            inter_outs[(i + 1)] += self.downsample_convs[i](inter_outs[i])
        # The finest level passes through unchanged; the rest get one more
        # 3x3 smoothing conv.
        outs = []
        outs.append(inter_outs[0])
        outs.extend([self.pafpn_convs[(i - 1)](inter_outs[i]) for i in range(1, used_backbone_levels)])
        # Part 3: extra output levels, same policy as FPN.
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # No extra convs: downsample via max-pool (kernel 1, stride 2).
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                if (self.add_extra_convs == 'on_input'):
                    orig = inputs[(self.backbone_end_level - 1)]
                    outs.append(self.fpn_convs[used_backbone_levels](orig))
                elif (self.add_extra_convs == 'on_lateral'):
                    outs.append(self.fpn_convs[used_backbone_levels](laterals[(- 1)]))
                elif (self.add_extra_convs == 'on_output'):
                    outs.append(self.fpn_convs[used_backbone_levels](outs[(- 1)]))
                else:
                    raise NotImplementedError
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
class ASPP(BaseModule):
    """ASPP (Atrous Spatial Pyramid Pooling).

    This is an implementation of the ASPP module used in DetectoRS
    (https://arxiv.org/pdf/2006.02334.pdf). Each branch applies a conv with
    its own dilation; the final branch (dilation 1) operates on a globally
    pooled version of the input. All branch outputs are ReLU-activated and
    concatenated on the channel dimension.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of channels produced by this module.
        dilations (tuple[int]): Dilations of the four branches.
            Default: (1, 3, 6, 1)
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 dilations=(1, 3, 6, 1),
                 init_cfg=dict(type='Kaiming', layer='Conv2d')):
        super().__init__(init_cfg)
        # The last branch must use dilation 1: it consumes the 1x1 globally
        # pooled feature, where a dilated kernel would be meaningless.
        assert dilations[(- 1)] == 1
        self.aspp = nn.ModuleList()
        for dilation in dilations:
            # Dilated branches use 3x3 kernels; dilation-1 branches use 1x1.
            dilated = dilation > 1
            self.aspp.append(
                nn.Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=3 if dilated else 1,
                    stride=1,
                    dilation=dilation,
                    padding=dilation if dilated else 0,
                    bias=True))
        self.gap = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        """Concatenate ReLU-activated branch outputs along channels."""
        pooled = self.gap(x)
        last = len(self.aspp) - 1
        branch_outs = []
        for idx, branch in enumerate(self.aspp):
            # Only the final branch sees the globally pooled feature.
            branch_in = pooled if idx == last else x
            branch_outs.append(F.relu_(branch(branch_in)))
        # Broadcast the 1x1 global branch back to spatial size before concat.
        branch_outs[(- 1)] = branch_outs[(- 1)].expand_as(branch_outs[(- 2)])
        return torch.cat(branch_outs, dim=1)
@NECKS.register_module()
class RFP(FPN):
    """RFP (Recursive Feature Pyramid).

    This is an implementation of RFP in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the
    input of RFP should be multi level features along with origin input image
    of backbone.

    Args:
        rfp_steps (int): Number of unrolled steps of RFP.
        rfp_backbone (dict): Configuration of the backbone for RFP.
        aspp_out_channels (int): Number of output channels of ASPP module.
        aspp_dilations (tuple[int]): Dilation rates of four branches.
            Default: (1, 3, 6, 1)
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 rfp_steps,
                 rfp_backbone,
                 aspp_out_channels,
                 aspp_dilations=(1, 3, 6, 1),
                 init_cfg=None,
                 **kwargs):
        # RFP defines its own init_weights; a user init_cfg would conflict.
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super().__init__(init_cfg=init_cfg, **kwargs)
        self.rfp_steps = rfp_steps
        # One extra backbone instance per unrolled step after the first.
        self.rfp_modules = ModuleList()
        for rfp_idx in range(1, rfp_steps):
            rfp_module = build_backbone(rfp_backbone)
            self.rfp_modules.append(rfp_module)
        # ASPP transforms FPN outputs before feeding them back to the
        # recursive backbones; rfp_weight gates old vs. new features.
        self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels, aspp_dilations)
        self.rfp_weight = nn.Conv2d(self.out_channels, 1, kernel_size=1, stride=1, padding=0, bias=True)

    def init_weights(self):
        """Initialize FPN convs, recursive backbones and the fusion gate."""
        # Xavier-init the inherited FPN convs directly (the parent's
        # init_cfg path is bypassed since init_cfg must be None here).
        for convs in [self.lateral_convs, self.fpn_convs]:
            for m in convs.modules():
                if isinstance(m, nn.Conv2d):
                    xavier_init(m, distribution='uniform')
        for rfp_idx in range((self.rfp_steps - 1)):
            self.rfp_modules[rfp_idx].init_weights()
        # Zero-init the gate so training starts from plain FPN behavior
        # (sigmoid(0) = 0.5 blend of old and new features).
        constant_init(self.rfp_weight, 0)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (tuple): The original input image followed by the
                multi-level backbone features.

        Returns:
            list[Tensor]: Refined FPN features after `rfp_steps` unrolls.
        """
        inputs = list(inputs)
        # One extra element: the raw image needed by the recursive backbones.
        assert (len(inputs) == (len(self.in_channels) + 1))
        img = inputs.pop(0)
        # Step 0: a regular FPN pass.
        x = super().forward(tuple(inputs))
        for rfp_idx in range((self.rfp_steps - 1)):
            # Feed ASPP-transformed FPN outputs (all levels except the first)
            # back into the step-specific backbone, then run FPN again.
            rfp_feats = ([x[0]] + list((self.rfp_aspp(x[i]) for i in range(1, len(x)))))
            x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
            x_idx = super().forward(x_idx)
            # Per-level learned gate blends the new features with the old.
            x_new = []
            for ft_idx in range(len(x_idx)):
                add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
                x_new.append(((add_weight * x_idx[ft_idx]) + ((1 - add_weight) * x[ft_idx])))
            x = x_new
        return x
class DetectionBlock(BaseModule):
    """Detection block in YOLO neck.

    Let out_channels = n; the block chains five ConvModules with channel
    widths 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn. The input channel count is
    arbitrary (in_channels).

    Args:
        in_channels (int): The number of input channels.
        out_channels (int): The number of output channels.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(DetectionBlock, self).__init__(init_cfg)
        expanded = (out_channels * 2)
        common = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # Alternating squeeze (1x1) / expand (3x3) convs; attribute names
        # conv1..conv5 are fixed (they define the state_dict keys).
        self.conv1 = ConvModule(in_channels, out_channels, 1, **common)
        self.conv2 = ConvModule(out_channels, expanded, 3, padding=1, **common)
        self.conv3 = ConvModule(expanded, out_channels, 1, **common)
        self.conv4 = ConvModule(out_channels, expanded, 3, padding=1, **common)
        self.conv5 = ConvModule(expanded, out_channels, 1, **common)

    def forward(self, x):
        """Run the five convs in sequence and return the result."""
        out = x
        for layer in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            out = layer(out)
        return out
@NECKS.register_module()
class YOLOV3Neck(BaseModule):
    """The neck of YOLOV3.

    It can be treated as a simplified version of FPN. It
    will take the result from Darknet backbone and do some upsampling and
    concatenation. It will finally output the detection result.

    Note:
        The input feats should be from top to bottom.
        i.e., from high-lvl to low-lvl
        But YOLOV3Neck will process them in reversed order.
        i.e., from bottom (high-lvl) to top (low-lvl)

    Args:
        num_scales (int): The number of scales / stages.
        in_channels (List[int]): The number of input channels per scale.
        out_channels (List[int]): The number of output channels per scale.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict, optional): Dictionary to construct and config norm
            layer. Default: dict(type='BN', requires_grad=True)
        act_cfg (dict, optional): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 num_scales,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(YOLOV3Neck, self).__init__(init_cfg)
        assert (num_scales == len(in_channels) == len(out_channels))
        self.num_scales = num_scales
        self.in_channels = in_channels
        self.out_channels = out_channels
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # First detection block consumes the deepest (coarsest) feature.
        self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
        # Remaining scales get a 1x1 squeeze conv plus a detection block.
        # NOTE: submodules are registered by name (conv1/detect2, ...) and
        # looked up with getattr in forward — names are part of the
        # checkpoint layout.
        for i in range(1, self.num_scales):
            (in_c, out_c) = (self.in_channels[i], self.out_channels[i])
            inter_c = out_channels[(i - 1)]
            self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg))
            # The detection block concatenates the upsampled previous output
            # with the backbone feature, hence in_c + out_c input channels.
            self.add_module(f'detect{(i + 1)}', DetectionBlock((in_c + out_c), out_c, **cfg))

    def forward(self, feats):
        """Forward function.

        Args:
            feats (tuple[Tensor]): Backbone features, high-level last.

        Returns:
            tuple[Tensor]: One output per scale, coarsest first.
        """
        assert (len(feats) == self.num_scales)
        # Process from the deepest feature upward.
        outs = []
        out = self.detect1(feats[(- 1)])
        outs.append(out)
        for (i, x) in enumerate(reversed(feats[:(- 1)])):
            # Squeeze the previous output, upsample 2x, concat with the
            # next-finer backbone feature, then run its detection block.
            conv = getattr(self, f'conv{(i + 1)}')
            tmp = conv(out)
            tmp = F.interpolate(tmp, scale_factor=2)
            tmp = torch.cat((tmp, x), 1)
            detect = getattr(self, f'detect{(i + 2)}')
            out = detect(tmp)
            outs.append(out)
        return tuple(outs)
@NECKS.register_module()
class YOLOXPAFPN(BaseModule):
    """Path Aggregation Network used in YOLOX.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3
        use_depthwise (bool): Whether to use depthwise separable convolution
            in blocks. Default: False
        upsample_cfg (dict): Config dict for interpolate layer.
            Default: `dict(scale_factor=2, mode='nearest')`
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN')
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish')
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_csp_blocks=3,
                 use_depthwise=False,
                 upsample_cfg=dict(scale_factor=2, mode='nearest'),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=dict(type='Kaiming', layer='Conv2d', a=math.sqrt(5), distribution='uniform', mode='fan_in', nonlinearity='leaky_relu')):
        super(YOLOXPAFPN, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Optionally swap plain convs for depthwise-separable ones.
        conv = (DepthwiseSeparableConvModule if use_depthwise else ConvModule)
        self.upsample = nn.Upsample(**upsample_cfg)
        # Top-down path: one channel-reducing 1x1 conv and one CSP block per
        # transition, built coarse-to-fine (loop runs with decreasing idx,
        # matching the indexing used in forward).
        self.reduce_layers = nn.ModuleList()
        self.top_down_blocks = nn.ModuleList()
        for idx in range((len(in_channels) - 1), 0, (- 1)):
            self.reduce_layers.append(
                ConvModule(
                    in_channels[idx],
                    in_channels[(idx - 1)],
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            # Input is a concat of upsampled and lateral features, hence *2.
            self.top_down_blocks.append(
                CSPLayer(
                    (in_channels[(idx - 1)] * 2),
                    in_channels[(idx - 1)],
                    num_blocks=num_csp_blocks,
                    add_identity=False,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # Bottom-up path: stride-2 downsample conv plus CSP block per level.
        self.downsamples = nn.ModuleList()
        self.bottom_up_blocks = nn.ModuleList()
        for idx in range((len(in_channels) - 1)):
            self.downsamples.append(
                conv(
                    in_channels[idx],
                    in_channels[idx],
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.bottom_up_blocks.append(
                CSPLayer(
                    (in_channels[idx] * 2),
                    in_channels[(idx + 1)],
                    num_blocks=num_csp_blocks,
                    add_identity=False,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # Final 1x1 convs projecting every level to `out_channels`.
        self.out_convs = nn.ModuleList()
        for i in range(len(in_channels)):
            self.out_convs.append(
                ConvModule(
                    in_channels[i],
                    out_channels,
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

    def forward(self, inputs):
        """
        Args:
            inputs (tuple[Tensor]): input features.

        Returns:
            tuple[Tensor]: YOLOXPAFPN features.
        """
        assert (len(inputs) == len(self.in_channels))
        # Top-down path: start from the coarsest input and grow the list of
        # intermediate outputs toward the finest level (inserted at index 0).
        inner_outs = [inputs[(- 1)]]
        for idx in range((len(self.in_channels) - 1), 0, (- 1)):
            feat_heigh = inner_outs[0]
            feat_low = inputs[(idx - 1)]
            # Modules were appended coarse-to-fine, hence this index mapping.
            feat_heigh = self.reduce_layers[((len(self.in_channels) - 1) - idx)](feat_heigh)
            inner_outs[0] = feat_heigh
            upsample_feat = self.upsample(feat_heigh)
            inner_out = self.top_down_blocks[((len(self.in_channels) - 1) - idx)](torch.cat([upsample_feat, feat_low], 1))
            inner_outs.insert(0, inner_out)
        # Bottom-up path: downsample each output and fuse with the next
        # coarser intermediate feature.
        outs = [inner_outs[0]]
        for idx in range((len(self.in_channels) - 1)):
            feat_low = outs[(- 1)]
            feat_height = inner_outs[(idx + 1)]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](torch.cat([downsample_feat, feat_height], 1))
            outs.append(out)
        # Project every level to the common output channel width.
        for (idx, conv) in enumerate(self.out_convs):
            outs[idx] = conv(outs[idx])
        return tuple(outs)
@PLUGIN_LAYERS.register_module()
class DropBlock(nn.Module):
    """Randomly zero out contiguous square regions of a feature map.

    Implements the regularization method proposed in `DropBlock
    <https://arxiv.org/abs/1810.12890>`_.

    Args:
        drop_prob (float): The probability of dropping each block.
        block_size (int): Side length of the dropped square blocks
            (must be odd).
        warmup_iters (int): The drop probability will linearly increase
            from `0` to `drop_prob` during the first `warmup_iters`
            iterations. Default: 2000.
    """

    def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
        super(DropBlock, self).__init__()
        assert block_size % 2 == 1
        assert 0 < drop_prob <= 1
        assert warmup_iters >= 0
        self.drop_prob = drop_prob
        self.block_size = block_size
        self.warmup_iters = warmup_iters
        self.iter_cnt = 0

    def forward(self, x):
        """Apply DropBlock to ``x``; a no-op in eval mode.

        Args:
            x (Tensor): Input feature map of shape (N, C, H, W).

        Returns:
            Tensor: The tensor after DropBlock layer.
        """
        if not self.training:
            return x
        self.iter_cnt += 1
        N, C, H, W = x.shape
        gamma = self._compute_gamma((H, W))
        # Sample block centers only at positions where a full block fits.
        mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)
        mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
        mask = F.pad(mask, [self.block_size // 2] * 4, value=0)
        # Grow each sampled center into a block_size x block_size region.
        mask = F.max_pool2d(
            input=mask,
            stride=(1, 1),
            kernel_size=(self.block_size, self.block_size),
            padding=self.block_size // 2)
        mask = 1 - mask
        # Rescale so the expected magnitude of activations is preserved.
        x = x * mask * mask.numel() / (eps + mask.sum())
        return x

    def _compute_gamma(self, feat_size):
        """Compute the Bernoulli parameter ``gamma`` for block centers.

        gamma = (drop_prob * fm_area) / (drop_area * keep_area), then
        scaled by the linear warmup factor.

        Args:
            feat_size (tuple[int, int]): The height and width of feature map.

        Returns:
            float: The value of gamma.
        """
        keep_h = feat_size[0] - self.block_size + 1
        keep_w = feat_size[1] - self.block_size + 1
        gamma = self.drop_prob * feat_size[0] * feat_size[1]
        gamma /= keep_h * keep_w
        gamma /= self.block_size ** 2
        if self.iter_cnt > self.warmup_iters:
            factor = 1.0
        else:
            factor = self.iter_cnt / self.warmup_iters
        return gamma * factor

    def extra_repr(self):
        return f'drop_prob={self.drop_prob}, block_size={self.block_size}, warmup_iters={self.warmup_iters}'
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
    """Base class for RoIHeads.

    Wires up the optional shared head, bbox branch and mask branch, and
    declares the abstract interface every concrete RoI head implements.
    """

    def __init__(self,
                 bbox_roi_extractor=None,
                 bbox_head=None,
                 mask_roi_extractor=None,
                 mask_head=None,
                 shared_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(BaseRoIHead, self).__init__(init_cfg)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        if shared_head is not None:
            # propagate the pretrained checkpoint down to the shared head
            shared_head.pretrained = pretrained
            self.shared_head = build_shared_head(shared_head)

        if bbox_head is not None:
            self.init_bbox_head(bbox_roi_extractor, bbox_head)

        if mask_head is not None:
            self.init_mask_head(mask_roi_extractor, mask_head)

        self.init_assigner_sampler()

    @property
    def with_bbox(self):
        """bool: whether the RoI head contains a `bbox_head`"""
        return getattr(self, 'bbox_head', None) is not None

    @property
    def with_mask(self):
        """bool: whether the RoI head contains a `mask_head`"""
        return getattr(self, 'mask_head', None) is not None

    @property
    def with_shared_head(self):
        """bool: whether the RoI head contains a `shared_head`"""
        return getattr(self, 'shared_head', None) is not None

    @abstractmethod
    def init_bbox_head(self):
        """Initialize ``bbox_head``"""
        pass

    @abstractmethod
    def init_mask_head(self):
        """Initialize ``mask_head``"""
        pass

    @abstractmethod
    def init_assigner_sampler(self):
        """Initialize assigner and sampler."""
        pass

    @abstractmethod
    def forward_train(self,
                      x,
                      img_meta,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      **kwargs):
        """Forward function during training."""

    async def async_simple_test(self,
                                x,
                                proposal_list,
                                img_metas,
                                proposals=None,
                                rescale=False,
                                **kwargs):
        """Asynchronized test function."""
        raise NotImplementedError

    def simple_test(self,
                    x,
                    proposal_list,
                    img_meta,
                    proposals=None,
                    rescale=False,
                    **kwargs):
        """Test without augmentation."""

    def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
@HEADS.register_module()
class ConvFCBBoxHead(BBoxHead):
    r"""More general bbox head, with shared conv and fc layers and two optional
    separated branches.

    .. code-block:: none

                                    /-> cls convs -> cls fcs -> cls
        shared convs -> shared fcs
                                    \-> reg convs -> reg fcs -> reg
    """

    def __init__(self,
                 num_shared_convs=0,
                 num_shared_fcs=0,
                 num_cls_convs=0,
                 num_cls_fcs=0,
                 num_reg_convs=0,
                 num_reg_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 *args,
                 **kwargs):
        super(ConvFCBBoxHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
        # at least one layer must exist somewhere in the head
        assert ((((((num_shared_convs + num_shared_fcs) + num_cls_convs) + num_cls_fcs) + num_reg_convs) + num_reg_fcs) > 0)
        if ((num_cls_convs > 0) or (num_reg_convs > 0)):
            # branch-specific convs only make sense when features are still
            # spatial, i.e. before any shared fc flattens them
            assert (num_shared_fcs == 0)
        if (not self.with_cls):
            assert ((num_cls_convs == 0) and (num_cls_fcs == 0))
        if (not self.with_reg):
            assert ((num_reg_convs == 0) and (num_reg_fcs == 0))
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # add shared convs and fcs
        (self.shared_convs, self.shared_fcs, last_layer_dim) = self._add_conv_fc_branch(self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
        self.shared_out_channels = last_layer_dim

        # add cls specific branch
        (self.cls_convs, self.cls_fcs, self.cls_last_dim) = self._add_conv_fc_branch(self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)

        # add reg specific branch
        (self.reg_convs, self.reg_fcs, self.reg_last_dim) = self._add_conv_fc_branch(self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)

        # without any fc the predictors consume the flattened spatial map
        if ((self.num_shared_fcs == 0) and (not self.with_avg_pool)):
            if (self.num_cls_fcs == 0):
                self.cls_last_dim *= self.roi_feat_area
            if (self.num_reg_fcs == 0):
                self.reg_last_dim *= self.roi_feat_area

        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels have changed
        if self.with_cls:
            if self.custom_cls_channels:
                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
            else:
                # +1 for the background class
                cls_channels = (self.num_classes + 1)
            self.fc_cls = build_linear_layer(self.cls_predictor_cfg, in_features=self.cls_last_dim, out_features=cls_channels)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else (4 * self.num_classes))
            self.fc_reg = build_linear_layer(self.reg_predictor_cfg, in_features=self.reg_last_dim, out_features=out_dim_reg)

        if (init_cfg is None):
            # default Xavier init for the fc stacks when no init_cfg is given
            self.init_cfg += [dict(type='Xavier', distribution='uniform', override=[dict(name='shared_fcs'), dict(name='cls_fcs'), dict(name='reg_fcs')])]

    def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
        """Add shared or separable branch.

        convs -> avg pool (optional) -> fcs

        Args:
            num_branch_convs (int): number of 3x3 convs in the branch.
            num_branch_fcs (int): number of fc layers in the branch.
            in_channels (int): input channel count of the branch.
            is_shared (bool): whether this is the shared trunk.

        Returns:
            tuple: (conv ModuleList, fc ModuleList, output dim of the branch).
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if (num_branch_convs > 0):
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
                branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if (num_branch_fcs > 0):
            # for shared branch, only consider self.with_avg_pool;
            # for separated branches, also consider self.num_shared_fcs
            if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
                branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return (branch_convs, branch_fcs, last_layer_dim)

    def forward(self, x):
        """Run the shared trunk, then the cls and reg branches.

        Args:
            x (Tensor): RoI features, typically (num_rois, C, H, W).

        Returns:
            tuple: ``cls_score`` (or None when ``with_cls`` is False) and
            ``bbox_pred`` (or None when ``with_reg`` is False).
        """
        # shared part
        if (self.num_shared_convs > 0):
            for conv in self.shared_convs:
                x = conv(x)

        if (self.num_shared_fcs > 0):
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.flatten(1)
            for fc in self.shared_fcs:
                x = self.relu(fc(x))

        # separate branches
        x_cls = x
        x_reg = x

        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if (x_cls.dim() > 2):
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))

        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if (x_reg.dim() > 2):
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))

        cls_score = (self.fc_cls(x_cls) if self.with_cls else None)
        bbox_pred = (self.fc_reg(x_reg) if self.with_reg else None)
        return (cls_score, bbox_pred)
@HEADS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
    """``ConvFCBBoxHead`` with no convs and two shared fully-connected layers.

    Args:
        fc_out_channels (int): Output width of each shared fc. Default: 1024.
    """

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        # Fix the branch layout: no convs anywhere, two shared fcs only.
        layout = dict(
            num_shared_convs=0,
            num_shared_fcs=2,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0)
        super(Shared2FCBBoxHead, self).__init__(
            *args, fc_out_channels=fc_out_channels, **layout, **kwargs)
@HEADS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
    """``ConvFCBBoxHead`` with four shared convs and one shared fc.

    Args:
        fc_out_channels (int): Output width of the shared fc. Default: 1024.
    """

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        # Fix the branch layout: 4 shared convs + 1 shared fc, no
        # branch-specific layers.
        layout = dict(
            num_shared_convs=4,
            num_shared_fcs=1,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0)
        super(Shared4Conv1FCBBoxHead, self).__init__(
            *args, fc_out_channels=fc_out_channels, **layout, **kwargs)
class BasicResBlock(BaseModule):
    """Basic residual block.

    This block is a little different from the block in the ResNet backbone:
    ``conv1`` here is a 3x3 conv followed by a 1x1 projection (``conv2``),
    and the skip connection always goes through a 1x1 conv.

    Args:
        in_channels (int): Channels of the input feature map.
        out_channels (int): Channels of the output feature map.
        conv_cfg (dict): The config dict for convolution layers.
        norm_cfg (dict): The config dict for normalization layers.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 init_cfg=None):
        super(BasicResBlock, self).__init__(init_cfg)

        # main path: 3x3 conv (norm + act) then 1x1 projection without act
        self.conv1 = ConvModule(
            in_channels,
            in_channels,
            kernel_size=3,
            padding=1,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg)
        self.conv2 = ConvModule(
            in_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        # shortcut path: 1x1 projection without activation
        self.conv_identity = ConvModule(
            in_channels,
            out_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Sum the transformed input with its projected shortcut."""
        main = self.conv2(self.conv1(x))
        shortcut = self.conv_identity(x)
        return self.relu(main + shortcut)
@HEADS.register_module()
class DoubleConvFCBBoxHead(BBoxHead):
    r"""Bbox head used in Double-Head R-CNN

    .. code-block:: none

                                          /-> cls
                      /-> shared convs ->
                                          \-> reg
        roi features
                                          /-> cls
                      \-> shared fc    ->
                                          \-> reg
    """

    def __init__(self,
                 num_convs=0,
                 num_fcs=0,
                 conv_out_channels=1024,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 init_cfg=dict(
                     type='Normal',
                     override=[
                         dict(type='Normal', name='fc_cls', std=0.01),
                         dict(type='Normal', name='fc_reg', std=0.001),
                         dict(
                             type='Xavier',
                             name='fc_branch',
                             distribution='uniform')
                     ]),
                 **kwargs):
        # the conv branch always pools before its final fc
        kwargs.setdefault('with_avg_pool', True)
        super(DoubleConvFCBBoxHead, self).__init__(init_cfg=init_cfg, **kwargs)
        assert self.with_avg_pool
        assert (num_convs > 0)
        assert (num_fcs > 0)
        self.num_convs = num_convs
        self.num_fcs = num_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # increase the channel of input features
        self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels)

        # add conv heads
        self.conv_branch = self._add_conv_branch()
        # add fc heads
        self.fc_branch = self._add_fc_branch()

        out_dim_reg = (4 if self.reg_class_agnostic else (4 * self.num_classes))
        self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)

        # +1 for the background class
        self.fc_cls = nn.Linear(self.fc_out_channels, (self.num_classes + 1))
        self.relu = nn.ReLU(inplace=True)

    def _add_conv_branch(self):
        """Add the conv branch which consists of a sequential of conv layers."""
        branch_convs = ModuleList()
        for i in range(self.num_convs):
            branch_convs.append(
                Bottleneck(
                    inplanes=self.conv_out_channels,
                    planes=(self.conv_out_channels // 4),
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        return branch_convs

    def _add_fc_branch(self):
        """Add the fc branch which consists of a sequential of fc layers."""
        branch_fcs = ModuleList()
        for i in range(self.num_fcs):
            # the first fc consumes the flattened RoI feature map
            fc_in_channels = ((self.in_channels * self.roi_feat_area) if (i == 0) else self.fc_out_channels)
            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
        return branch_fcs

    def forward(self, x_cls, x_reg):
        """Forward; the conv branch consumes ``x_reg``, the fc branch ``x_cls``.

        Args:
            x_cls (Tensor): RoI features for the fc (classification) branch.
            x_reg (Tensor): RoI features for the conv (regression) branch.

        Returns:
            tuple[Tensor, Tensor]: ``cls_score`` and ``bbox_pred``.
        """
        # conv head
        x_conv = self.res_block(x_reg)

        for conv in self.conv_branch:
            x_conv = conv(x_conv)

        if self.with_avg_pool:
            x_conv = self.avg_pool(x_conv)

        x_conv = x_conv.view(x_conv.size(0), (- 1))
        bbox_pred = self.fc_reg(x_conv)

        # fc head
        x_fc = x_cls.view(x_cls.size(0), (- 1))
        for fc in self.fc_branch:
            x_fc = self.relu(fc(x_fc))

        cls_score = self.fc_cls(x_fc)

        return (cls_score, bbox_pred)
@HEADS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
    """BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    This inherits ``ConvFCBBoxHead`` with a modified forward() that can also
    expose the intermediate cls-reg-shared feature.
    """

    def _forward_shared(self, x):
        """Forward function for shared part."""
        for conv in self.shared_convs:
            x = conv(x)

        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.flatten(1)
            for fc in self.shared_fcs:
                x = self.relu(fc(x))

        return x

    def _forward_cls_reg(self, x):
        """Forward function for classification and regression parts."""

        def run_branch(feat, convs, fcs):
            # conv sub-branch, optional pool/flatten, then fc sub-branch
            for conv in convs:
                feat = conv(feat)
            if feat.dim() > 2:
                if self.with_avg_pool:
                    feat = self.avg_pool(feat)
                feat = feat.flatten(1)
            for fc in fcs:
                feat = self.relu(fc(feat))
            return feat

        x_cls = run_branch(x, self.cls_convs, self.cls_fcs)
        x_reg = run_branch(x, self.reg_convs, self.reg_fcs)
        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return (cls_score, bbox_pred)

    def forward(self, x, return_shared_feat=False):
        """Forward function.

        Args:
            x (Tensor): input features
            return_shared_feat (bool): If True, return cls-reg-shared feature.

        Return:
            out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``,
                if ``return_shared_feat`` is True, append ``x_shared`` to the
                returned tuple.
        """
        x_shared = self._forward_shared(x)
        out = self._forward_cls_reg(x_shared)

        if return_shared_feat:
            out += (x_shared, )

        return out
@HEADS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
    """RoI head for Double Head RCNN.

    https://arxiv.org/abs/1904.06493

    Args:
        reg_roi_scale_factor (float): Scale factor applied to the RoIs when
            extracting features for the regression branch.
    """

    def __init__(self, reg_roi_scale_factor, **kwargs):
        super(DoubleHeadRoIHead, self).__init__(**kwargs)
        self.reg_roi_scale_factor = reg_roi_scale_factor

    def _bbox_forward(self, x, rois):
        """Box head forward function used in both training and testing time."""
        feats = x[:self.bbox_roi_extractor.num_inputs]
        # cls and reg branches pool features at different RoI scales
        bbox_cls_feats = self.bbox_roi_extractor(feats, rois)
        bbox_reg_feats = self.bbox_roi_extractor(
            feats, rois, roi_scale_factor=self.reg_roi_scale_factor)
        if self.with_shared_head:
            bbox_cls_feats = self.shared_head(bbox_cls_feats)
            bbox_reg_feats = self.shared_head(bbox_reg_feats)
        cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)

        return dict(
            cls_score=cls_score,
            bbox_pred=bbox_pred,
            bbox_feats=bbox_cls_feats)
@HEADS.register_module()
class CoarseMaskHead(FCNMaskHead):
    """Coarse mask head used in PointRend.

    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsample it.

    Args:
        num_convs (int): Number of conv layers in the head. Default: 0.
        num_fcs (int): Number of fc layers in the head. Default: 2.
        fc_out_channels (int): Number of output channels of fc layer.
            Default: 1024.
        downsample_factor (int): The factor that feature map is downsampled by.
            Default: 2.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_convs=0,
                 num_fcs=2,
                 fc_out_channels=1024,
                 downsample_factor=2,
                 init_cfg=dict(
                     type='Xavier',
                     override=[
                         dict(name='fcs'),
                         dict(type='Constant', val=0.001, name='fc_logits')
                     ]),
                 *arg,
                 **kwarg):
        # parent is built without an upsample layer and without init_cfg;
        # this head's own init_cfg is restored right after
        super(CoarseMaskHead, self).__init__(*arg, num_convs=num_convs, upsample_cfg=dict(type=None), init_cfg=None, **kwarg)
        self.init_cfg = init_cfg
        self.num_fcs = num_fcs
        assert (self.num_fcs > 0)
        self.fc_out_channels = fc_out_channels
        self.downsample_factor = downsample_factor
        assert (self.downsample_factor >= 1)
        # remove conv_logits inherited from FCNMaskHead; prediction here is
        # done by fc_logits instead
        delattr(self, 'conv_logits')

        if (downsample_factor > 1):
            downsample_in_channels = (self.conv_out_channels if (self.num_convs > 0) else self.in_channels)
            # strided conv performs the spatial downsampling
            self.downsample_conv = ConvModule(downsample_in_channels, self.conv_out_channels, kernel_size=downsample_factor, stride=downsample_factor, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        else:
            self.downsample_conv = None

        self.output_size = ((self.roi_feat_size[0] // downsample_factor), (self.roi_feat_size[1] // downsample_factor))
        self.output_area = (self.output_size[0] * self.output_size[1])

        last_layer_dim = (self.conv_out_channels * self.output_area)

        self.fcs = ModuleList()
        for i in range(num_fcs):
            fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
            self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        # one (num_classes x H' x W') map per RoI, flattened
        output_channels = (self.num_classes * self.output_area)
        self.fc_logits = Linear(last_layer_dim, output_channels)

    def init_weights(self):
        """Skip ``FCNMaskHead``'s custom init and rely on ``init_cfg``."""
        super(FCNMaskHead, self).init_weights()

    @auto_fp16()
    def forward(self, x):
        """Predict coarse masks.

        Args:
            x (Tensor): RoI features of shape (num_rois, C, H, W).

        Returns:
            Tensor: Mask prediction of shape
            (num_rois, num_classes, \\*output_size).
        """
        for conv in self.convs:
            x = conv(x)

        if (self.downsample_conv is not None):
            x = self.downsample_conv(x)

        x = x.flatten(1)
        for fc in self.fcs:
            x = self.relu(fc(x))
        # unflatten fc output into a per-class spatial map
        mask_pred = self.fc_logits(x).view(x.size(0), self.num_classes, *self.output_size)
        return mask_pred
@HEADS.register_module()
class DynamicMaskHead(FCNMaskHead):
    r"""Dynamic Mask Head for
    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_

    Args:
        num_convs (int): Number of convolution layer.
            Defaults to 4.
        roi_feat_size (int): The output size of RoI extractor,
            Defaults to 14.
        in_channels (int): Input feature channels.
            Defaults to 256.
        conv_kernel_size (int): Kernel size of convolution layers.
            Defaults to 3.
        conv_out_channels (int): Output channels of convolution layers.
            Defaults to 256.
        num_classes (int): Number of classes.
            Defaults to 80
        class_agnostic (int): Whether generate class agnostic prediction.
            Defaults to False.
        upsample_cfg (dict): The config for upsample layer.
        conv_cfg (dict): The convolution layer config.
        norm_cfg (dict): The norm layer config.
        dynamic_conv_cfg (dict): The dynamic convolution layer config.
        loss_mask (dict): The config for mask loss.
    """

    def __init__(self,
                 num_convs=4,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 conv_out_channels=256,
                 num_classes=80,
                 class_agnostic=False,
                 upsample_cfg=dict(type='deconv', scale_factor=2),
                 conv_cfg=None,
                 norm_cfg=None,
                 dynamic_conv_cfg=dict(
                     type='DynamicConv',
                     in_channels=256,
                     feat_channels=64,
                     out_channels=256,
                     input_feat_shape=14,
                     with_proj=False,
                     act_cfg=dict(type='ReLU', inplace=True),
                     norm_cfg=dict(type='LN')),
                 loss_mask=dict(type='DiceLoss', loss_weight=8.0),
                 **kwargs):
        super(DynamicMaskHead, self).__init__(
            num_convs=num_convs,
            roi_feat_size=roi_feat_size,
            in_channels=in_channels,
            conv_kernel_size=conv_kernel_size,
            conv_out_channels=conv_out_channels,
            num_classes=num_classes,
            class_agnostic=class_agnostic,
            upsample_cfg=upsample_cfg,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            loss_mask=loss_mask,
            **kwargs)
        assert (class_agnostic is False), 'DynamicMaskHead only support class_agnostic=False'
        self.fp16_enabled = False
        # interaction module that conditions RoI features on proposal features
        self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)

    def init_weights(self):
        """Use xavier initialization for all weight parameter and set
        classification head bias as a specific value when use focal loss."""
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)
        nn.init.constant_(self.conv_logits.bias, 0.0)

    @auto_fp16()
    def forward(self, roi_feat, proposal_feat):
        """Forward function of DynamicMaskHead.

        Args:
            roi_feat (Tensor): Roi-pooling features with shape
                (batch_size*num_proposals, feature_dimensions,
                pooling_h , pooling_w).
            proposal_feat (Tensor): Intermediate feature get from
                diihead in last stage, has shape
                (batch_size*num_proposals, feature_dimensions)

        Returns:
            mask_pred (Tensor): Predicted foreground masks with shape
            (batch_size*num_proposals, num_classes, pooling_h*2, pooling_w*2).
        """
        proposal_feat = proposal_feat.reshape((- 1), self.in_channels)
        # instance-aware dynamic convolution conditioned on proposal_feat
        proposal_feat_iic = self.instance_interactive_conv(proposal_feat, roi_feat)

        x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())

        for conv in self.convs:
            x = conv(x)
        if (self.upsample is not None):
            x = self.upsample(x)
            if (self.upsample_method == 'deconv'):
                x = self.relu(x)
        mask_pred = self.conv_logits(x)
        return mask_pred

    @force_fp32(apply_to=('mask_pred',))
    def loss(self, mask_pred, mask_targets, labels):
        """Compute the mask loss, averaged across positives on all ranks."""
        num_pos = labels.new_ones(labels.size()).float().sum()
        avg_factor = torch.clamp(reduce_mean(num_pos), min=1.0).item()
        loss = dict()
        if (mask_pred.size(0) == 0):
            # keep the graph connected even when there is no positive sample
            loss_mask = mask_pred.sum()
        else:
            # select each sample's predicted mask for its gt class
            loss_mask = self.loss_mask(mask_pred[(torch.arange(num_pos).long(), labels, ...)].sigmoid(), mask_targets, avg_factor=avg_factor)
        loss['loss_mask'] = loss_mask
        return loss

    def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
        """Build mask targets for the positive proposals."""
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [res.pos_assigned_gt_inds for res in sampling_results]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, gt_masks, rcnn_train_cfg)
        return mask_targets
@HEADS.register_module()
class FeatureRelayHead(BaseModule):
    """Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        in_channels (int, optional): number of input channels. Default: 1024.
        out_conv_channels (int, optional): number of output channels before
            the spatial reshape. Default: 256.
        roi_feat_size (int, optional): roi feat size at box head. Default: 7.
        scale_factor (int, optional): scale factor to match roi feat size
            at mask head. Default: 2.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels=1024,
                 out_conv_channels=256,
                 roi_feat_size=7,
                 scale_factor=2,
                 init_cfg=dict(type='Kaiming', layer='Linear')):
        super(FeatureRelayHead, self).__init__(init_cfg)
        assert isinstance(roi_feat_size, int)

        self.in_channels = in_channels
        self.out_conv_channels = out_conv_channels
        self.roi_feat_size = roi_feat_size
        # fc output is reshaped to (out_conv_channels, roi_feat_size,
        # roi_feat_size) per RoI
        self.out_channels = ((roi_feat_size ** 2) * out_conv_channels)
        self.scale_factor = scale_factor
        self.fp16_enabled = False

        self.fc = nn.Linear(self.in_channels, self.out_channels)
        self.upsample = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True)

    @auto_fp16()
    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Box-head feature of shape (N, in_channels).

        Returns:
            Tensor or None: Spatial feature of shape
            (N, out_conv_channels, roi_feat_size * scale_factor,
            roi_feat_size * scale_factor), or None when there are no RoIs.
        """
        (N, in_C) = x.shape
        if (N > 0):
            out_C = self.out_conv_channels
            out_HW = self.roi_feat_size
            x = self.fc(x)
            x = x.reshape(N, out_C, out_HW, out_HW)
            x = self.upsample(x)
            return x
        # empty batch of RoIs
        return None
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
    r"""Multi-level fused semantic segmentation head.

    .. code-block:: none

        in_1 -> 1x1 conv ---
                            |
        in_2 -> 1x1 conv -- |
                           ||
        in_3 -> 1x1 conv - ||
                          |||                  /-> 1x1 conv (mask prediction)
        in_4 -> 1x1 conv -----> 3x3 convs (*4)
                            |                  \-> 1x1 conv (feature)
        in_5 -> 1x1 conv ---

    Args:
        num_ins (int): Number of input feature levels.
        fusion_level (int): Index of the level whose resolution the other
            levels are resized to before fusion.
        num_convs (int): Number of 3x3 convs after fusion. Default: 4.
        in_channels (int): Input channels of each level. Default: 256.
        conv_out_channels (int): Channels of the fused feature. Default: 256.
        num_classes (int): Number of semantic classes. Default: 183.
        conv_cfg (dict, optional): Config dict for convolution layers.
        norm_cfg (dict, optional): Config dict for normalization layers.
        ignore_label (int, optional): Deprecated; set ``ignore_index`` in
            ``loss_seg`` instead.
        loss_weight (float, optional): Deprecated; set ``loss_weight`` in
            ``loss_seg`` instead.
        loss_seg (dict): Config of the segmentation loss.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_ins,
                 fusion_level,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=183,
                 conv_cfg=None,
                 norm_cfg=None,
                 ignore_label=None,
                 loss_weight=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss', ignore_index=255,
                     loss_weight=0.2),
                 init_cfg=dict(
                     type='Kaiming', override=dict(name='conv_logits'))):
        super(FusedSemanticHead, self).__init__(init_cfg)
        self.num_ins = num_ins
        self.fusion_level = fusion_level
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False

        # per-level 1x1 lateral convs
        self.lateral_convs = nn.ModuleList()
        for i in range(self.num_ins):
            self.lateral_convs.append(
                ConvModule(
                    self.in_channels,
                    self.in_channels,
                    1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    inplace=False))

        # 3x3 convs applied to the fused feature
        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = self.in_channels if i == 0 else conv_out_channels
            self.convs.append(
                ConvModule(
                    in_channels,
                    conv_out_channels,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        self.conv_embedding = ConvModule(
            conv_out_channels,
            conv_out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)

        # backward compatibility for the deprecated arguments
        if ignore_label:
            loss_seg['ignore_index'] = ignore_label
        if loss_weight:
            loss_seg['loss_weight'] = loss_weight
        if ignore_label or loss_weight:
            # BUGFIX: the message previously misspelled ``ignore_index`` as
            # ``ingore_index``, pointing users at a key that does not exist.
            warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
        self.criterion = build_loss(loss_seg)

    @auto_fp16()
    def forward(self, feats):
        """Fuse multi-level features and predict semantic segmentation.

        Args:
            feats (tuple[Tensor]): Multi-level features.

        Returns:
            tuple[Tensor, Tensor]: Semantic prediction logits and the fused
            semantic feature (consumed by downstream heads).
        """
        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
        fused_size = tuple(x.shape[-2:])
        for i, feat in enumerate(feats):
            if i != self.fusion_level:
                # resize every other level to the fusion level's resolution
                feat = F.interpolate(
                    feat, size=fused_size, mode='bilinear', align_corners=True)
                x += self.lateral_convs[i](feat)

        for i in range(self.num_convs):
            x = self.convs[i](x)

        mask_pred = self.conv_logits(x)
        x = self.conv_embedding(x)
        return mask_pred, x

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, labels):
        """Cross-entropy loss between predicted and gt semantic maps."""
        labels = labels.squeeze(1).long()
        loss_semantic_seg = self.criterion(mask_pred, labels)
        return loss_semantic_seg
@HEADS.register_module()
class GlobalContextHead(BaseModule):
    """Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        num_convs (int, optional): number of convolutional layers in the
            head. Default: 4.
        in_channels (int, optional): number of input channels. Default: 256.
        conv_out_channels (int, optional): number of output channels before
            classification layer. Default: 256.
        num_classes (int, optional): number of classes. Default: 80.
        loss_weight (float, optional): global context loss weight. Default: 1.
        conv_cfg (dict, optional): config to init conv layer. Default: None.
        norm_cfg (dict, optional): config to init norm layer. Default: None.
        conv_to_res (bool, optional): if True, 2 convs will be grouped into
            1 `SimplifiedBasicBlock` using a skip connection. Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=80,
                 loss_weight=1.0,
                 conv_cfg=None,
                 norm_cfg=None,
                 conv_to_res=False,
                 init_cfg=dict(
                     type='Normal', std=0.01, override=dict(name='fc'))):
        super(GlobalContextHead, self).__init__(init_cfg)
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.loss_weight = loss_weight
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.conv_to_res = conv_to_res
        self.fp16_enabled = False

        if self.conv_to_res:
            # each residual block stands in for two plain convs
            num_res_blocks = num_convs // 2
            self.convs = ResLayer(
                SimplifiedBasicBlock,
                in_channels,
                self.conv_out_channels,
                num_res_blocks,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
            self.num_convs = num_res_blocks
        else:
            convs = []
            for i in range(self.num_convs):
                in_ch = self.in_channels if i == 0 else conv_out_channels
                convs.append(
                    ConvModule(
                        in_ch,
                        conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            self.convs = nn.ModuleList(convs)

        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(conv_out_channels, num_classes)

        self.criterion = nn.BCEWithLogitsLoss()

    @auto_fp16()
    def forward(self, feats):
        """Predict the multi-label class vector from the coarsest feature."""
        x = feats[-1]
        for i in range(self.num_convs):
            x = self.convs[i](x)
        x = self.pool(x)

        # multi-class prediction from the pooled global feature
        mc_pred = self.fc(x.reshape(x.size(0), -1))

        return mc_pred, x

    @force_fp32(apply_to=('pred', ))
    def loss(self, pred, labels):
        """Multi-label BCE loss against the classes present in each image."""
        labels = [lbl.unique() for lbl in labels]
        targets = pred.new_zeros(pred.size())
        for i, label in enumerate(labels):
            targets[i, label] = 1.0
        return self.loss_weight * self.criterion(pred, targets)
@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
    """``FCNMaskHead`` variant that can fuse a residual feature from the
    previous stage into its input.

    Args:
        with_conv_res (bool): Whether to build the 1x1 conv that transforms
            the residual feature before fusion. Default: True.
    """

    def __init__(self, with_conv_res=True, *args, **kwargs):
        super(HTCMaskHead, self).__init__(*args, **kwargs)
        self.with_conv_res = with_conv_res
        if self.with_conv_res:
            self.conv_res = ConvModule(
                self.conv_out_channels,
                self.conv_out_channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)

    def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
        """Run the mask head, optionally fusing ``res_feat`` into ``x``.

        Depending on the flags, returns the mask logits, the pre-upsample
        feature, or both (logits first).
        """
        if res_feat is not None:
            assert self.with_conv_res
            x = x + self.conv_res(res_feat)
        for conv in self.convs:
            x = conv(x)
        res_feat = x
        outs = []
        if return_logits:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
            outs.append(self.conv_logits(x))
        if return_feat:
            outs.append(res_feat)
        return outs if len(outs) > 1 else outs[0]
@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
    """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetMaskHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if not conv_to_res:
            return
        # each SimplifiedBasicBlock replaces a pair of 3x3 convs
        assert self.conv_kernel_size == 3
        self.num_res_blocks = self.num_convs // 2
        self.convs = ResLayer(
            SimplifiedBasicBlock,
            self.in_channels,
            self.conv_out_channels,
            self.num_res_blocks,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
@HEADS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
    """Semantic segmentation head for `SCNet
    <https://arxiv.org/abs/2012.10150>`_.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetSemanticHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if not self.conv_to_res:
            return
        # each SimplifiedBasicBlock replaces a pair of 3x3 convs
        num_res_blocks = self.num_convs // 2
        self.convs = ResLayer(
            SimplifiedBasicBlock,
            self.in_channels,
            self.conv_out_channels,
            num_res_blocks,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
        self.num_convs = num_res_blocks
@HEADS.register_module()
class PISARoIHead(StandardRoIHead):
    """The RoI head for `Prime Sample Attention in Object Detection
    <https://arxiv.org/abs/1904.04821>`_."""

    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None):
        """Forward function for training.

        Args:
            x (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            proposals (list[Tensors]): List of region proposals.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : True segmentation masks for each box
                used if the architecture supports a segmentation task.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        if self.with_bbox or self.with_mask:
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            sampling_results = []
            neg_label_weights = []
            for i in range(num_imgs):
                # Assign and sample proposals image by image.
                assign_result = self.bbox_assigner.assign(
                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
                    gt_labels[i])
                sampling_result = self.bbox_sampler.sample(
                    assign_result,
                    proposal_list[i],
                    gt_bboxes[i],
                    gt_labels[i],
                    feats=[lvl_feat[i][None] for lvl_feat in x])
                # The sampler may return (sampling_result, neg_label_weight)
                # as a tuple; otherwise there are no per-negative weights
                # for this image and None is recorded instead.
                neg_label_weight = None
                if isinstance(sampling_result, tuple):
                    (sampling_result, neg_label_weight) = sampling_result
                sampling_results.append(sampling_result)
                neg_label_weights.append(neg_label_weight)
        losses = dict()
        if self.with_bbox:
            # Unlike StandardRoIHead, the negative label weights are passed
            # down so they can be written into the bbox targets.
            bbox_results = self._bbox_forward_train(
                x,
                sampling_results,
                gt_bboxes,
                gt_labels,
                img_metas,
                neg_label_weights=neg_label_weights)
            losses.update(bbox_results['loss_bbox'])
        if self.with_mask:
            mask_results = self._mask_forward_train(x, sampling_results,
                                                    bbox_results['bbox_feats'],
                                                    gt_masks, img_metas)
            losses.update(mask_results['loss_mask'])
        return losses

    def _bbox_forward(self, x, rois):
        'Box forward function used in both training and testing.'
        # Extract RoI features from the configured number of feature levels.
        bbox_feats = self.bbox_roi_extractor(
            x[:self.bbox_roi_extractor.num_inputs], rois)
        if self.with_shared_head:
            bbox_feats = self.shared_head(bbox_feats)
        (cls_score, bbox_pred) = self.bbox_head(bbox_feats)
        bbox_results = dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        return bbox_results

    def _bbox_forward_train(self,
                            x,
                            sampling_results,
                            gt_bboxes,
                            gt_labels,
                            img_metas,
                            neg_label_weights=None):
        'Run forward function and calculate loss for box head in training.'
        rois = bbox2roi([res.bboxes for res in sampling_results])
        bbox_results = self._bbox_forward(x, rois)
        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                                  gt_labels, self.train_cfg)
        # Overwrite the label weights of negative RoIs with the weights
        # produced by the sampler. Relies on targets being laid out
        # image-by-image as [pos..., neg...] per image, matching the
        # sampling_results order.
        if neg_label_weights[0] is not None:
            label_weights = bbox_targets[1]
            cur_num_rois = 0
            for i in range(len(sampling_results)):
                num_pos = sampling_results[i].pos_inds.size(0)
                num_neg = sampling_results[i].neg_inds.size(0)
                label_weights[(cur_num_rois + num_pos):(
                    (cur_num_rois + num_pos) + num_neg)] = neg_label_weights[i]
                cur_num_rois += (num_pos + num_neg)
        cls_score = bbox_results['cls_score']
        bbox_pred = bbox_results['bbox_pred']
        # ISR-P: rewrite the bbox targets via importance-based sample
        # reweighting when an 'isr' config is present in train_cfg.
        isr_cfg = self.train_cfg.get('isr', None)
        if isr_cfg is not None:
            bbox_targets = isr_p(
                cls_score,
                bbox_pred,
                bbox_targets,
                rois,
                sampling_results,
                self.bbox_head.loss_cls,
                self.bbox_head.bbox_coder,
                **isr_cfg,
                num_class=self.bbox_head.num_classes)
        loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
                                        *bbox_targets)
        # CARL: add the classification-aware regression loss terms when a
        # 'carl' config is present in train_cfg.
        carl_cfg = self.train_cfg.get('carl', None)
        if carl_cfg is not None:
            loss_carl = carl_loss(
                cls_score,
                bbox_targets[0],
                bbox_pred,
                bbox_targets[2],
                self.bbox_head.loss_bbox,
                **carl_cfg,
                num_class=self.bbox_head.num_classes)
            loss_bbox.update(loss_carl)
        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
    """A single ResNet stage wrapped as a shared RoI head.

    Builds stage ``stage`` of the ResNet identified by ``depth`` (via
    ``ResNet.arch_settings``) and registers it as ``layer{stage + 1}``.

    Args:
        depth (int): ResNet depth key into ``ResNet.arch_settings``.
        stage (int): Zero-based stage index to build. Default: 3.
        stride (int): Stride of the stage. Default: 2.
        dilation (int): Dilation of the stage. Default: 1.
        style (str): Conv layout style passed to ``_ResLayer``.
        norm_cfg (dict): Normalization layer config.
        norm_eval (bool): Keep BatchNorm layers in eval mode during
            training. Default: True.
        with_cp (bool): Use gradient checkpointing in the stage.
        dcn (dict, optional): DCN config forwarded to ``_ResLayer``.
        pretrained (str, optional): Deprecated; use ``init_cfg`` instead.
        init_cfg (dict | list[dict], optional): Initialization config.
    """

    def __init__(self,
                 depth,
                 stage=3,
                 stride=2,
                 dilation=1,
                 style='pytorch',
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 with_cp=False,
                 dcn=None,
                 pretrained=None,
                 init_cfg=None):
        super(ResLayer, self).__init__(init_cfg)
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.stage = stage
        self.fp16_enabled = False

        # Look up the block class and per-stage block counts for this depth,
        # then derive channel widths the same way ResNet itself does.
        block, stage_blocks = ResNet.arch_settings[depth]
        num_blocks = stage_blocks[stage]
        out_planes = 64 * 2 ** stage
        in_planes = 64 * 2 ** (stage - 1) * block.expansion
        stage_layer = _ResLayer(
            block,
            in_planes,
            out_planes,
            num_blocks,
            stride=stride,
            dilation=dilation,
            style=style,
            with_cp=with_cp,
            norm_cfg=self.norm_cfg,
            dcn=dcn)
        self.add_module(f'layer{stage + 1}', stage_layer)

        # Handle the deprecated `pretrained` argument, mirroring the
        # convention used by mmdet backbones.
        assert not (init_cfg and pretrained), (
            'init_cfg and pretrained cannot be specified at the same time')
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is a deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

    @auto_fp16()
    def forward(self, x):
        """Run the wrapped ResNet stage on ``x``."""
        stage_layer = getattr(self, f'layer{self.stage + 1}')
        return stage_layer(x)

    def train(self, mode=True):
        """Switch train/eval mode; optionally freeze BatchNorm stats."""
        super(ResLayer, self).train(mode)
        if self.norm_eval:
            for module in self.modules():
                if isinstance(module, nn.BatchNorm2d):
                    module.eval()