GFocalV2-master/mmdet/models/dense_heads/centripetal_head.py
import torch.nn as nn
from mmcv.cnn import ConvModule, normal_init
from mmcv.ops import DeformConv2d

from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from .corner_head import CornerHead


@HEADS.register_module()
class CentripetalHead(CornerHead):
    """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for
    Object Detection.

    CentripetalHead inherits from :class:`CornerHead`. It removes the
    embedding branch and adds guiding shift and centripetal shift branches.
    More details can be found in the `paper
    <https://arxiv.org/abs/2003.09119>`_ .

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        num_feat_levels (int): Levels of feature from the previous module. 2
            for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104
            outputs the final feature and intermediate supervision feature and
            HourglassNet-52 only outputs the final feature. Default: 2.
        corner_emb_channels (int): Channel of embedding vector. Default: 1.
        train_cfg (dict | None): Training config. Useless in CornerHead,
            but we keep this variable for SingleStageDetector. Default: None.
        test_cfg (dict | None): Testing config of CornerHead. Default: None.
        loss_heatmap (dict | None): Config of corner heatmap loss. Default:
            GaussianFocalLoss.
        loss_embedding (dict | None): Config of corner embedding loss.
            Default: AssociativeEmbeddingLoss.
        loss_offset (dict | None): Config of corner offset loss. Default:
            SmoothL1Loss.
        loss_guiding_shift (dict): Config of guiding shift loss. Default:
            SmoothL1Loss.
        loss_centripetal_shift (dict): Config of centripetal shift loss.
            Default: SmoothL1Loss.
    """

    def __init__(self,
                 *args,
                 centripetal_shift_channels=2,
                 guiding_shift_channels=2,
                 feat_adaption_conv_kernel=3,
                 loss_guiding_shift=dict(
                     type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
                 loss_centripetal_shift=dict(
                     type='SmoothL1Loss', beta=1.0, loss_weight=1),
                 **kwargs):
        assert centripetal_shift_channels == 2, (
            'CentripetalHead only support centripetal_shift_channels == 2')
        self.centripetal_shift_channels = centripetal_shift_channels
        assert guiding_shift_channels == 2, (
            'CentripetalHead only support guiding_shift_channels == 2')
        self.guiding_shift_channels = guiding_shift_channels
        self.feat_adaption_conv_kernel = feat_adaption_conv_kernel
        super(CentripetalHead, self).__init__(*args, **kwargs)
        self.loss_guiding_shift = build_loss(loss_guiding_shift)
        self.loss_centripetal_shift = build_loss(loss_centripetal_shift)

    def _init_centripetal_layers(self):
        """Initialize centripetal layers.

        Including feature adaption deform convs (feat_adaption), deform offset
        prediction convs (dcn_off), guiding shift (guiding_shift) and
        centripetal shift (centripetal_shift). Each branch has two parts:
        prefix `tl_` for top-left and `br_` for bottom-right.
        """
        self.tl_feat_adaption = nn.ModuleList()
        self.br_feat_adaption = nn.ModuleList()
        self.tl_dcn_offset = nn.ModuleList()
        self.br_dcn_offset = nn.ModuleList()
        self.tl_guiding_shift = nn.ModuleList()
        self.br_guiding_shift = nn.ModuleList()
        self.tl_centripetal_shift = nn.ModuleList()
        self.br_centripetal_shift = nn.ModuleList()

        for _ in range(self.num_feat_levels):
            self.tl_feat_adaption.append(
                DeformConv2d(self.in_channels, self.in_channels,
                             self.feat_adaption_conv_kernel, 1, 1))
            self.br_feat_adaption.append(
                DeformConv2d(self.in_channels, self.in_channels,
                             self.feat_adaption_conv_kernel, 1, 1))

            self.tl_guiding_shift.append(
                self._make_layers(
                    out_channels=self.guiding_shift_channels,
                    in_channels=self.in_channels))
            self.br_guiding_shift.append(
                self._make_layers(
                    out_channels=self.guiding_shift_channels,
                    in_channels=self.in_channels))

            self.tl_dcn_offset.append(
                ConvModule(
                    self.guiding_shift_channels,
                    self.feat_adaption_conv_kernel**2 *
                    self.guiding_shift_channels,
                    1,
                    bias=False,
                    act_cfg=None))
            self.br_dcn_offset.append(
                ConvModule(
                    self.guiding_shift_channels,
                    self.feat_adaption_conv_kernel**2 *
                    self.guiding_shift_channels,
                    1,
                    bias=False,
                    act_cfg=None))

            self.tl_centripetal_shift.append(
                self._make_layers(
                    out_channels=self.centripetal_shift_channels,
                    in_channels=self.in_channels))
            self.br_centripetal_shift.append(
                self._make_layers(
                    out_channels=self.centripetal_shift_channels,
                    in_channels=self.in_channels))

    def _init_layers(self):
        """Initialize layers for CentripetalHead.

        Including two parts: CornerHead layers and CentripetalHead layers.
        """
        super()._init_layers()  # using _init_layers in CornerHead
        self._init_centripetal_layers()

    def init_weights(self):
        """Initialize weights of the head."""
        super().init_weights()
        for i in range(self.num_feat_levels):
            normal_init(self.tl_feat_adaption[i], std=0.01)
            normal_init(self.br_feat_adaption[i], std=0.01)
            normal_init(self.tl_dcn_offset[i].conv, std=0.1)
            normal_init(self.br_dcn_offset[i].conv, std=0.1)
            _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]]
            _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]]
            _ = [
                x.conv.reset_parameters()
                for x in self.tl_centripetal_shift[i]
            ]
            _ = [
                x.conv.reset_parameters()
                for x in self.br_centripetal_shift[i]
            ]

    def forward_single(self, x, lvl_ind):
        """Forward feature of a single level.

        Args:
            x (Tensor): Feature of a single level.
            lvl_ind (int): Level index of current feature.

        Returns:
            tuple[Tensor]: A tuple of CentripetalHead's output for current
            feature level. Containing the following Tensors:

                - tl_heat (Tensor): Predicted top-left corner heatmap.
                - br_heat (Tensor): Predicted bottom-right corner heatmap.
                - tl_off (Tensor): Predicted top-left offset heatmap.
                - br_off (Tensor): Predicted bottom-right offset heatmap.
                - tl_guiding_shift (Tensor): Predicted top-left guiding shift
                  heatmap.
                - br_guiding_shift (Tensor): Predicted bottom-right guiding
                  shift heatmap.
                - tl_centripetal_shift (Tensor): Predicted top-left
                  centripetal shift heatmap.
                - br_centripetal_shift (Tensor): Predicted bottom-right
                  centripetal shift heatmap.
        """
        tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super(
        ).forward_single(
            x, lvl_ind, return_pool=True)

        tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool)
        br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool)

        tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach())
        br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach())

        tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool,
                                                          tl_dcn_offset)
        br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool,
                                                          br_dcn_offset)

        tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind](
            tl_feat_adaption)
        br_centripetal_shift = self.br_centripetal_shift[lvl_ind](
            br_feat_adaption)

        result_list = [
            tl_heat, br_heat, tl_off, br_off, tl_guiding_shift,
            br_guiding_shift, tl_centripetal_shift, br_centripetal_shift
        ]
        return result_list

    def loss(self,
             tl_heats,
             br_heats,
             tl_offs,
             br_offs,
             tl_guiding_shifts,
             br_guiding_shifts,
             tl_centripetal_shifts,
             br_centripetal_shifts,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            tl_heats (list[Tensor]): Top-left corner heatmaps for each level
                with shape (N, num_classes, H, W).
            br_heats (list[Tensor]): Bottom-right corner heatmaps for each
                level with shape (N, num_classes, H, W).
            tl_offs (list[Tensor]): Top-left corner offsets for each level
                with shape (N, corner_offset_channels, H, W).
            br_offs (list[Tensor]): Bottom-right corner offsets for each level
                with shape (N, corner_offset_channels, H, W).
            tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
                level with shape (N, guiding_shift_channels, H, W).
            br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
                each level with shape (N, guiding_shift_channels, H, W).
            tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
                for each level with shape (N, centripetal_shift_channels,
                H, W).
            br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
                shifts for each level with shape
                (N, centripetal_shift_channels, H, W).
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [left, top, right, bottom] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components. Containing the
            following losses:

                - det_loss (list[Tensor]): Corner keypoint losses of all
                  feature levels.
                - off_loss (list[Tensor]): Corner offset losses of all
                  feature levels.
                - guiding_loss (list[Tensor]): Guiding shift losses of all
                  feature levels.
                - centripetal_loss (list[Tensor]): Centripetal shift losses of
                  all feature levels.
        """
        targets = self.get_targets(
            gt_bboxes,
            gt_labels,
            tl_heats[-1].shape,
            img_metas[0]['pad_shape'],
            with_corner_emb=self.with_corner_emb,
            with_guiding_shift=True,
            with_centripetal_shift=True)
        mlvl_targets = [targets for _ in range(self.num_feat_levels)]
        [det_losses, off_losses, guiding_losses, centripetal_losses
         ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs,
                         br_offs, tl_guiding_shifts, br_guiding_shifts,
                         tl_centripetal_shifts, br_centripetal_shifts,
                         mlvl_targets)
        loss_dict = dict(
            det_loss=det_losses,
            off_loss=off_losses,
            guiding_loss=guiding_losses,
            centripetal_loss=centripetal_losses)
        return loss_dict

    def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,
                    br_guiding_shift, tl_centripetal_shift,
                    br_centripetal_shift, targets):
        """Compute losses for single level.

        Args:
            tl_hmp (Tensor): Top-left corner heatmap for current level with
                shape (N, num_classes, H, W).
            br_hmp (Tensor): Bottom-right corner heatmap for current level
                with shape (N, num_classes, H, W).
            tl_off (Tensor): Top-left corner offset for current level with
                shape (N, corner_offset_channels, H, W).
            br_off (Tensor): Bottom-right corner offset for current level with
                shape (N, corner_offset_channels, H, W).
            tl_guiding_shift (Tensor): Top-left guiding shift for current
                level with shape (N, guiding_shift_channels, H, W).
            br_guiding_shift (Tensor): Bottom-right guiding shift for current
                level with shape (N, guiding_shift_channels, H, W).
            tl_centripetal_shift (Tensor): Top-left centripetal shift for
                current level with shape (N, centripetal_shift_channels,
                H, W).
            br_centripetal_shift (Tensor): Bottom-right centripetal shift for
                current level with shape (N, centripetal_shift_channels,
                H, W).
            targets (dict): Corner target generated by `get_targets`.

        Returns:
            tuple[torch.Tensor]: Losses of the head's different branches
            containing the following losses:

                - det_loss (Tensor): Corner keypoint loss.
                - off_loss (Tensor): Corner offset loss.
                - guiding_loss (Tensor): Guiding shift loss.
                - centripetal_loss (Tensor): Centripetal shift loss.
        """
        targets['corner_embedding'] = None

        det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None,
                                                       None, tl_off, br_off,
                                                       targets)

        gt_tl_guiding_shift = targets['topleft_guiding_shift']
        gt_br_guiding_shift = targets['bottomright_guiding_shift']
        gt_tl_centripetal_shift = targets['topleft_centripetal_shift']
        gt_br_centripetal_shift = targets['bottomright_centripetal_shift']

        gt_tl_heatmap = targets['topleft_heatmap']
        gt_br_heatmap = targets['bottomright_heatmap']
        # We only compute the offset loss at the real corner position.
        # The value of real corner would be 1 in heatmap ground truth.
        # The mask is computed in class agnostic mode and its shape is
        # batch * 1 * width * height.
        tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
            gt_tl_heatmap)
        br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
            gt_br_heatmap)

        # Guiding shift loss
        tl_guiding_loss = self.loss_guiding_shift(
            tl_guiding_shift,
            gt_tl_guiding_shift,
            tl_mask,
            avg_factor=tl_mask.sum())
        br_guiding_loss = self.loss_guiding_shift(
            br_guiding_shift,
            gt_br_guiding_shift,
            br_mask,
            avg_factor=br_mask.sum())
        guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0
        # Centripetal shift loss
        tl_centripetal_loss = self.loss_centripetal_shift(
            tl_centripetal_shift,
            gt_tl_centripetal_shift,
            tl_mask,
            avg_factor=tl_mask.sum())
        br_centripetal_loss = self.loss_centripetal_shift(
            br_centripetal_shift,
            gt_br_centripetal_shift,
            br_mask,
            avg_factor=br_mask.sum())
        centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0

        return det_loss, off_loss, guiding_loss, centripetal_loss

    def get_bboxes(self,
                   tl_heats,
                   br_heats,
                   tl_offs,
                   br_offs,
                   tl_guiding_shifts,
                   br_guiding_shifts,
                   tl_centripetal_shifts,
                   br_centripetal_shifts,
                   img_metas,
                   rescale=False,
                   with_nms=True):
        """Transform network output for a batch into bbox predictions.

        Args:
            tl_heats (list[Tensor]): Top-left corner heatmaps for each level
                with shape (N, num_classes, H, W).
            br_heats (list[Tensor]): Bottom-right corner heatmaps for each
                level with shape (N, num_classes, H, W).
            tl_offs (list[Tensor]): Top-left corner offsets for each level
                with shape (N, corner_offset_channels, H, W).
            br_offs (list[Tensor]): Bottom-right corner offsets for each level
                with shape (N, corner_offset_channels, H, W).
            tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
                level with shape (N, guiding_shift_channels, H, W). Useless in
                this function, we keep this arg because it's the raw output
                from CentripetalHead.
            br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
                each level with shape (N, guiding_shift_channels, H, W).
                Useless in this function, we keep this arg because it's the
                raw output from CentripetalHead.
            tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
                for each level with shape (N, centripetal_shift_channels,
                H, W).
            br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
                shifts for each level with shape
                (N, centripetal_shift_channels, H, W).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.
        """
        assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(
            img_metas)
        result_list = []
        for img_id in range(len(img_metas)):
            result_list.append(
                self._get_bboxes_single(
                    tl_heats[-1][img_id:img_id + 1, :],
                    br_heats[-1][img_id:img_id + 1, :],
                    tl_offs[-1][img_id:img_id + 1, :],
                    br_offs[-1][img_id:img_id + 1, :],
                    img_metas[img_id],
                    tl_emb=None,
                    br_emb=None,
                    tl_centripetal_shift=tl_centripetal_shifts[-1][
                        img_id:img_id + 1, :],
                    br_centripetal_shift=br_centripetal_shifts[-1][
                        img_id:img_id + 1, :],
                    rescale=rescale,
                    with_nms=with_nms))

        return result_list
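For orientation, here is a minimal sketch of how a head like this might be instantiated through mmdet's registry. The config values below (80 classes, 256 channels) are illustrative assumptions, not the repo's official CentripetalNet config.

# Hypothetical construction via the registry; values are illustrative.
from mmdet.models import build_head

head = build_head(
    dict(
        type='CentripetalHead',
        num_classes=80,
        in_channels=256,
        num_feat_levels=2,  # HourglassNet-104 supervises two levels
        loss_guiding_shift=dict(
            type='SmoothL1Loss', beta=1.0, loss_weight=0.05)))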
GFocalV2-master/mmdet/models/dense_heads/paa_head.py
import numpy as np
import torch
from mmcv.runner import force_fp32

from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.models import HEADS
from mmdet.models.dense_heads import ATSSHead

EPS = 1e-12
try:
    import sklearn.mixture as skm
except ImportError:
    skm = None


def levels_to_images(mlvl_tensor):
    """Concat multi-level feature maps by image.

    [feature_level0, feature_level1...] -> [feature_image0, feature_image1...]
    Convert the shape of each element in mlvl_tensor from (N, C, H, W) to
    (N, H*W, C), then split the element to N elements with shape (H*W, C), and
    concat elements in same image of all level along first dimension.

    Args:
        mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from
            corresponding level. Each element is of shape (N, C, H, W)

    Returns:
        list[torch.Tensor]: A list that contains N tensors and each tensor is
            of shape (num_elements, C)
    """
    batch_size = mlvl_tensor[0].size(0)
    batch_list = [[] for _ in range(batch_size)]
    channels = mlvl_tensor[0].size(1)
    for t in mlvl_tensor:
        t = t.permute(0, 2, 3, 1)
        t = t.view(batch_size, -1, channels).contiguous()
        for img in range(batch_size):
            batch_list[img].append(t[img])
    return [torch.cat(item, 0) for item in batch_list]


@HEADS.register_module()
class PAAHead(ATSSHead):
    """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU
    Prediction for Object Detection.

    Code is modified from the `official github repo
    <https://github.com/kkhoot/PAA/blob/master/paa_core
    /modeling/rpn/paa/loss.py>`_.

    More details can be found in the `paper
    <https://arxiv.org/abs/2007.08103>`_ .

    Args:
        topk (int): Select topk samples with smallest loss in each level.
        score_voting (bool): Whether to use score voting in post-process.
        covariance_type: String describing the type of covariance parameters
            to be used in :class:`sklearn.mixture.GaussianMixture`.
            It must be one of:

            - 'full': each component has its own general covariance matrix
            - 'tied': all components share the same general covariance matrix
            - 'diag': each component has its own diagonal covariance matrix
            - 'spherical': each component has its own single variance

            Default: 'diag'. From 'full' to 'spherical', the gmm fitting
            process is faster yet the performance could be influenced. For
            most cases, 'diag' should be a good choice.
    """

    def __init__(self,
                 *args,
                 topk=9,
                 score_voting=True,
                 covariance_type='diag',
                 **kwargs):
        # topk used in paa reassign process
        self.topk = topk
        self.with_score_voting = score_voting
        self.covariance_type = covariance_type
        super(PAAHead, self).__init__(*args, **kwargs)

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             iou_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level.
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            iou_preds (list[Tensor]): iou_preds for each scale
                level with shape (N, num_anchors * 1, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
        )
        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
         pos_gt_index) = cls_reg_targets
        cls_scores = levels_to_images(cls_scores)
        cls_scores = [
            item.reshape(-1, self.cls_out_channels) for item in cls_scores
        ]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
        iou_preds = levels_to_images(iou_preds)
        iou_preds = [item.reshape(-1, 1) for item in iou_preds]
        pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,
                                       cls_scores, bbox_preds, labels,
                                       labels_weight, bboxes_target,
                                       bboxes_weight, pos_inds)

        with torch.no_grad():
            labels, label_weights, bbox_weights, num_pos = multi_apply(
                self.paa_reassign,
                pos_losses_list,
                labels,
                labels_weight,
                bboxes_weight,
                pos_inds,
                pos_gt_index,
                anchor_list,
            )
            num_pos = sum(num_pos)
        # convert all tensor list to a flatten tensor
        cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
        bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
        iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))
        labels = torch.cat(labels, 0).view(-1)
        flatten_anchors = torch.cat(
            [torch.cat(item, 0) for item in anchor_list])
        labels_weight = torch.cat(labels_weight, 0).view(-1)
        bboxes_target = torch.cat(bboxes_target,
                                  0).view(-1, bboxes_target[0].size(-1))

        pos_inds_flatten = ((labels >= 0)
                            &
                            (labels < self.num_classes)).nonzero().reshape(-1)

        losses_cls = self.loss_cls(
            cls_scores,
            labels,
            labels_weight,
            avg_factor=max(num_pos, len(img_metas)))  # avoid num_pos=0
        if num_pos:
            pos_bbox_pred = self.bbox_coder.decode(
                flatten_anchors[pos_inds_flatten],
                bbox_preds[pos_inds_flatten])
            pos_bbox_target = bboxes_target[pos_inds_flatten]
            iou_target = bbox_overlaps(
                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
            losses_iou = self.loss_centerness(
                iou_preds[pos_inds_flatten],
                iou_target.unsqueeze(-1),
                avg_factor=num_pos)
            losses_bbox = self.loss_bbox(
                pos_bbox_pred,
                pos_bbox_target,
                iou_target.clamp(min=EPS),
                avg_factor=iou_target.sum())
        else:
            losses_iou = iou_preds.sum() * 0
            losses_bbox = bbox_preds.sum() * 0

        return dict(
            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)

    def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight,
                     bbox_target, bbox_weight, pos_inds):
        """Calculate loss of all potential positive samples obtained from
        first match process.

        Args:
            anchors (list[Tensor]): Anchors of each scale.
            cls_score (Tensor): Box scores of single image with shape
                (num_anchors, num_classes)
            bbox_pred (Tensor): Box energies / deltas of single image
                with shape (num_anchors, 4)
            label (Tensor): classification target of each anchor with
                shape (num_anchors,)
            label_weight (Tensor): Classification loss weight of each
                anchor with shape (num_anchors).
            bbox_target (dict): Regression target of each anchor with
                shape (num_anchors, 4).
            bbox_weight (Tensor): Bbox weight of each anchor with shape
                (num_anchors, 4).
            pos_inds (Tensor): Index of all positive samples got from
                first assign process.

        Returns:
            Tensor: Losses of all positive samples in single image.
        """
        if not len(pos_inds):
            return cls_score.new([]),
        anchors_all_level = torch.cat(anchors, 0)
        pos_scores = cls_score[pos_inds]
        pos_bbox_pred = bbox_pred[pos_inds]
        pos_label = label[pos_inds]
        pos_label_weight = label_weight[pos_inds]
        pos_bbox_target = bbox_target[pos_inds]
        pos_bbox_weight = bbox_weight[pos_inds]
        pos_anchors = anchors_all_level[pos_inds]
        pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred)

        # to keep loss dimension
        loss_cls = self.loss_cls(
            pos_scores,
            pos_label,
            pos_label_weight,
            avg_factor=self.loss_cls.loss_weight,
            reduction_override='none')

        loss_bbox = self.loss_bbox(
            pos_bbox_pred,
            pos_bbox_target,
            pos_bbox_weight,
            avg_factor=self.loss_cls.loss_weight,
            reduction_override='none')

        loss_cls = loss_cls.sum(-1)
        pos_loss = loss_bbox + loss_cls
        return pos_loss,

    def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
                     pos_inds, pos_gt_inds, anchors):
        """Fit loss to GMM distribution and separate positive, ignore,
        negative samples again with GMM model.

        Args:
            pos_losses (Tensor): Losses of all positive samples in
                single image.
            label (Tensor): classification target of each anchor with
                shape (num_anchors,)
            label_weight (Tensor): Classification loss weight of each
                anchor with shape (num_anchors).
            bbox_weight (Tensor): Bbox weight of each anchor with shape
                (num_anchors, 4).
            pos_inds (Tensor): Index of all positive samples got from
                first assign process.
            pos_gt_inds (Tensor): Gt_index of all positive samples got
                from first assign process.
            anchors (list[Tensor]): Anchors of each scale.

        Returns:
            tuple: Usually returns a tuple containing learning targets.

                - label (Tensor): classification target of each anchor after
                  paa assign, with shape (num_anchors,)
                - label_weight (Tensor): Classification loss weight of each
                  anchor after paa assign, with shape (num_anchors).
                - bbox_weight (Tensor): Bbox weight of each anchor with shape
                  (num_anchors, 4).
                - num_pos (int): The number of positive samples after paa
                  assign.
        """
        if not len(pos_inds):
            return label, label_weight, bbox_weight, 0
        num_gt = pos_gt_inds.max() + 1
        num_level = len(anchors)
        num_anchors_each_level = [item.size(0) for item in anchors]
        num_anchors_each_level.insert(0, 0)
        inds_level_interval = np.cumsum(num_anchors_each_level)
        pos_level_mask = []
        for i in range(num_level):
            mask = (pos_inds >= inds_level_interval[i]) & (
                pos_inds < inds_level_interval[i + 1])
            pos_level_mask.append(mask)
        pos_inds_after_paa = [label.new_tensor([])]
        ignore_inds_after_paa = [label.new_tensor([])]
        for gt_ind in range(num_gt):
            pos_inds_gmm = []
            pos_loss_gmm = []
            gt_mask = pos_gt_inds == gt_ind
            for level in range(num_level):
                level_mask = pos_level_mask[level]
                level_gt_mask = level_mask & gt_mask
                value, topk_inds = pos_losses[level_gt_mask].topk(
                    min(level_gt_mask.sum(), self.topk), largest=False)
                pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds])
                pos_loss_gmm.append(value)
            pos_inds_gmm = torch.cat(pos_inds_gmm)
            pos_loss_gmm = torch.cat(pos_loss_gmm)
            # GMM fitting needs at least two samples
            if len(pos_inds_gmm) < 2:
                continue
            device = pos_inds_gmm.device
            pos_loss_gmm, sort_inds = pos_loss_gmm.sort()
            pos_inds_gmm = pos_inds_gmm[sort_inds]
            pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()
            min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()
            means_init = np.array([min_loss, max_loss]).reshape(2, 1)
            weights_init = np.array([0.5, 0.5])
            precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1)  # full
            if self.covariance_type == 'spherical':
                precisions_init = precisions_init.reshape(2)
            elif self.covariance_type == 'diag':
                precisions_init = precisions_init.reshape(2, 1)
            elif self.covariance_type == 'tied':
                precisions_init = np.array([[1.0]])
            if skm is None:
                raise ImportError('Please run "pip install sklearn" '
                                  'to install sklearn first.')
            gmm = skm.GaussianMixture(
                2,
                weights_init=weights_init,
                means_init=means_init,
                precisions_init=precisions_init,
                covariance_type=self.covariance_type)
            gmm.fit(pos_loss_gmm)
            gmm_assignment = gmm.predict(pos_loss_gmm)
            scores = gmm.score_samples(pos_loss_gmm)
            gmm_assignment = torch.from_numpy(gmm_assignment).to(device)
            scores = torch.from_numpy(scores).to(device)

            pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(
                gmm_assignment, scores, pos_inds_gmm)
            pos_inds_after_paa.append(pos_inds_temp)
            ignore_inds_after_paa.append(ignore_inds_temp)

        pos_inds_after_paa = torch.cat(pos_inds_after_paa)
        ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)
        reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)
        reassign_ids = pos_inds[reassign_mask]
        label[reassign_ids] = self.num_classes
        label_weight[ignore_inds_after_paa] = 0
        bbox_weight[reassign_ids] = 0
        num_pos = len(pos_inds_after_paa)
        return label, label_weight, bbox_weight, num_pos

    def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
        """A general separation scheme for gmm model.

        It separates a GMM distribution of candidate samples into three
        parts: 0, 1 and an uncertain area; you can implement other separation
        schemes by rewriting this function.

        Args:
            gmm_assignment (Tensor): The prediction of GMM which is of shape
                (num_samples,). The 0/1 value indicates the distribution
                that each sample comes from.
            scores (Tensor): The probability of sample coming from the
                fit GMM distribution. The tensor is of shape (num_samples,).
            pos_inds_gmm (Tensor): All the indexes of samples which are used
                to fit GMM model. The tensor is of shape (num_samples,)

        Returns:
            tuple[Tensor]: The indices of positive and ignored samples.

                - pos_inds_temp (Tensor): Indices of positive samples.
                - ignore_inds_temp (Tensor): Indices of ignore samples.
        """
        # The implementation is (c) in Fig.3 of the original paper instead
        # of (b). You can refer to issues such as
        # https://github.com/kkhoot/PAA/issues/8 and
        # https://github.com/kkhoot/PAA/issues/9.
        fgs = gmm_assignment == 0
        pos_inds_temp = fgs.new_tensor([], dtype=torch.long)
        ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)
        if fgs.nonzero().numel():
            _, pos_thr_ind = scores[fgs].topk(1)
            pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]
            ignore_inds_temp = pos_inds_gmm.new_tensor([])
        return pos_inds_temp, ignore_inds_temp

    def get_targets(
        self,
        anchor_list,
        valid_flag_list,
        gt_bboxes_list,
        img_metas,
        gt_bboxes_ignore_list=None,
        gt_labels_list=None,
        label_channels=1,
        unmap_outputs=True,
    ):
        """Get targets for PAA head.

        This method is almost the same as `AnchorHead.get_targets()`. We
        directly return the results from `_get_targets_single` instead of
        mapping them to levels by the `images_to_levels` function.

        Args:
            anchor_list (list[list[Tensor]]): Multi level anchors of each
                image. The outer list indicates images, and the inner list
                corresponds to feature levels of the image. Each element of
                the inner list is a tensor of shape (num_anchors, 4).
            valid_flag_list (list[list[Tensor]]): Multi level valid flags of
                each image. The outer list indicates images, and the inner
                list corresponds to feature levels of the image. Each element
                of the inner list is a tensor of shape (num_anchors, )
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
            img_metas (list[dict]): Meta info of each image.
            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
                ignored.
            gt_labels_list (list[Tensor]): Ground truth labels of each box.
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.

        Returns:
            tuple: Usually returns a tuple containing learning targets.

                - labels (list[Tensor]): Labels of all anchors, each with
                  shape (num_anchors,).
                - label_weights (list[Tensor]): Label weights of all anchors,
                  each with shape (num_anchors,).
                - bbox_targets (list[Tensor]): BBox targets of all anchors,
                  each with shape (num_anchors, 4).
                - bbox_weights (list[Tensor]): BBox weights of all anchors,
                  each with shape (num_anchors, 4).
                - pos_inds (list[Tensor]): Contains all index of positive
                  sample in all anchor.
                - gt_inds (list[Tensor]): Contains all gt_index of positive
                  sample in all anchor.
        """
        num_imgs = len(img_metas)
        assert len(anchor_list) == len(valid_flag_list) == num_imgs
        concat_anchor_list = []
        concat_valid_flag_list = []
        for i in range(num_imgs):
            assert len(anchor_list[i]) == len(valid_flag_list[i])
            concat_anchor_list.append(torch.cat(anchor_list[i]))
            concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))

        # compute targets for each image
        if gt_bboxes_ignore_list is None:
            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
        if gt_labels_list is None:
            gt_labels_list = [None for _ in range(num_imgs)]
        results = multi_apply(
            self._get_targets_single,
            concat_anchor_list,
            concat_valid_flag_list,
            gt_bboxes_list,
            gt_bboxes_ignore_list,
            gt_labels_list,
            img_metas,
            label_channels=label_channels,
            unmap_outputs=unmap_outputs)

        (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,
         valid_neg_inds, sampling_result) = results

        # Due to valid flag of anchors, we have to calculate the real pos_inds
        # in the original anchor set.
        pos_inds = []
        for i, single_labels in enumerate(labels):
            pos_mask = (0 <= single_labels) & (
                single_labels < self.num_classes)
            pos_inds.append(pos_mask.nonzero().view(-1))

        gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]
        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
                gt_inds)

    def _get_targets_single(self,
                            flat_anchors,
                            valid_flags,
                            gt_bboxes,
                            gt_bboxes_ignore,
                            gt_labels,
                            img_meta,
                            label_channels=1,
                            unmap_outputs=True):
        """Compute regression and classification targets for anchors in a
        single image.

        This method is same as `AnchorHead._get_targets_single()`.
        """
        assert unmap_outputs, 'We must map outputs back to the original ' \
            'set of anchors in PAAhead'
        return super(ATSSHead, self)._get_targets_single(
            flat_anchors,
            valid_flags,
            gt_bboxes,
            gt_bboxes_ignore,
            gt_labels,
            img_meta,
            label_channels=1,
            unmap_outputs=True)

    def _get_bboxes_single(self,
                           cls_scores,
                           bbox_preds,
                           iou_preds,
                           mlvl_anchors,
                           img_shape,
                           scale_factor,
                           cfg,
                           rescale=False,
                           with_nms=True):
        """Transform outputs for a single batch item into labeled boxes.

        This method is almost the same as `ATSSHead._get_bboxes_single()`.
        We use sqrt(iou_preds * cls_scores) in NMS process instead of just
        cls_scores. Besides, score voting is used when ``score_voting``
        is set to True.
        """
        assert with_nms, 'PAA only supports "with_nms=True" now'
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
        mlvl_bboxes = []
        mlvl_scores = []
        mlvl_iou_preds = []
        for cls_score, bbox_pred, iou_preds, anchors in zip(
                cls_scores, bbox_preds, iou_preds, mlvl_anchors):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]

            scores = cls_score.permute(1, 2, 0).reshape(
                -1, self.cls_out_channels).sigmoid()
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            iou_preds = iou_preds.permute(1, 2, 0).reshape(-1).sigmoid()
            nms_pre = cfg.get('nms_pre', -1)
            if nms_pre > 0 and scores.shape[0] > nms_pre:
                max_scores, _ = (scores *
                                 iou_preds[:, None]).sqrt().max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                anchors = anchors[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
                iou_preds = iou_preds[topk_inds]

            bboxes = self.bbox_coder.decode(
                anchors, bbox_pred, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
            mlvl_iou_preds.append(iou_preds)

        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        # Add a dummy background class to the backend when using sigmoid
        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
        # BG cat_id: num_class
        padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
        mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
        mlvl_iou_preds = torch.cat(mlvl_iou_preds)
        mlvl_nms_scores = (mlvl_scores * mlvl_iou_preds[:, None]).sqrt()
        det_bboxes, det_labels = multiclass_nms(
            mlvl_bboxes,
            mlvl_nms_scores,
            cfg.score_thr,
            cfg.nms,
            cfg.max_per_img,
            score_factors=None)
        if self.with_score_voting:
            det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels,
                                                       mlvl_bboxes,
                                                       mlvl_nms_scores,
                                                       cfg.score_thr)

        return det_bboxes, det_labels

    def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,
                     mlvl_nms_scores, score_thr):
        """Implementation of the score voting method, which works on each
        remaining box after the NMS procedure.

        Args:
            det_bboxes (Tensor): Remaining boxes after NMS procedure,
                with shape (k, 5), each dimension means
                (x1, y1, x2, y2, score).
            det_labels (Tensor): The label of remaining boxes, with shape
                (k, 1), labels are 0-based.
            mlvl_bboxes (Tensor): All boxes before the NMS procedure,
                with shape (num_anchors, 4).
            mlvl_nms_scores (Tensor): The scores of all boxes which is used
                in the NMS procedure, with shape (num_anchors, num_class)
            score_thr (float): The score threshold of bboxes.

        Returns:
            tuple: Usually returns a tuple containing voting results.

                - det_bboxes_voted (Tensor): Remaining boxes after
                  score voting procedure, with shape (k, 5), each
                  dimension means (x1, y1, x2, y2, score).
                - det_labels_voted (Tensor): Label of remaining bboxes
                  after voting, with shape (num_anchors,).
        """
        candidate_mask = mlvl_nms_scores > score_thr
        candidate_mask_nozeros = candidate_mask.nonzero()
        candidate_inds = candidate_mask_nozeros[:, 0]
        candidate_labels = candidate_mask_nozeros[:, 1]
        candidate_bboxes = mlvl_bboxes[candidate_inds]
        candidate_scores = mlvl_nms_scores[candidate_mask]
        det_bboxes_voted = []
        det_labels_voted = []
        for cls in range(self.cls_out_channels):
            candidate_cls_mask = candidate_labels == cls
            if not candidate_cls_mask.any():
                continue
            candidate_cls_scores = candidate_scores[candidate_cls_mask]
            candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]
            det_cls_mask = det_labels == cls
            det_cls_bboxes = det_bboxes[det_cls_mask].view(
                -1, det_bboxes.size(-1))
            det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],
                                               candidate_cls_bboxes)
            for det_ind in range(len(det_cls_bboxes)):
                single_det_ious = det_candidate_ious[det_ind]
                pos_ious_mask = single_det_ious > 0.01
                pos_ious = single_det_ious[pos_ious_mask]
                pos_bboxes = candidate_cls_bboxes[pos_ious_mask]
                pos_scores = candidate_cls_scores[pos_ious_mask]
                pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *
                       pos_scores)[:, None]
                voted_box = torch.sum(
                    pis * pos_bboxes, dim=0) / torch.sum(
                        pis, dim=0)
                voted_score = det_cls_bboxes[det_ind][-1:][None, :]
                det_bboxes_voted.append(
                    torch.cat((voted_box[None, :], voted_score), dim=1))
                det_labels_voted.append(cls)

        det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)
        det_labels_voted = det_labels.new_tensor(det_labels_voted)
        return det_bboxes_voted, det_labels_voted
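As a quick sanity check on `levels_to_images` above, here is a sketch assuming two FPN levels and a batch of two images; only the shape arithmetic matters.

import torch

# Two levels, batch N=2, C=4 channels: level 0 is 8x8, level 1 is 4x4.
lvl0 = torch.randn(2, 4, 8, 8)
lvl1 = torch.randn(2, 4, 4, 4)
per_image = levels_to_images([lvl0, lvl1])
# One tensor per image; rows = 8*8 + 4*4 locations, columns = C.
assert len(per_image) == 2
assert per_image[0].shape == (64 + 16, 4)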
GFocalV2-master/mmdet/models/dense_heads/retina_sepbn_head.py
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init

from ..builder import HEADS
from .anchor_head import AnchorHead


@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
    """RetinaHead with separate BN.

    In RetinaHead, conv/norm layers are shared across different FPN levels,
    while in RetinaSepBNHead, conv layers are shared across different FPN
    levels, but BN layers are separated.
    """

    def __init__(self,
                 num_classes,
                 num_ins,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 **kwargs):
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.num_ins = num_ins
        super(RetinaSepBNHead, self).__init__(num_classes, in_channels,
                                              **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.num_ins):
            cls_convs = nn.ModuleList()
            reg_convs = nn.ModuleList()
            for i in range(self.stacked_convs):
                chn = self.in_channels if i == 0 else self.feat_channels
                cls_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
                reg_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            self.cls_convs.append(cls_convs)
            self.reg_convs.append(reg_convs)
        for i in range(self.stacked_convs):
            for j in range(1, self.num_ins):
                self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
                self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_anchors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_anchors * 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs[0]:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs[0]:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually a tuple of classification scores and bbox
            prediction

                cls_scores (list[Tensor]): Classification scores for all scale
                    levels, each is a 4D-tensor, the channel number is
                    num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all scale
                    levels, each is a 4D-tensor, the channel number is
                    num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for i, x in enumerate(feats):
            cls_feat = feats[i]
            reg_feat = feats[i]
            for cls_conv in self.cls_convs[i]:
                cls_feat = cls_conv(cls_feat)
            for reg_conv in self.reg_convs[i]:
                reg_feat = reg_conv(reg_feat)
            cls_score = self.retina_cls(cls_feat)
            bbox_pred = self.retina_reg(reg_feat)
            cls_scores.append(cls_score)
            bbox_preds.append(bbox_pred)
        return cls_scores, bbox_preds
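The weight-sharing trick in `_init_layers` can be checked directly: after construction, the conv submodules are the same object across levels while the norm layers are distinct. A sketch, assuming mmcv's `ConvModule` exposes `.conv`/`.norm` attributes and that `AnchorHead`'s default arguments suffice for standalone construction:

# Hypothetical check of conv sharing vs. separate BN across FPN levels.
head = RetinaSepBNHead(
    num_classes=80,
    num_ins=5,
    in_channels=256,
    norm_cfg=dict(type='BN', requires_grad=True))
assert head.cls_convs[1][0].conv is head.cls_convs[0][0].conv  # shared conv
assert head.cls_convs[1][0].norm is not head.cls_convs[0][0].norm  # own BN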
GFocalV2-master/mmdet/models/dense_heads/anchor_free_head.py
from abc import abstractmethod

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32

from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin


@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    """Anchor-free head (FCOS, Fovea, RepPoints, etc.).

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels. Used in child classes.
        stacked_convs (int): Number of stacking convs of the head.
        strides (tuple): Downsample factor of each feature map.
        dcn_on_last_conv (bool): If true, use dcn in the last layer of towers.
            Default: False.
        conv_bias (bool | str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
            None, otherwise False. Default: "auto".
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
    """  # noqa: W605

    _version = 1

    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 strides=(4, 8, 16, 32, 64),
                 dcn_on_last_conv=False,
                 conv_bias='auto',
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 conv_cfg=None,
                 norm_cfg=None,
                 train_cfg=None,
                 test_cfg=None):
        super(AnchorFreeHead, self).__init__()
        self.num_classes = num_classes
        self.cls_out_channels = num_classes
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.dcn_on_last_conv = dcn_on_last_conv
        assert conv_bias == 'auto' or isinstance(conv_bias, bool)
        self.conv_bias = conv_bias
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False

        self._init_layers()

    def _init_layers(self):
        """Initialize layers of the head."""
        self._init_cls_convs()
        self._init_reg_convs()
        self._init_predictor()

    def _init_cls_convs(self):
        """Initialize classification conv layers of the head."""
        self.cls_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias))

    def _init_reg_convs(self):
        """Initialize bbox regression conv layers of the head."""
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias))

    def _init_predictor(self):
        """Initialize predictor layers of the head."""
        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_cls, std=0.01, bias=bias_cls)
        normal_init(self.conv_reg, std=0.01)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys,
                              error_msgs):
        """Hack some keys of the model state dict so that can load checkpoints
        of previous version."""
        version = local_metadata.get('version', None)
        if version is None:
            # the key is different in early versions
            # for example, 'fcos_cls' become 'conv_cls' now
            bbox_head_keys = [
                k for k in state_dict.keys() if k.startswith(prefix)
            ]
            ori_predictor_keys = []
            new_predictor_keys = []
            # e.g. 'fcos_cls' or 'fcos_reg'
            for key in bbox_head_keys:
                ori_predictor_keys.append(key)
                key = key.split('.')
                conv_name = None
                if key[1].endswith('cls'):
                    conv_name = 'conv_cls'
                elif key[1].endswith('reg'):
                    conv_name = 'conv_reg'
                elif key[1].endswith('centerness'):
                    conv_name = 'conv_centerness'
                else:
                    # no known mapping for this key
                    raise NotImplementedError
                if conv_name is not None:
                    key[1] = conv_name
                    new_predictor_keys.append('.'.join(key))
                else:
                    ori_predictor_keys.pop(-1)
            for i in range(len(new_predictor_keys)):
                state_dict[new_predictor_keys[i]] = state_dict.pop(
                    ori_predictor_keys[i])
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually contains classification scores and bbox
            predictions.

                cls_scores (list[Tensor]): Box scores for each scale level,
                    each is a 4D-tensor, the channel number is
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for each
                    scale level, each is a 4D-tensor, the channel number is
                    num_points * 4.
        """
        return multi_apply(self.forward_single, feats)[:2]

    def forward_single(self, x):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.

        Returns:
            tuple: Scores for each class, bbox predictions, and features after
                classification and regression conv layers; some models like
                FCOS need these features.
        """
        cls_feat = x
        reg_feat = x

        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)
        cls_score = self.conv_cls(cls_feat)

        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)
        bbox_pred = self.conv_reg(reg_feat)
        return cls_score, bbox_pred, cls_feat, reg_feat

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
        """
        raise NotImplementedError

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   img_metas,
                   cfg=None,
                   rescale=None):
        """Transform network output for a batch into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_points * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_points * 4, H, W)
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config): Test / postprocessing configuration,
                if None, test_cfg would be used
            rescale (bool): If True, return boxes in original image space
        """
        raise NotImplementedError

    @abstractmethod
    def get_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute regression, classification and centerness targets for
        points in multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
        """
        raise NotImplementedError

    def _get_points_single(self,
                           featmap_size,
                           stride,
                           dtype,
                           device,
                           flatten=False):
        """Get points of a single scale level."""
        h, w = featmap_size
        x_range = torch.arange(w, dtype=dtype, device=device)
        y_range = torch.arange(h, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range)
        if flatten:
            y = y.flatten()
            x = x.flatten()
        return y, x

    def get_points(self, featmap_sizes, dtype, device, flatten=False):
        """Get points according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            dtype (torch.dtype): Type of points.
            device (torch.device): Device of points.

        Returns:
            tuple: points of each image.
        """
        mlvl_points = []
        for i in range(len(featmap_sizes)):
            mlvl_points.append(
                self._get_points_single(featmap_sizes[i], self.strides[i],
                                        dtype, device, flatten))
        return mlvl_points

    def aug_test(self, feats, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            feats (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains features for all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[ndarray]: bbox results of each class
        """
        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
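`_get_points_single` relies on `torch.meshgrid`'s row-major ('ij') behavior; a tiny sketch of the grid it yields for a 2x3 feature map (stride handling is left to subclasses):

import torch

y_range = torch.arange(2.)
x_range = torch.arange(3.)
y, x = torch.meshgrid(y_range, x_range)
# y.flatten() -> [0., 0., 0., 1., 1., 1.]
# x.flatten() -> [0., 1., 2., 0., 1., 2.]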
GFocalV2-master/mmdet/models/dense_heads/pisa_ssd_head.py
import torch

from mmdet.core import multi_apply
from ..builder import HEADS
from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p
from .ssd_head import SSDHead


# TODO: add loss evaluator for SSD
@HEADS.register_module()
class PISASSDHead(SSDHead):

    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level.
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj,).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprising classification loss, regression loss
            and CARL loss.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=1,
            unmap_outputs=False,
            return_sampling_results=True)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, num_total_pos, num_total_neg,
         sampling_results_list) = cls_reg_targets

        num_images = len(img_metas)
        all_cls_scores = torch.cat([
            s.permute(0, 2, 3, 1).reshape(
                num_images, -1, self.cls_out_channels) for s in cls_scores
        ], 1)
        all_labels = torch.cat(labels_list, -1).view(num_images, -1)
        all_label_weights = torch.cat(label_weights_list,
                                      -1).view(num_images, -1)
        all_bbox_preds = torch.cat([
            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
            for b in bbox_preds
        ], -2)
        all_bbox_targets = torch.cat(bbox_targets_list,
                                     -2).view(num_images, -1, 4)
        all_bbox_weights = torch.cat(bbox_weights_list,
                                     -2).view(num_images, -1, 4)

        # concat all level anchors to a single tensor
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))

        isr_cfg = self.train_cfg.get('isr', None)
        all_targets = (all_labels.view(-1), all_label_weights.view(-1),
                       all_bbox_targets.view(-1, 4),
                       all_bbox_weights.view(-1, 4))
        # apply ISR-P
        if isr_cfg is not None:
            all_targets = isr_p(
                all_cls_scores.view(-1, all_cls_scores.size(-1)),
                all_bbox_preds.view(-1, 4),
                all_targets,
                torch.cat(all_anchors),
                sampling_results_list,
                loss_cls=CrossEntropyLoss(),
                bbox_coder=self.bbox_coder,
                **self.train_cfg.isr,
                num_class=self.num_classes)
            (new_labels, new_label_weights, new_bbox_targets,
             new_bbox_weights) = all_targets
            all_labels = new_labels.view(all_labels.shape)
            all_label_weights = new_label_weights.view(
                all_label_weights.shape)
            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)

        # add CARL loss
        carl_loss_cfg = self.train_cfg.get('carl', None)
        if carl_loss_cfg is not None:
            loss_carl = carl_loss(
                all_cls_scores.view(-1, all_cls_scores.size(-1)),
                all_targets[0],
                all_bbox_preds.view(-1, 4),
                all_targets[2],
                SmoothL1Loss(beta=1.),
                **self.train_cfg.carl,
                avg_factor=num_total_pos,
                num_class=self.num_classes)

        # check NaN and Inf
        assert torch.isfinite(all_cls_scores).all().item(), \
            'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), \
            'bbox predictions become infinite or NaN!'

        losses_cls, losses_bbox = multi_apply(
            self.loss_single,
            all_cls_scores,
            all_bbox_preds,
            all_anchors,
            all_labels,
            all_label_weights,
            all_bbox_targets,
            all_bbox_weights,
            num_total_samples=num_total_pos)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        if carl_loss_cfg is not None:
            loss_dict.update(loss_carl)
        return loss_dict
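Both ISR-P and CARL are gated purely on keys in `train_cfg`. A hedged fragment of what enables them, with hyperparameter names following mmdet's PISA configs and placeholder values:

# Illustrative train_cfg fragment; the k/bias values are placeholders.
train_cfg_fragment = dict(
    isr=dict(k=2., bias=0.),    # enables ISR-P reweighting
    carl=dict(k=1., bias=0.2))  # enables the CARL auxiliary loss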
GFocalV2-master/mmdet/models/dense_heads/pisa_retinanet_head.py
import torch from mmcv.runner import force_fp32 from mmdet.core import images_to_levels from ..builder import HEADS from ..losses import carl_loss, isr_p from .retina_head import RetinaHead @HEADS.register_module() class PISARetinaHead(RetinaHead): """PISA Retinanet Head. The head owns the same structure with Retinanet Head, but differs in two aspects: 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to change the positive loss weights. 2. Classification-aware regression loss is adopted as a third loss. """ @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes of each image with shape (num_obj, 4). gt_labels (list[Tensor]): Ground truth labels of each image with shape (num_obj, 4). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. Default: None. Returns: dict: Loss dict, comprise classification loss, regression loss and carl loss. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.anchor_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) num_imgs = len(img_metas) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) for cls_score in cls_scores ] flatten_cls_scores = torch.cat( flatten_cls_scores, dim=1).reshape(-1, flatten_cls_scores[0].size(-1)) flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_bbox_preds = torch.cat( flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) flatten_label_weights = torch.cat( label_weights_list, dim=1).reshape(-1) flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) flatten_bbox_targets = torch.cat( bbox_targets_list, dim=1).reshape(-1, 4) flatten_bbox_weights = torch.cat( bbox_weights_list, dim=1).reshape(-1, 4) # Apply ISR-P isr_cfg = self.train_cfg.get('isr', None) if isr_cfg is not None: all_targets = (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) with torch.no_grad(): all_targets = isr_p( 
flatten_cls_scores, flatten_bbox_preds, all_targets, flatten_anchors, sampling_results_list, bbox_coder=self.bbox_coder, loss_cls=self.loss_cls, num_class=self.num_classes, **self.train_cfg.isr) (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) = all_targets # For convenience we compute the loss once instead of separating it by fpn level, # so that we don't need to separate the weights by level again. # The result should be the same. losses_cls = self.loss_cls( flatten_cls_scores, flatten_labels, flatten_label_weights, avg_factor=num_total_samples) losses_bbox = self.loss_bbox( flatten_bbox_preds, flatten_bbox_targets, flatten_bbox_weights, avg_factor=num_total_samples) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) # CARL Loss carl_cfg = self.train_cfg.get('carl', None) if carl_cfg is not None: loss_carl = carl_loss( flatten_cls_scores, flatten_labels, flatten_bbox_preds, flatten_bbox_targets, self.loss_bbox, **self.train_cfg.carl, avg_factor=num_total_pos, sigmoid=True, num_class=self.num_classes) loss_dict.update(loss_carl) return loss_dict
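# --- Illustrative note (added; not part of the original file) ---
# A minimal sketch of the two optional train_cfg entries this head consumes
# via ``self.train_cfg.get('isr', None)`` and ``self.train_cfg.get('carl',
# None)``. The keyword names mirror the ``**self.train_cfg.isr`` /
# ``**self.train_cfg.carl`` expansions above; the numeric values are
# illustrative assumptions, not this repo's tuned settings.
#
#   train_cfg = dict(
#       ...,                        # the usual assigner/sampler fields
#       isr=dict(k=2., bias=0.),    # ISR-P: reweight positive cls losses
#       carl=dict(k=1., bias=0.2))  # CARL: classification-aware reg loss
#
# Omitting either key disables the corresponding branch, so the head then
# degenerates to a plain RetinaNet loss.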
6,220
39.135484
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/gfl_head.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps, build_assigner, build_sampler, distance2bbox, images_to_levels, multi_apply, multiclass_nms, reduce_mean, unmap) from ..builder import HEADS, build_loss from .anchor_head import AnchorHead class Integral(nn.Module): """A fixed layer for calculating integral result from distribution. This layer calculates the target location by :math:`sum{P(y_i) * y_i}`, where P(y_i) denotes the softmax vector that represents the discrete distribution and y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}. Args: reg_max (int): The maximal value of the discrete set. Default: 16. You may want to reset it according to your new dataset or related settings. """ def __init__(self, reg_max=16): super(Integral, self).__init__() self.reg_max = reg_max self.register_buffer('project', torch.linspace(0, self.reg_max, self.reg_max + 1)) def forward(self, x): """Forward feature from the regression head to get integral result of bounding box location. Args: x (Tensor): Features of the regression head, shape (N, 4*(n+1)), n is self.reg_max. Returns: x (Tensor): Integral result of box locations, i.e., distance offsets from the box center in four directions, shape (N, 4). """ x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) return x @HEADS.register_module() class GFLHead(AnchorHead): """Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection. The GFL head structure is similar to ATSS; however, GFL uses 1) a joint representation for classification and localization quality, and 2) a flexible General distribution for bounding box locations, which are supervised by Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively. https://arxiv.org/abs/2006.04388 Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 4. conv_cfg (dict): dictionary to construct and config conv layer. Default: None. norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='GN', num_groups=32, requires_grad=True). loss_qfl (dict): Config of Quality Focal Loss (QFL). loss_dfl (dict): Config of Distribution Focal Loss (DFL). reg_max (int): Max value of integral set :math:`{0, ..., reg_max}` in QFL setting. Default: 16.
Example: >>> self = GFLHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_quality_score, bbox_pred = self.forward(feats) >>> assert len(cls_quality_score) == len(self.scales) """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), reg_max=16, **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_max = reg_max super(GFLHead, self).__init__(num_classes, in_channels, **kwargs) self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.integral = Integral(self.reg_max) self.loss_dfl = build_loss(loss_dfl) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) assert self.num_anchors == 1, 'anchor free version' self.gfl_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.gfl_reg = nn.Conv2d( self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.anchor_generator.strides]) def init_weights(self): """Initialize weights of the head.""" for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.gfl_cls, std=0.01, bias=bias_cls) normal_init(self.gfl_reg, std=0.01) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification and quality (IoU) joint scores for all scale levels, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box distribution logits for all scale levels, each is a 4D-tensor, the channel number is 4*(n+1), n is max value of integral set. """ return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: cls_score (Tensor): Cls and quality joint scores for a single scale level the channel number is num_classes. bbox_pred (Tensor): Box distribution logits for a single scale level, the channel number is 4*(n+1), n is max value of integral set. """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.gfl_cls(cls_feat) bbox_pred = scale(self.gfl_reg(reg_feat)).float() return cls_score, bbox_pred def anchor_center(self, anchors): """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. 
Returns: Tensor: Anchor centers with shape (N, 2), "xy" format. """ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, bbox_targets, stride, num_total_samples): """Compute loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchor with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors). bbox_targets (Tensor): BBox regression targets of each anchor with shape (N, num_total_anchors, 4). stride (tuple): Stride in this scale level. num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = distance2bbox(pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) target_corners = bbox2distance(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = torch.tensor(0).cuda() # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.anchor_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos).cuda()).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, losses_dfl,\ avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.anchor_generator.strides, num_total_samples=num_total_samples) avg_factor = sum(avg_factor) avg_factor = reduce_mean(avg_factor).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False, with_nms=True): """Transform outputs for a single batch item into labeled boxes. Args: cls_scores (list[Tensor]): Box scores for a single scale level has shape (num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for a single scale level with shape (4*(n+1), H, W), n is max value of integral set. mlvl_anchors (list[Tensor]): Box reference for a single scale level with shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arranged as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple(Tensor): det_bboxes (Tensor): Bbox predictions in shape (N, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. det_labels (Tensor): A (N,) tensor where each item is the predicted class label of the corresponding box.
""" cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, stride, anchors in zip( cls_scores, bbox_preds, self.anchor_generator.strides, mlvl_anchors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] assert stride[0] == stride[1] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0) bbox_pred = self.integral(bbox_pred) * stride[0] nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: max_scores, _ = scores.max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bboxes = distance2bbox( self.anchor_center(anchors), bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) if with_nms: det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels else: return mlvl_bboxes, mlvl_scores def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get targets for GFL head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. 
multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_anchors, valid_flags, num_level_anchors, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4). valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors (Tensor): Number of anchors of each scale level. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. anchors (Tensor): All anchors in the image with shape (N, 4). labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchors in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4). pos_inds (Tensor): Indices of positive anchors with shape (num_pos,). neg_inds (Tensor): Indices of negative anchors with shape (num_neg,).
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) assign_result = self.assigner.assign(anchors, num_level_anchors_inside, gt_bboxes, gt_bboxes_ignore, gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
27,254
42.125
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/corner_head.py
from math import ceil, log import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, bias_init_with_prob from mmcv.ops import CornerPool, batched_nms from mmdet.core import multi_apply from ..builder import HEADS, build_loss from ..utils import gaussian_radius, gen_gaussian_target from .base_dense_head import BaseDenseHead class BiCornerPool(nn.Module): """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.) Args: in_channels (int): Input channels of module. out_channels (int): Output channels of module. feat_channels (int): Feature channels of module. directions (list[str]): Directions of two CornerPools. norm_cfg (dict): Dictionary to construct and config norm layer. """ def __init__(self, in_channels, directions, feat_channels=128, out_channels=128, norm_cfg=dict(type='BN', requires_grad=True)): super(BiCornerPool, self).__init__() self.direction1_conv = ConvModule( in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) self.direction2_conv = ConvModule( in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) self.aftpool_conv = ConvModule( feat_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=None) self.conv1 = ConvModule( in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.conv2 = ConvModule( in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg) self.direction1_pool = CornerPool(directions[0]) self.direction2_pool = CornerPool(directions[1]) self.relu = nn.ReLU(inplace=True) def forward(self, x): """Forward features from the upstream network. Args: x (tensor): Input feature of BiCornerPool. Returns: conv2 (tensor): Output feature of BiCornerPool. """ direction1_conv = self.direction1_conv(x) direction2_conv = self.direction2_conv(x) direction1_feat = self.direction1_pool(direction1_conv) direction2_feat = self.direction2_pool(direction2_conv) aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat) conv1 = self.conv1(x) relu = self.relu(aftpool_conv + conv1) conv2 = self.conv2(relu) return conv2 @HEADS.register_module() class CornerHead(BaseDenseHead): """Head of CornerNet: Detecting Objects as Paired Keypoints. Code is modified from the `official github repo <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/ kp.py#L73>`_ . More details can be found in the `paper <https://arxiv.org/abs/1808.01244>`_ . Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. num_feat_levels (int): Levels of feature from the previous module. 2 for HourglassNet-104 and 1 for HourglassNet-52. Because HourglassNet-104 outputs the final feature and intermediate supervision feature and HourglassNet-52 only outputs the final feature. Default: 2. corner_emb_channels (int): Channel of embedding vector. Default: 1. train_cfg (dict | None): Training config. Useless in CornerHead, but we keep this variable for SingleStageDetector. Default: None. test_cfg (dict | None): Testing config of CornerHead. Default: None. loss_heatmap (dict | None): Config of corner heatmap loss. Default: GaussianFocalLoss. loss_embedding (dict | None): Config of corner embedding loss. Default: AssociativeEmbeddingLoss. loss_offset (dict | None): Config of corner offset loss. Default: SmoothL1Loss. 
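    Example (illustrative doctest added for this writeup; it assumes torch
    is imported and a build of mmcv whose corner pooling ops are compiled):
        >>> self = CornerHead(num_classes=11, in_channels=7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [16, 16]]
        >>> (tl_heats, br_heats, tl_embs, br_embs, tl_offs,
        ...  br_offs) = self.forward(feats)
        >>> assert len(tl_heats) == self.num_feat_levels
        >>> assert tl_heats[-1].shape == (1, 11, 16, 16)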
""" def __init__(self, num_classes, in_channels, num_feat_levels=2, corner_emb_channels=1, train_cfg=None, test_cfg=None, loss_heatmap=dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_embedding=dict( type='AssociativeEmbeddingLoss', pull_weight=0.25, push_weight=0.25), loss_offset=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1)): super(CornerHead, self).__init__() self.num_classes = num_classes self.in_channels = in_channels self.corner_emb_channels = corner_emb_channels self.with_corner_emb = self.corner_emb_channels > 0 self.corner_offset_channels = 2 self.num_feat_levels = num_feat_levels self.loss_heatmap = build_loss( loss_heatmap) if loss_heatmap is not None else None self.loss_embedding = build_loss( loss_embedding) if loss_embedding is not None else None self.loss_offset = build_loss( loss_offset) if loss_offset is not None else None self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _make_layers(self, out_channels, in_channels=256, feat_channels=256): """Initialize conv sequential for CornerHead.""" return nn.Sequential( ConvModule(in_channels, feat_channels, 3, padding=1), ConvModule( feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)) def _init_corner_kpt_layers(self): """Initialize corner keypoint layers. Including corner heatmap branch and corner offset branch. Each branch has two parts: prefix `tl_` for top-left and `br_` for bottom-right. """ self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList() self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList() self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_pool.append( BiCornerPool( self.in_channels, ['top', 'left'], out_channels=self.in_channels)) self.br_pool.append( BiCornerPool( self.in_channels, ['bottom', 'right'], out_channels=self.in_channels)) self.tl_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.br_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.tl_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) self.br_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) def _init_corner_emb_layers(self): """Initialize corner embedding layers. Only include corner embedding branch with two parts: prefix `tl_` for top-left and `br_` for bottom-right. """ self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) self.br_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) def _init_layers(self): """Initialize layers for CornerHead. Including two parts: corner keypoint layers and corner embedding layers """ self._init_corner_kpt_layers() if self.with_corner_emb: self._init_corner_emb_layers() def init_weights(self): """Initialize weights of the head.""" bias_init = bias_init_with_prob(0.1) for i in range(self.num_feat_levels): # The initialization of parameters are different between nn.Conv2d # and ConvModule. 
Our experiments show that using the original # initialization of nn.Conv2d increases the final mAP by about 0.2% self.tl_heat[i][-1].conv.reset_parameters() self.tl_heat[i][-1].conv.bias.data.fill_(bias_init) self.br_heat[i][-1].conv.reset_parameters() self.br_heat[i][-1].conv.bias.data.fill_(bias_init) self.tl_off[i][-1].conv.reset_parameters() self.br_off[i][-1].conv.reset_parameters() if self.with_corner_emb: self.tl_emb[i][-1].conv.reset_parameters() self.br_emb[i][-1].conv.reset_parameters() def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of corner heatmaps, offset heatmaps and embedding heatmaps. - tl_heats (list[Tensor]): Top-left corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - br_heats (list[Tensor]): Bottom-right corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - tl_embs (list[Tensor] | list[None]): Top-left embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - br_embs (list[Tensor] | list[None]): Bottom-right embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - tl_offs (list[Tensor]): Top-left offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. - br_offs (list[Tensor]): Bottom-right offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. """ lvl_ind = list(range(self.num_feat_levels)) return multi_apply(self.forward_single, feats, lvl_ind) def forward_single(self, x, lvl_ind, return_pool=False): """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. return_pool (bool): Return corner pool feature or not. Returns: tuple[Tensor]: A tuple of CornerHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_emb (Tensor | None): Predicted top-left embedding heatmap. None for `self.with_corner_emb == False`. - br_emb (Tensor | None): Predicted bottom-right embedding heatmap. None for `self.with_corner_emb == False`. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_pool (Tensor): Top-left corner pool feature. Not must have. - br_pool (Tensor): Bottom-right corner pool feature. Not must have. """ tl_pool = self.tl_pool[lvl_ind](x) tl_heat = self.tl_heat[lvl_ind](tl_pool) br_pool = self.br_pool[lvl_ind](x) br_heat = self.br_heat[lvl_ind](br_pool) tl_emb, br_emb = None, None if self.with_corner_emb: tl_emb = self.tl_emb[lvl_ind](tl_pool) br_emb = self.br_emb[lvl_ind](br_pool) tl_off = self.tl_off[lvl_ind](tl_pool) br_off = self.br_off[lvl_ind](br_pool) result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off] if return_pool: result_list.append(tl_pool) result_list.append(br_pool) return result_list def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape, with_corner_emb=False, with_guiding_shift=False, with_centripetal_shift=False): """Generate corner targets. Including corner heatmap, corner offset. Optional: corner embedding, corner guiding shift, centripetal shift. 
For CornerNet, we generate corner heatmap, corner offset and corner embedding from this function. For CentripetalNet, we generate corner heatmap, corner offset, guiding shift and centripetal shift from this function. Args: gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). feat_shape (list[int]): Shape of output feature, [batch, channel, height, width]. img_shape (list[int]): Shape of input image, [height, width, channel]. with_corner_emb (bool): Generate corner embedding target or not. Default: False. with_guiding_shift (bool): Generate guiding shift target or not. Default: False. with_centripetal_shift (bool): Generate centripetal shift target or not. Default: False. Returns: dict: Ground truth of corner heatmap, corner offset, corner embedding, guiding shift and centripetal shift. Containing the following keys: - topleft_heatmap (Tensor): Ground truth top-left corner heatmap. - bottomright_heatmap (Tensor): Ground truth bottom-right corner heatmap. - topleft_offset (Tensor): Ground truth top-left corner offset. - bottomright_offset (Tensor): Ground truth bottom-right corner offset. - corner_embedding (list[list[list[int]]]): Ground truth corner embedding. Not must have. - topleft_guiding_shift (Tensor): Ground truth top-left corner guiding shift. Not must have. - bottomright_guiding_shift (Tensor): Ground truth bottom-right corner guiding shift. Not must have. - topleft_centripetal_shift (Tensor): Ground truth top-left corner centripetal shift. Not must have. - bottomright_centripetal_shift (Tensor): Ground truth bottom-right corner centripetal shift. Not must have. """ batch_size, _, height, width = feat_shape img_h, img_w = img_shape[:2] width_ratio = float(width / img_w) height_ratio = float(height / img_h) gt_tl_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_br_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) if with_corner_emb: match = [] # Guiding shift is a kind of offset, from center to corner if with_guiding_shift: gt_tl_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) # Centripetal shift is also a kind of offset, from center to corner # and normalized by log. 
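        # (added note) i.e. the stored target is log(center - corner) per
        # axis; decode_heatmap() below applies .exp() to the predicted
        # shifts, which also guarantees the decoded shift stays positive.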
if with_centripetal_shift: gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) for batch_id in range(batch_size): # Ground truth of corner embedding per image is a list of coord set corner_match = [] for box_id in range(len(gt_labels[batch_id])): left, top, right, bottom = gt_bboxes[batch_id][box_id] center_x = (left + right) / 2.0 center_y = (top + bottom) / 2.0 label = gt_labels[batch_id][box_id] # Use coords in the feature level to generate ground truth scale_left = left * width_ratio scale_right = right * width_ratio scale_top = top * height_ratio scale_bottom = bottom * height_ratio scale_center_x = center_x * width_ratio scale_center_y = center_y * height_ratio # Int coords on feature map/ground truth tensor left_idx = int(min(scale_left, width - 1)) right_idx = int(min(scale_right, width - 1)) top_idx = int(min(scale_top, height - 1)) bottom_idx = int(min(scale_bottom, height - 1)) # Generate gaussian heatmap scale_box_width = ceil(scale_right - scale_left) scale_box_height = ceil(scale_bottom - scale_top) radius = gaussian_radius((scale_box_height, scale_box_width), min_overlap=0.3) radius = max(0, int(radius)) gt_tl_heatmap[batch_id, label] = gen_gaussian_target( gt_tl_heatmap[batch_id, label], [left_idx, top_idx], radius) gt_br_heatmap[batch_id, label] = gen_gaussian_target( gt_br_heatmap[batch_id, label], [right_idx, bottom_idx], radius) # Generate corner offset left_offset = scale_left - left_idx top_offset = scale_top - top_idx right_offset = scale_right - right_idx bottom_offset = scale_bottom - bottom_idx gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset gt_br_offset[batch_id, 1, bottom_idx, right_idx] = bottom_offset # Generate corner embedding if with_corner_emb: corner_match.append([[top_idx, left_idx], [bottom_idx, right_idx]]) # Generate guiding shift if with_guiding_shift: gt_tl_guiding_shift[batch_id, 0, top_idx, left_idx] = scale_center_x - left_idx gt_tl_guiding_shift[batch_id, 1, top_idx, left_idx] = scale_center_y - top_idx gt_br_guiding_shift[batch_id, 0, bottom_idx, right_idx] = right_idx - scale_center_x gt_br_guiding_shift[ batch_id, 1, bottom_idx, right_idx] = bottom_idx - scale_center_y # Generate centripetal shift if with_centripetal_shift: gt_tl_centripetal_shift[batch_id, 0, top_idx, left_idx] = log(scale_center_x - scale_left) gt_tl_centripetal_shift[batch_id, 1, top_idx, left_idx] = log(scale_center_y - scale_top) gt_br_centripetal_shift[batch_id, 0, bottom_idx, right_idx] = log(scale_right - scale_center_x) gt_br_centripetal_shift[batch_id, 1, bottom_idx, right_idx] = log(scale_bottom - scale_center_y) if with_corner_emb: match.append(corner_match) target_result = dict( topleft_heatmap=gt_tl_heatmap, topleft_offset=gt_tl_offset, bottomright_heatmap=gt_br_heatmap, bottomright_offset=gt_br_offset) if with_corner_emb: target_result.update(corner_embedding=match) if with_guiding_shift: target_result.update( topleft_guiding_shift=gt_tl_guiding_shift, bottomright_guiding_shift=gt_br_guiding_shift) if with_centripetal_shift: target_result.update( topleft_centripetal_shift=gt_tl_centripetal_shift, bottomright_centripetal_shift=gt_br_centripetal_shift) return target_result def loss(self, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of 
the head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [left, top, right, bottom] format. gt_labels (list[Tensor]): Class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - pull_loss (list[Tensor]): Part one of AssociativeEmbedding losses of all feature levels. - push_loss (list[Tensor]): Part two of AssociativeEmbedding losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. """ targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, img_metas[0]['pad_shape'], with_corner_emb=self.with_corner_emb) mlvl_targets = [targets for _ in range(self.num_feat_levels)] det_losses, pull_losses, push_losses, off_losses = multi_apply( self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, mlvl_targets) loss_dict = dict(det_loss=det_losses, off_loss=off_losses) if self.with_corner_emb: loss_dict.update(pull_loss=pull_losses, push_loss=push_losses) return loss_dict def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off, targets): """Compute losses for a single level. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_emb (Tensor): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - pull_loss (Tensor): Part one of AssociativeEmbedding loss. - push_loss (Tensor): Part two of AssociativeEmbedding loss. - off_loss (Tensor): Corner offset loss.
""" gt_tl_hmp = targets['topleft_heatmap'] gt_br_hmp = targets['bottomright_heatmap'] gt_tl_off = targets['topleft_offset'] gt_br_off = targets['bottomright_offset'] gt_embedding = targets['corner_embedding'] # Detection loss tl_det_loss = self.loss_heatmap( tl_hmp.sigmoid(), gt_tl_hmp, avg_factor=max(1, gt_tl_hmp.eq(1).sum())) br_det_loss = self.loss_heatmap( br_hmp.sigmoid(), gt_br_hmp, avg_factor=max(1, gt_br_hmp.eq(1).sum())) det_loss = (tl_det_loss + br_det_loss) / 2.0 # AssociativeEmbedding loss if self.with_corner_emb and self.loss_embedding is not None: pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb, gt_embedding) else: pull_loss, push_loss = None, None # Offset loss # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_hmp) br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_hmp) tl_off_loss = self.loss_offset( tl_off, gt_tl_off, tl_off_mask, avg_factor=max(1, tl_off_mask.sum())) br_off_loss = self.loss_offset( br_off, gt_br_off, br_off_mask, avg_factor=max(1, br_off_mask.sum())) off_loss = (tl_off_loss + br_off_loss) / 2.0 return det_loss, pull_loss, push_loss, off_loss def get_bboxes(self, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, img_metas, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. """ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], img_metas[img_id], tl_emb=tl_embs[-1][img_id:img_id + 1, :], br_emb=br_embs[-1][img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list def _get_bboxes_single(self, tl_heat, br_heat, tl_off, br_off, img_meta, tl_emb=None, br_emb=None, tl_centripetal_shift=None, br_centripetal_shift=None, rescale=False, with_nms=True): """Transform outputs for a single batch item into bbox predictions. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). 
br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. tl_emb (Tensor): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_centripetal_shift: Top-left corner's centripetal shift for current level with shape (N, 2, H, W). br_centripetal_shift: Bottom-right corner's centripetal shift for current level with shape (N, 2, H, W). rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. """ if isinstance(img_meta, (list, tuple)): img_meta = img_meta[0] batch_bboxes, batch_scores, batch_clses = self.decode_heatmap( tl_heat=tl_heat.sigmoid(), br_heat=br_heat.sigmoid(), tl_off=tl_off, br_off=br_off, tl_emb=tl_emb, br_emb=br_emb, tl_centripetal_shift=tl_centripetal_shift, br_centripetal_shift=br_centripetal_shift, img_meta=img_meta, k=self.test_cfg.corner_topk, kernel=self.test_cfg.local_maximum_kernel, distance_threshold=self.test_cfg.distance_threshold) if rescale: batch_bboxes /= img_meta['scale_factor'] bboxes = batch_bboxes.view([-1, 4]) scores = batch_scores.view([-1, 1]) clses = batch_clses.view([-1, 1]) idx = scores.argsort(dim=0, descending=True) bboxes = bboxes[idx].view([-1, 4]) scores = scores[idx].view(-1) clses = clses[idx].view(-1) detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1) keepinds = (detections[:, -1] > -0.1) detections = detections[keepinds] labels = clses[keepinds] if with_nms: detections, labels = self._bboxes_nms(detections, labels, self.test_cfg) return detections, labels def _bboxes_nms(self, bboxes, labels, cfg): out_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels, cfg.nms_cfg) out_labels = labels[keep] if len(out_bboxes) > 0: idx = torch.argsort(out_bboxes[:, -1], descending=True) idx = idx[:cfg.max_per_img] out_bboxes = out_bboxes[idx] out_labels = out_labels[idx] return out_bboxes, out_labels def _gather_feat(self, feat, ind, mask=None): """Gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. mask (Tensor | None): Mask of featuremap. Default: None. Returns: feat (Tensor): Gathered feature. """ dim = feat.size(2) ind = ind.unsqueeze(2).repeat(1, 1, dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def _local_maximum(self, heat, kernel=3): """Extract local maximum pixels with a given kernel. Args: heat (Tensor): Target heatmap. kernel (int): Kernel size of max pooling. Default: 3. Returns: heat (Tensor): A heatmap where local maximum pixels maintain their own values and other positions are 0. """ pad = (kernel - 1) // 2 hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad) keep = (hmax == heat).float() return heat * keep def _transpose_and_gather_feat(self, feat, ind): """Transpose and gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. Returns: feat (Tensor): Transposed and gathered feature. """ feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, feat.size(3)) feat = self._gather_feat(feat, ind) return feat def _topk(self, scores, k=20): """Get top k positions from heatmap.
Args: scores (Tensor): Target heatmap with shape [batch, num_classes, height, width]. k (int): Target number. Default: 20. Returns: tuple[torch.Tensor]: Scores, indexes, categories and coords of topk keypoint. Containing following Tensors: - topk_scores (Tensor): Max scores of each topk keypoint. - topk_inds (Tensor): Indexes of each topk keypoint. - topk_clses (Tensor): Categories of each topk keypoint. - topk_ys (Tensor): Y-coord of each topk keypoint. - topk_xs (Tensor): X-coord of each topk keypoint. """ batch, _, height, width = scores.size() topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k) topk_clses = topk_inds // (height * width) topk_inds = topk_inds % (height * width) topk_ys = topk_inds // width topk_xs = (topk_inds % width).int().float() return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs def decode_heatmap(self, tl_heat, br_heat, tl_off, br_off, tl_emb=None, br_emb=None, tl_centripetal_shift=None, br_centripetal_shift=None, img_meta=None, k=100, kernel=3, distance_threshold=0.5, num_dets=1000): """Transform outputs for a single batch item into raw bbox predictions. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). tl_emb (Tensor | None): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor | None): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_centripetal_shift (Tensor | None): Top-left centripetal shift for current level with shape (N, 2, H, W). br_centripetal_shift (Tensor | None): Bottom-right centripetal shift for current level with shape (N, 2, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. k (int): Get top k corner keypoints from heatmap. kernel (int): Max pooling kernel for extract local maximum pixels. distance_threshold (float): Distance threshold. Top-left and bottom-right corner keypoints with feature distance less than the threshold will be regarded as keypoints from same object. num_dets (int): Num of raw boxes before doing nms. Returns: tuple[torch.Tensor]: Decoded output of CornerHead, containing the following Tensors: - bboxes (Tensor): Coords of each box. - scores (Tensor): Scores of each box. - clses (Tensor): Categories of each box. """ with_embedding = tl_emb is not None and br_emb is not None with_centripetal_shift = ( tl_centripetal_shift is not None and br_centripetal_shift is not None) assert with_embedding + with_centripetal_shift == 1 batch, _, height, width = tl_heat.size() inp_h, inp_w, _ = img_meta['pad_shape'] # perform nms on heatmaps tl_heat = self._local_maximum(tl_heat, kernel=kernel) br_heat = self._local_maximum(br_heat, kernel=kernel) tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k) br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k) # We use repeat instead of expand here because expand is a # shallow-copy function. Thus it could cause unexpected testing result # sometimes. Using expand will decrease about 10% mAP during testing # compared to repeat. 
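        # (added note) the view/repeat calls below broadcast the k top-left
        # corners against the k bottom-right corners, enumerating all k x k
        # candidate pairs; implausible pairs are suppressed further down by
        # setting their scores to -1 (class mismatch, geometry, distance).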
tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k) tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k) br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1) br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1) tl_off = self._transpose_and_gather_feat(tl_off, tl_inds) tl_off = tl_off.view(batch, k, 1, 2) br_off = self._transpose_and_gather_feat(br_off, br_inds) br_off = br_off.view(batch, 1, k, 2) tl_xs = tl_xs + tl_off[..., 0] tl_ys = tl_ys + tl_off[..., 1] br_xs = br_xs + br_off[..., 0] br_ys = br_ys + br_off[..., 1] if with_centripetal_shift: tl_centripetal_shift = self._transpose_and_gather_feat( tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp() br_centripetal_shift = self._transpose_and_gather_feat( br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp() tl_ctxs = tl_xs + tl_centripetal_shift[..., 0] tl_ctys = tl_ys + tl_centripetal_shift[..., 1] br_ctxs = br_xs - br_centripetal_shift[..., 0] br_ctys = br_ys - br_centripetal_shift[..., 1] # all possible boxes based on top k corners (ignoring class) tl_xs *= (inp_w / width) tl_ys *= (inp_h / height) br_xs *= (inp_w / width) br_ys *= (inp_h / height) if with_centripetal_shift: tl_ctxs *= (inp_w / width) tl_ctys *= (inp_h / height) br_ctxs *= (inp_w / width) br_ctys *= (inp_h / height) x_off = img_meta['border'][2] y_off = img_meta['border'][0] tl_xs -= x_off tl_ys -= y_off br_xs -= x_off br_ys -= y_off tl_xs *= tl_xs.gt(0.0).type_as(tl_xs) tl_ys *= tl_ys.gt(0.0).type_as(tl_ys) br_xs *= br_xs.gt(0.0).type_as(br_xs) br_ys *= br_ys.gt(0.0).type_as(br_ys) bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3) area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs() if with_centripetal_shift: tl_ctxs -= x_off tl_ctys -= y_off br_ctxs -= x_off br_ctys -= y_off tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs) tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys) br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs) br_ctys *= br_ctys.gt(0.0).type_as(br_ctys) ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys), dim=3) area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs() rcentral = torch.zeros_like(ct_bboxes) # magic nums from paper section 4.1 mu = torch.ones_like(area_bboxes) / 2.4 mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2 bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2 rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) * (rcentral[..., 3] - rcentral[..., 1])).abs() dists = area_ct_bboxes / area_rcentral tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | ( ct_bboxes[..., 0] >= rcentral[..., 2]) tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | ( ct_bboxes[..., 1] >= rcentral[..., 3]) br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | ( ct_bboxes[..., 2] >= rcentral[..., 2]) br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | ( ct_bboxes[..., 3] >= rcentral[..., 3]) if with_embedding: tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds) tl_emb = tl_emb.view(batch, k, 1) br_emb = self._transpose_and_gather_feat(br_emb, br_inds) br_emb = br_emb.view(batch, 1, k) dists = torch.abs(tl_emb - br_emb) tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k) br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1) scores = (tl_scores + 
br_scores) / 2 # scores for all possible boxes # tl and br should have same class tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k) br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1) cls_inds = (tl_clses != br_clses) # reject boxes based on distances dist_inds = dists > distance_threshold # reject boxes based on widths and heights width_inds = (br_xs <= tl_xs) height_inds = (br_ys <= tl_ys) scores[cls_inds] = -1 scores[width_inds] = -1 scores[height_inds] = -1 scores[dist_inds] = -1 if with_centripetal_shift: scores[tl_ctx_inds] = -1 scores[tl_cty_inds] = -1 scores[br_ctx_inds] = -1 scores[br_cty_inds] = -1 scores = scores.view(batch, -1) scores, inds = torch.topk(scores, num_dets) scores = scores.unsqueeze(2) bboxes = bboxes.view(batch, -1, 4) bboxes = self._gather_feat(bboxes, inds) clses = tl_clses.contiguous().view(batch, -1, 1) clses = self._gather_feat(clses, inds).float() return bboxes, scores, clses
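# --- Illustrative sketch (added; not part of the original file) ---
# A self-contained mirror of the heatmap peak extraction used by
# ``decode_heatmap`` above: 3x3 max pooling keeps only pixels that equal
# their local maximum, then topk selects candidate corners. Tensor sizes
# are assumptions for demonstration only.
#
#   import torch
#   import torch.nn.functional as F
#
#   def local_maximum(heat, kernel=3):  # mirrors CornerHead._local_maximum
#       pad = (kernel - 1) // 2
#       hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
#       return heat * (hmax == heat).float()
#
#   heat = torch.rand(1, 80, 32, 32)
#   peaks = local_maximum(heat)                     # non-peaks zeroed out
#   scores, inds = torch.topk(peaks.view(1, -1), 20)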
46,260
42.437559
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/yolact_head.py
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, xavier_init from mmcv.runner import force_fp32 from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply from ..builder import HEADS, build_loss from .anchor_head import AnchorHead @HEADS.register_module() class YOLACTHead(AnchorHead): """YOLACT box head used in https://arxiv.org/abs/1904.02689. Note that YOLACT head is a light version of RetinaNet head. Four differences are described as follows: 1. YOLACT box head has three-times fewer anchors. 2. YOLACT box head shares the convs for box and cls branches. 3. YOLACT box head uses OHEM instead of Focal loss. 4. YOLACT box head predicts a set of mask coefficients for each box. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. anchor_generator (dict): Config dict for anchor generator loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. num_head_convs (int): Number of the conv layers shared by box and cls branches. num_protos (int): Number of the mask coefficients. use_ohem (bool): If true, ``loss_single_OHEM`` will be used for cls loss calculation. If false, ``loss_single`` will be used. conv_cfg (dict): Dictionary to construct and config conv layer. norm_cfg (dict): Dictionary to construct and config norm layer. """ def __init__(self, num_classes, in_channels, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=3, scales_per_octave=1, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, reduction='none', loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.5), num_head_convs=1, num_protos=32, use_ohem=True, conv_cfg=None, norm_cfg=None, **kwargs): self.num_head_convs = num_head_convs self.num_protos = num_protos self.use_ohem = use_ohem self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(YOLACTHead, self).__init__( num_classes, in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, anchor_generator=anchor_generator, **kwargs) if self.use_ohem: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.sampling = False def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.head_convs = nn.ModuleList() for i in range(self.num_head_convs): chn = self.in_channels if i == 0 else self.feat_channels self.head_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_cls = nn.Conv2d( self.feat_channels, self.num_anchors * self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d( self.feat_channels, self.num_anchors * 4, 3, padding=1) self.conv_coeff = nn.Conv2d( self.feat_channels, self.num_anchors * self.num_protos, 3, padding=1) def init_weights(self): """Initialize weights of the head.""" for m in self.head_convs: xavier_init(m.conv, distribution='uniform', bias=0) xavier_init(self.conv_cls, distribution='uniform', bias=0) xavier_init(self.conv_reg, distribution='uniform', bias=0) xavier_init(self.conv_coeff, distribution='uniform', bias=0) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_anchors * num_classes. 
bbox_pred (Tensor): Box energies / deltas for a single scale \
                    level, the channels number is num_anchors * 4.
                coeff_pred (Tensor): Mask coefficients for a single scale \
                    level, the channels number is num_anchors * num_protos.
        """
        for head_conv in self.head_convs:
            x = head_conv(x)
        cls_score = self.conv_cls(x)
        bbox_pred = self.conv_reg(x)
        coeff_pred = self.conv_coeff(x).tanh()
        return cls_score, bbox_pred, coeff_pred

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """A combination of func:``AnchorHead.loss`` and func:``SSDHead.loss``.

        When ``self.use_ohem == True``, it functions like ``SSDHead.loss``;
        otherwise it follows ``AnchorHead.loss``. Besides, it additionally
        returns ``sampling_results``.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss. Default: None

        Returns:
            tuple:
                dict[str, Tensor]: A dictionary of loss components.
                List[:obj:``SamplingResult``]: Sampler results for each image.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            unmap_outputs=not self.use_ohem,
            return_sampling_results=True)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg, sampling_results) = cls_reg_targets

        if self.use_ohem:
            num_images = len(img_metas)
            all_cls_scores = torch.cat([
                s.permute(0, 2, 3, 1).reshape(
                    num_images, -1, self.cls_out_channels) for s in cls_scores
            ], 1)
            all_labels = torch.cat(labels_list, -1).view(num_images, -1)
            all_label_weights = torch.cat(label_weights_list,
                                          -1).view(num_images, -1)
            all_bbox_preds = torch.cat([
                b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
                for b in bbox_preds
            ], -2)
            all_bbox_targets = torch.cat(bbox_targets_list,
                                         -2).view(num_images, -1, 4)
            all_bbox_weights = torch.cat(bbox_weights_list,
                                         -2).view(num_images, -1, 4)

            # concat all level anchors to a single tensor
            all_anchors = []
            for i in range(num_images):
                all_anchors.append(torch.cat(anchor_list[i]))

            # check NaN and Inf
            assert torch.isfinite(all_cls_scores).all().item(), \
                'classification scores become infinite or NaN!'
            assert torch.isfinite(all_bbox_preds).all().item(), \
                'bbox predictions become infinite or NaN!'
            losses_cls, losses_bbox = multi_apply(
                self.loss_single_OHEM,
                all_cls_scores,
                all_bbox_preds,
                all_anchors,
                all_labels,
                all_label_weights,
                all_bbox_targets,
                all_bbox_weights,
                num_total_samples=num_total_pos)
        else:
            num_total_samples = (
                num_total_pos +
                num_total_neg if self.sampling else num_total_pos)

            # anchor number of multi levels
            num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
            # concat all level anchors and flags to a single tensor
            concat_anchor_list = []
            for i in range(len(anchor_list)):
                concat_anchor_list.append(torch.cat(anchor_list[i]))
            all_anchor_list = images_to_levels(concat_anchor_list,
                                               num_level_anchors)
            losses_cls, losses_bbox = multi_apply(
                self.loss_single,
                cls_scores,
                bbox_preds,
                all_anchor_list,
                labels_list,
                label_weights_list,
                bbox_targets_list,
                bbox_weights_list,
                num_total_samples=num_total_samples)

        return dict(
            loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results

    def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
                         label_weights, bbox_targets, bbox_weights,
                         num_total_samples):
        """See func:``SSDHead.loss``."""
        loss_cls_all = self.loss_cls(cls_score, labels, label_weights)

        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        pos_inds = ((labels >= 0) &
                    (labels < self.num_classes)).nonzero().reshape(-1)
        neg_inds = (labels == self.num_classes).nonzero().view(-1)

        num_pos_samples = pos_inds.size(0)
        if num_pos_samples == 0:
            num_neg_samples = neg_inds.size(0)
        else:
            num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
            if num_neg_samples > neg_inds.size(0):
                num_neg_samples = neg_inds.size(0)
        topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
        loss_cls_pos = loss_cls_all[pos_inds].sum()
        loss_cls_neg = topk_loss_cls_neg.sum()
        loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
        if self.reg_decoded_bbox:
            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
        loss_bbox = self.loss_bbox(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            avg_factor=num_total_samples)
        return loss_cls[None], loss_bbox

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   coeff_preds,
                   img_metas,
                   cfg=None,
                   rescale=False):
        """Similar to func:``AnchorHead.get_bboxes``, but additionally
        processes coeff_preds.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                with shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            coeff_preds (list[Tensor]): Mask coefficients for each scale
                level with shape (N, num_anchors * num_protos, H, W)
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used
            rescale (bool): If True, return boxes in original image space.
                Default: False.

        Returns:
            list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is
                a 3-tuple. The first item is an (n, 5) tensor, where the
                first 4 columns are bounding box positions
                (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
                between 0 and 1. The second item is an (n,) tensor where each
                item is the predicted class label of the corresponding box.
                The third item is an (n, num_protos) tensor where each item
                is the predicted mask coefficients of instance inside the
                corresponding box.
""" assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) device = cls_scores[0].device featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_anchors = self.anchor_generator.grid_anchors( featmap_sizes, device=device) det_bboxes = [] det_labels = [] det_coeffs = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] coeff_pred_list = [ coeff_preds[i][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list, coeff_pred_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale) det_bboxes.append(bbox_res[0]) det_labels.append(bbox_res[1]) det_coeffs.append(bbox_res[2]) return det_bboxes, det_labels, det_coeffs def _get_bboxes_single(self, cls_score_list, bbox_pred_list, coeff_preds_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): """"Similiar to func:``AnchorHead._get_bboxes_single``, but additionally processes coeff_preds_list and uses fast NMS instead of traditional NMS. Args: cls_score_list (list[Tensor]): Box scores for a single scale level Has shape (num_anchors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas for a single scale level with shape (num_anchors * 4, H, W). coeff_preds_list (list[Tensor]): Mask coefficients for a single scale level with shape (num_anchors * num_protos, H, W). mlvl_anchors (list[Tensor]): Box reference for a single scale level with shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Returns: tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is an (n,) tensor where each item is the predicted class label of the corresponding box. The third item is an (n, num_protos) tensor where each item is the predicted mask coefficients of instance inside the corresponding box. """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] mlvl_coeffs = [] for cls_score, bbox_pred, coeff_pred, anchors in \ zip(cls_score_list, bbox_pred_list, coeff_preds_list, mlvl_anchors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) coeff_pred = coeff_pred.permute(1, 2, 0).reshape(-1, self.num_protos) nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: # Get maximum scores for foreground classes. 
if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] coeff_pred = coeff_pred[topk_inds, :] bboxes = self.bbox_coder.decode( anchors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_coeffs.append(coeff_pred) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) mlvl_coeffs = torch.cat(mlvl_coeffs) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores, mlvl_coeffs, cfg.score_thr, cfg.iou_thr, cfg.top_k, cfg.max_per_img) return det_bboxes, det_labels, det_coeffs @HEADS.register_module() class YOLACTSegmHead(nn.Module): """YOLACT segmentation head used in https://arxiv.org/abs/1904.02689. Apply a semantic segmentation loss on feature space using layers that are only evaluated during training to increase performance with no speed penalty. Args: in_channels (int): Number of channels in the input feature map. num_classes (int): Number of categories excluding the background category. loss_segm (dict): Config of semantic segmentation loss. """ def __init__(self, num_classes, in_channels=256, loss_segm=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)): super(YOLACTSegmHead, self).__init__() self.in_channels = in_channels self.num_classes = num_classes self.loss_segm = build_loss(loss_segm) self._init_layers() self.fp16_enabled = False def _init_layers(self): """Initialize layers of the head.""" self.segm_conv = nn.Conv2d( self.in_channels, self.num_classes, kernel_size=1) def init_weights(self): """Initialize weights of the head.""" xavier_init(self.segm_conv, distribution='uniform') def forward(self, x): """Forward feature from the upstream network. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. Returns: Tensor: Predicted semantic segmentation map with shape (N, num_classes, H, W). """ return self.segm_conv(x) @force_fp32(apply_to=('segm_pred', )) def loss(self, segm_pred, gt_masks, gt_labels): """Compute loss of the head. Args: segm_pred (list[Tensor]): Predicted semantic segmentation map with shape (N, num_classes, H, W). gt_masks (list[Tensor]): Ground truth masks for each image with the same shape of the input image. gt_labels (list[Tensor]): Class indices corresponding to each box. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" loss_segm = [] num_imgs, num_classes, mask_h, mask_w = segm_pred.size() for idx in range(num_imgs): cur_segm_pred = segm_pred[idx] cur_gt_masks = gt_masks[idx].float() cur_gt_labels = gt_labels[idx] segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks, cur_gt_labels) if segm_targets is None: loss = self.loss_segm(cur_segm_pred, torch.zeros_like(cur_segm_pred), torch.zeros_like(cur_segm_pred)) else: loss = self.loss_segm( cur_segm_pred, segm_targets, avg_factor=num_imgs * mask_h * mask_w) loss_segm.append(loss) return dict(loss_segm=loss_segm) def get_targets(self, segm_pred, gt_masks, gt_labels): """Compute semantic segmentation targets for each image. Args: segm_pred (Tensor): Predicted semantic segmentation map with shape (num_classes, H, W). gt_masks (Tensor): Ground truth masks for each image with the same shape of the input image. gt_labels (Tensor): Class indices corresponding to each box. Returns: Tensor: Semantic segmentation targets with shape (num_classes, H, W). """ if gt_masks.size(0) == 0: return None num_classes, mask_h, mask_w = segm_pred.size() with torch.no_grad(): downsampled_masks = F.interpolate( gt_masks.unsqueeze(0), (mask_h, mask_w), mode='bilinear', align_corners=False).squeeze(0) downsampled_masks = downsampled_masks.gt(0.5).float() segm_targets = torch.zeros_like(segm_pred, requires_grad=False) for obj_idx in range(downsampled_masks.size(0)): segm_targets[gt_labels[obj_idx] - 1] = torch.max( segm_targets[gt_labels[obj_idx] - 1], downsampled_masks[obj_idx]) return segm_targets @HEADS.register_module() class YOLACTProtonet(nn.Module): """YOLACT mask head used in https://arxiv.org/abs/1904.02689. This head outputs the mask prototypes for YOLACT. Args: in_channels (int): Number of channels in the input feature map. proto_channels (tuple[int]): Output channels of protonet convs. proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs. include_last_relu (Bool): If keep the last relu of protonet. num_protos (int): Number of prototypes. num_classes (int): Number of categories excluding the background category. loss_mask_weight (float): Reweight the mask loss by this factor. max_masks_to_train (int): Maximum number of masks to train for each image. 
""" def __init__(self, num_classes, in_channels=256, proto_channels=(256, 256, 256, None, 256, 32), proto_kernel_sizes=(3, 3, 3, -2, 3, 1), include_last_relu=True, num_protos=32, loss_mask_weight=1.0, max_masks_to_train=100): super(YOLACTProtonet, self).__init__() self.in_channels = in_channels self.proto_channels = proto_channels self.proto_kernel_sizes = proto_kernel_sizes self.include_last_relu = include_last_relu self.protonet = self._init_layers() self.loss_mask_weight = loss_mask_weight self.num_protos = num_protos self.num_classes = num_classes self.max_masks_to_train = max_masks_to_train self.fp16_enabled = False def _init_layers(self): """A helper function to take a config setting and turn it into a network.""" # Possible patterns: # ( 256, 3) -> conv # ( 256,-2) -> deconv # (None,-2) -> bilinear interpolate in_channels = self.in_channels protonets = nn.ModuleList() for num_channels, kernel_size in zip(self.proto_channels, self.proto_kernel_sizes): if kernel_size > 0: layer = nn.Conv2d( in_channels, num_channels, kernel_size, padding=kernel_size // 2) else: if num_channels is None: layer = InterpolateModule( scale_factor=-kernel_size, mode='bilinear', align_corners=False) else: layer = nn.ConvTranspose2d( in_channels, num_channels, -kernel_size, padding=kernel_size // 2) protonets.append(layer) protonets.append(nn.ReLU(inplace=True)) in_channels = num_channels if num_channels is not None \ else in_channels if not self.include_last_relu: protonets = protonets[:-1] return nn.Sequential(*protonets) def init_weights(self): """Initialize weights of the head.""" for m in self.protonet: if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None): """Forward feature from the upstream network to get prototypes and linearly combine the prototypes, using masks coefficients, into instance masks. Finally, crop the instance masks with given bboxes. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. coeff_pred (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W). bboxes (list[Tensor]): Box used for cropping with shape (N, num_anchors * 4, H, W). During training, they are ground truth boxes. During testing, they are predicted boxes. img_meta (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. sampling_results (List[:obj:``SamplingResult``]): Sampler results for each image. Returns: list[Tensor]: Predicted instance segmentation masks. 
""" prototypes = self.protonet(x) prototypes = prototypes.permute(0, 2, 3, 1).contiguous() num_imgs = x.size(0) # Training state if self.training: coeff_pred_list = [] for coeff_pred_per_level in coeff_pred: coeff_pred_per_level = \ coeff_pred_per_level.permute(0, 2, 3, 1)\ .reshape(num_imgs, -1, self.num_protos) coeff_pred_list.append(coeff_pred_per_level) coeff_pred = torch.cat(coeff_pred_list, dim=1) mask_pred_list = [] for idx in range(num_imgs): cur_prototypes = prototypes[idx] cur_coeff_pred = coeff_pred[idx] cur_bboxes = bboxes[idx] cur_img_meta = img_meta[idx] # Testing state if not self.training: bboxes_for_cropping = cur_bboxes else: cur_sampling_results = sampling_results[idx] pos_assigned_gt_inds = \ cur_sampling_results.pos_assigned_gt_inds bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone() pos_inds = cur_sampling_results.pos_inds cur_coeff_pred = cur_coeff_pred[pos_inds] # Linearly combine the prototypes with the mask coefficients mask_pred = cur_prototypes @ cur_coeff_pred.t() mask_pred = torch.sigmoid(mask_pred) h, w = cur_img_meta['img_shape'][:2] bboxes_for_cropping[:, 0] /= w bboxes_for_cropping[:, 1] /= h bboxes_for_cropping[:, 2] /= w bboxes_for_cropping[:, 3] /= h mask_pred = self.crop(mask_pred, bboxes_for_cropping) mask_pred = mask_pred.permute(2, 0, 1).contiguous() mask_pred_list.append(mask_pred) return mask_pred_list @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results): """Compute loss of the head. Args: mask_pred (list[Tensor]): Predicted prototypes with shape (num_classes, H, W). gt_masks (list[Tensor]): Ground truth masks for each image with the same shape of the input image. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. img_meta (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. sampling_results (List[:obj:``SamplingResult``]): Sampler results for each image. Returns: dict[str, Tensor]: A dictionary of loss components. """ loss_mask = [] num_imgs = len(mask_pred) total_pos = 0 for idx in range(num_imgs): cur_mask_pred = mask_pred[idx] cur_gt_masks = gt_masks[idx].float() cur_gt_bboxes = gt_bboxes[idx] cur_img_meta = img_meta[idx] cur_sampling_results = sampling_results[idx] pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds num_pos = pos_assigned_gt_inds.size(0) # Since we're producing (near) full image masks, # it'd take too much vram to backprop on every single mask. # Thus we select only a subset. if num_pos > self.max_masks_to_train: perm = torch.randperm(num_pos) select = perm[:self.max_masks_to_train] cur_mask_pred = cur_mask_pred[select] pos_assigned_gt_inds = pos_assigned_gt_inds[select] num_pos = self.max_masks_to_train total_pos += num_pos gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds] mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks, pos_assigned_gt_inds) if num_pos == 0: loss = cur_mask_pred.sum() * 0. 
elif mask_targets is None: loss = F.binary_cross_entropy(cur_mask_pred, torch.zeros_like(cur_mask_pred), torch.zeros_like(cur_mask_pred)) else: cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1) loss = F.binary_cross_entropy( cur_mask_pred, mask_targets, reduction='none') * self.loss_mask_weight h, w = cur_img_meta['img_shape'][:2] gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] - gt_bboxes_for_reweight[:, 0]) / w gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] - gt_bboxes_for_reweight[:, 1]) / h loss = loss.mean(dim=(1, 2)) / gt_bboxes_width / gt_bboxes_height loss = torch.sum(loss) loss_mask.append(loss) if total_pos == 0: total_pos += 1 # avoid nan loss_mask = [x / total_pos for x in loss_mask] return dict(loss_mask=loss_mask) def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds): """Compute instance segmentation targets for each image. Args: mask_pred (Tensor): Predicted prototypes with shape (num_classes, H, W). gt_masks (Tensor): Ground truth masks for each image with the same shape of the input image. pos_assigned_gt_inds (Tensor): GT indices of the corresponding positive samples. Returns: Tensor: Instance segmentation targets with shape (num_instances, H, W). """ if gt_masks.size(0) == 0: return None mask_h, mask_w = mask_pred.shape[-2:] gt_masks = F.interpolate( gt_masks.unsqueeze(0), (mask_h, mask_w), mode='bilinear', align_corners=False).squeeze(0) gt_masks = gt_masks.gt(0.5).float() mask_targets = gt_masks[pos_assigned_gt_inds] return mask_targets def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale): """Resize, binarize, and format the instance mask predictions. Args: mask_pred (Tensor): shape (N, H, W). label_pred (Tensor): shape (N, ). img_meta (dict): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If rescale is False, then returned masks will fit the scale of imgs[0]. Returns: list[ndarray]: Mask predictions grouped by their predicted classes. """ ori_shape = img_meta['ori_shape'] scale_factor = img_meta['scale_factor'] if rescale: img_h, img_w = ori_shape[:2] else: img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32) img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32) cls_segms = [[] for _ in range(self.num_classes)] if mask_pred.size(0) == 0: return cls_segms mask_pred = F.interpolate( mask_pred.unsqueeze(0), (img_h, img_w), mode='bilinear', align_corners=False).squeeze(0) > 0.5 mask_pred = mask_pred.cpu().numpy().astype(np.uint8) for m, l in zip(mask_pred, label_pred): cls_segms[l].append(m) return cls_segms def crop(self, masks, boxes, padding=1): """Crop predicted masks by zeroing out everything not in the predicted bbox. Args: masks (Tensor): shape [H, W, N]. boxes (Tensor): bbox coords in relative point form with shape [N, 4]. Return: Tensor: The cropped masks. 
""" h, w, n = masks.size() x1, x2 = self.sanitize_coordinates( boxes[:, 0], boxes[:, 2], w, padding, cast=False) y1, y2 = self.sanitize_coordinates( boxes[:, 1], boxes[:, 3], h, padding, cast=False) rows = torch.arange( w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n) cols = torch.arange( h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n) masks_left = rows >= x1.view(1, 1, -1) masks_right = rows < x2.view(1, 1, -1) masks_up = cols >= y1.view(1, 1, -1) masks_down = cols < y2.view(1, 1, -1) crop_mask = masks_left * masks_right * masks_up * masks_down return masks * crop_mask.float() def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True): """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size. Also converts from relative to absolute coordinates and casts the results to long tensors. Warning: this does things in-place behind the scenes so copy if necessary. Args: _x1 (Tensor): shape (N, ). _x2 (Tensor): shape (N, ). img_size (int): Size of the input image. padding (int): x1 >= padding, x2 <= image_size-padding. cast (bool): If cast is false, the result won't be cast to longs. Returns: tuple: x1 (Tensor): Sanitized _x1. x2 (Tensor): Sanitized _x2. """ x1 = x1 * img_size x2 = x2 * img_size if cast: x1 = x1.long() x2 = x2.long() x1 = torch.min(x1, x2) x2 = torch.max(x1, x2) x1 = torch.clamp(x1 - padding, min=0) x2 = torch.clamp(x2 + padding, max=img_size) return x1, x2 class InterpolateModule(nn.Module): """This is a module version of F.interpolate. Any arguments you give it just get passed along for the ride. """ def __init__(self, *args, **kwargs): super().__init__() self.args = args self.kwargs = kwargs def forward(self, x): """Forward features from the upstream network.""" return F.interpolate(x, *self.args, **self.kwargs)
GFocalV2-master/mmdet/models/dense_heads/base_dense_head.py
from abc import ABCMeta, abstractmethod

import torch.nn as nn


class BaseDenseHead(nn.Module, metaclass=ABCMeta):
    """Base class for DenseHeads."""

    def __init__(self):
        super(BaseDenseHead, self).__init__()

    @abstractmethod
    def loss(self, **kwargs):
        """Compute losses of the head."""
        pass

    @abstractmethod
    def get_bboxes(self, **kwargs):
        """Transform network output for a batch into bbox predictions."""
        pass

    def forward_train(self,
                      x,
                      img_metas,
                      gt_bboxes,
                      gt_labels=None,
                      gt_bboxes_ignore=None,
                      proposal_cfg=None,
                      **kwargs):
        """
        Args:
            x (list[Tensor]): Features from FPN.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes (Tensor): Ground truth bboxes of the image,
                shape (num_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).
            proposal_cfg (mmcv.Config): Test / postprocessing configuration,
                if None, test_cfg would be used.

        Returns:
            tuple:
                losses (dict[str, Tensor]): A dictionary of loss components.
                proposal_list (list[Tensor]): Proposals of each image.
        """
        outs = self(x)
        if gt_labels is None:
            loss_inputs = outs + (gt_bboxes, img_metas)
        else:
            loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
        losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
        if proposal_cfg is None:
            return losses
        else:
            proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
            return losses, proposal_list
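# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the upstream file): a
# hypothetical toy subclass that makes the ``forward_train`` contract
# concrete -- the head is callable, ``loss`` consumes the forward outputs
# plus the ground truth, and ``get_bboxes`` turns the same outputs into
# proposals. ``ToyDenseHead`` and its zero loss are demo inventions.
if __name__ == '__main__':
    import torch

    class ToyDenseHead(BaseDenseHead):

        def forward(self, feats):
            # one global "objectness" score per feature map
            return ([feat.mean(dim=(2, 3)) for feat in feats], )

        def loss(self, scores, gt_bboxes, img_metas, gt_bboxes_ignore=None):
            # a dummy zero loss per level, shaped like real heads return
            return dict(loss_toy=[s.sum() * 0 for s in scores])

        def get_bboxes(self, scores, img_metas, cfg=None):
            # no detections: one empty (0, 5) tensor per image
            return [scores[0].new_zeros(0, 5) for _ in img_metas]

    head = ToyDenseHead()
    feats = [torch.rand(2, 8, 16, 16)]
    # gt_labels defaults to None, so loss() is called without labels
    print(head.forward_train(feats, img_metas=[dict(), dict()],
                             gt_bboxes=None))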
GFocalV2-master/mmdet/models/dense_heads/free_anchor_retina_head.py
import torch
import torch.nn.functional as F

from mmdet.core import bbox_overlaps
from ..builder import HEADS
from .retina_head import RetinaHead

EPS = 1e-12


@HEADS.register_module()
class FreeAnchorRetinaHead(RetinaHead):
    """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        stacked_convs (int): Number of conv layers in cls and reg tower.
            Default: 4.
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None.
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: norm_cfg=dict(type='GN', num_groups=32,
            requires_grad=True).
        pre_anchor_topk (int): Number of boxes that are taken into each bag.
        bbox_thr (float): The threshold of the saturated linear function.
            It is usually the same with the IoU threshold used in NMS.
        gamma (float): Gamma parameter in focal loss.
        alpha (float): Alpha parameter in focal loss.
    """  # noqa: W605

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 pre_anchor_topk=50,
                 bbox_thr=0.6,
                 gamma=2.0,
                 alpha=0.5,
                 **kwargs):
        super(FreeAnchorRetinaHead,
              self).__init__(num_classes, in_channels, stacked_convs,
                             conv_cfg, norm_cfg, **kwargs)
        self.pre_anchor_topk = pre_anchor_topk
        self.bbox_thr = bbox_thr
        self.gamma = gamma
        self.alpha = alpha

    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Each item is the ground truth boxes of
                each image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == len(self.anchor_generator.base_anchors) anchor_list, _ = self.get_anchors(featmap_sizes, img_metas) anchors = [torch.cat(anchor) for anchor in anchor_list] # concatenate each level cls_scores = [ cls.permute(0, 2, 3, 1).reshape(cls.size(0), -1, self.cls_out_channels) for cls in cls_scores ] bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) for bbox_pred in bbox_preds ] cls_scores = torch.cat(cls_scores, dim=1) bbox_preds = torch.cat(bbox_preds, dim=1) cls_prob = torch.sigmoid(cls_scores) box_prob = [] num_pos = 0 positive_losses = [] for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, bbox_preds_) in enumerate( zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)): with torch.no_grad(): if len(gt_bboxes_) == 0: image_box_prob = torch.zeros( anchors_.size(0), self.cls_out_channels).type_as(bbox_preds_) else: # box_localization: a_{j}^{loc}, shape: [j, 4] pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_) # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes) # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] t1 = self.bbox_thr t2 = object_box_iou.max( dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp( min=0, max=1) # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] num_obj = gt_labels_.size(0) indices = torch.stack([ torch.arange(num_obj).type_as(gt_labels_), gt_labels_ ], dim=0) object_cls_box_prob = torch.sparse_coo_tensor( indices, object_box_prob) # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] """ from "start" to "end" implement: image_box_iou = torch.sparse.max(object_cls_box_prob, dim=0).t() """ # start box_cls_prob = torch.sparse.sum( object_cls_box_prob, dim=0).to_dense() indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() if indices.numel() == 0: image_box_prob = torch.zeros( anchors_.size(0), self.cls_out_channels).type_as(object_box_prob) else: nonzero_box_prob = torch.where( (gt_labels_.unsqueeze(dim=-1) == indices[0]), object_box_prob[:, indices[1]], torch.tensor([ 0 ]).type_as(object_box_prob)).max(dim=0).values # upmap to shape [j, c] image_box_prob = torch.sparse_coo_tensor( indices.flip([0]), nonzero_box_prob, size=(anchors_.size(0), self.cls_out_channels)).to_dense() # end box_prob.append(image_box_prob) # construct bags for objects match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_) _, matched = torch.topk( match_quality_matrix, self.pre_anchor_topk, dim=1, sorted=False) del match_quality_matrix # matched_cls_prob: P_{ij}^{cls} matched_cls_prob = torch.gather( cls_prob_[matched], 2, gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, 1)).squeeze(2) # matched_box_prob: P_{ij}^{loc} matched_anchors = anchors_[matched] matched_object_targets = self.bbox_coder.encode( matched_anchors, gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors)) loss_bbox = self.loss_bbox( bbox_preds_[matched], matched_object_targets, reduction_override='none').sum(-1) matched_box_prob = torch.exp(-loss_bbox) # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} num_pos += len(gt_bboxes_) positive_losses.append( self.positive_bag_loss(matched_cls_prob, matched_box_prob)) positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) # box_prob: P{a_{j} \in A_{+}} box_prob = torch.stack(box_prob, dim=0) # negative_loss: # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| negative_loss = 
self.negative_bag_loss(cls_prob, box_prob).sum() / max(
            1, num_pos * self.pre_anchor_topk)

        # avoid the absence of gradients in regression subnet
        # when no ground-truth in a batch
        if num_pos == 0:
            positive_loss = bbox_preds.sum() * 0

        losses = {
            'positive_bag_loss': positive_loss,
            'negative_bag_loss': negative_loss
        }
        return losses

    def positive_bag_loss(self, matched_cls_prob, matched_box_prob):
        """Compute positive bag loss.

        :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`.

        :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of
        matched samples.

        :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched
        samples.

        Args:
            matched_cls_prob (Tensor): Classification probability of matched
                samples in shape (num_gt, pre_anchor_topk).
            matched_box_prob (Tensor): BBox probability of matched samples,
                in shape (num_gt, pre_anchor_topk).

        Returns:
            Tensor: Positive bag loss in shape (num_gt,).
        """  # noqa: E501, W605
        # bag_prob = Mean-max(matched_prob)
        matched_prob = matched_cls_prob * matched_box_prob
        weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
        weight /= weight.sum(dim=1).unsqueeze(dim=-1)
        bag_prob = (weight * matched_prob).sum(dim=1)
        # positive_bag_loss = -self.alpha * log(bag_prob)
        return self.alpha * F.binary_cross_entropy(
            bag_prob, torch.ones_like(bag_prob), reduction='none')

    def negative_bag_loss(self, cls_prob, box_prob):
        """Compute negative bag loss.

        :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.

        :math:`P_{a_{j} \in A_{+}}`: Box probability of matched samples.

        :math:`P_{j}^{bg}`: Classification probability of negative samples.

        Args:
            cls_prob (Tensor): Classification probability, in shape
                (num_img, num_anchors, num_classes).
            box_prob (Tensor): Box probability, in shape
                (num_img, num_anchors, num_classes).

        Returns:
            Tensor: Negative bag loss in shape (num_img, num_anchors,
                num_classes).
        """  # noqa: E501, W605
        prob = cls_prob * (1 - box_prob)
        # There are some cases when neg_prob = 0.
        # This will cause the neg_prob.log() to be inf without clamp.
        prob = prob.clamp(min=EPS, max=1 - EPS)
        negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
            prob, torch.zeros_like(prob), reduction='none')
        return (1 - self.alpha) * negative_bag_loss
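# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the upstream file): the two
# bag losses on dummy probabilities, to make the Mean-max weighting above
# concrete. The head hyper-parameters and all numbers are arbitrary demo
# values; this assumes mmdet's registries are importable.
if __name__ == '__main__':
    head = FreeAnchorRetinaHead(num_classes=2, in_channels=8)
    # two objects, each with a bag of three candidate anchors
    matched_cls_prob = torch.tensor([[0.9, 0.5, 0.1], [0.4, 0.4, 0.4]])
    matched_box_prob = torch.tensor([[0.8, 0.6, 0.2], [0.5, 0.5, 0.5]])
    # Mean-max pulls the first bag toward its best anchor (~0.50 vs a plain
    # mean of ~0.35); the uniform second bag stays at its mean (0.2)
    print(head.positive_bag_loss(matched_cls_prob, matched_box_prob))
    # negative loss over (num_img, num_anchors, num_classes) probabilities
    cls_prob = torch.rand(1, 6, 2)
    box_prob = torch.rand(1, 6, 2)
    print(head.negative_bag_loss(cls_prob, box_prob).sum())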
GFocalV2-master/mmdet/models/dense_heads/guided_anchor_head.py
import torch import torch.nn as nn from mmcv.cnn import bias_init_with_prob, normal_init from mmcv.ops import DeformConv2d, MaskedConv2d from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_anchor_generator, build_assigner, build_bbox_coder, build_sampler, calc_region, images_to_levels, multi_apply, multiclass_nms, unmap) from ..builder import HEADS, build_loss from .anchor_head import AnchorHead class FeatureAdaption(nn.Module): """Feature Adaption Module. Feature Adaption Module is implemented based on DCN v1. It uses anchor shape prediction rather than feature map to predict offsets of deform conv layer. Args: in_channels (int): Number of channels in the input feature map. out_channels (int): Number of channels in the output feature map. kernel_size (int): Deformable conv kernel size. deform_groups (int): Deformable conv group size. """ def __init__(self, in_channels, out_channels, kernel_size=3, deform_groups=4): super(FeatureAdaption, self).__init__() offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 2, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def init_weights(self): normal_init(self.conv_offset, std=0.1) normal_init(self.conv_adaption, std=0.01) def forward(self, x, shape): offset = self.conv_offset(shape.detach()) x = self.relu(self.conv_adaption(x, offset)) return x @HEADS.register_module() class GuidedAnchorHead(AnchorHead): """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). This GuidedAnchorHead will predict high-quality feature guided anchors and locations where anchors will be kept in inference. There are mainly 3 categories of bounding-boxes. - Sampled 9 pairs for target assignment. (approxes) - The square boxes where the predicted anchors are based on. (squares) - Guided anchors. Please refer to https://arxiv.org/abs/1901.03278 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. approx_anchor_generator (dict): Config dict for approx generator square_anchor_generator (dict): Config dict for square generator anchor_coder (dict): Config dict for anchor coder bbox_coder (dict): Config dict for bbox coder deform_groups: (int): Group number of DCN in FeatureAdaption module. loc_filter_thr (float): Threshold to filter out unconcerned regions. loss_loc (dict): Config of location loss. loss_shape (dict): Config of anchor shape loss. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of bbox regression loss. 
""" def __init__( self, num_classes, in_channels, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0] ), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0] ), reg_decoded_bbox=False, deform_groups=4, loc_filter_thr=0.01, train_cfg=None, test_cfg=None, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)): # yapf: disable super(AnchorHead, self).__init__() self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.deform_groups = deform_groups self.loc_filter_thr = loc_filter_thr # build approx_anchor_generator and square_anchor_generator assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = build_anchor_generator( approx_anchor_generator) self.square_anchor_generator = build_anchor_generator( square_anchor_generator) self.approxs_per_octave = self.approx_anchor_generator \ .num_base_anchors[0] self.reg_decoded_bbox = reg_decoded_bbox # one anchor per location self.num_anchors = 1 self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] self.sampling = loss_cls['type'] not in ['FocalLoss'] self.ga_sampling = train_cfg is not None and hasattr( train_cfg, 'ga_sampler') if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes else: self.cls_out_channels = self.num_classes + 1 # build bbox_coder self.anchor_coder = build_bbox_coder(anchor_coder) self.bbox_coder = build_bbox_coder(bbox_coder) # build losses self.loss_loc = build_loss(loss_loc) self.loss_shape = build_loss(loss_shape) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.ga_assigner = build_assigner(self.train_cfg.ga_assigner) if self.ga_sampling: ga_sampler_cfg = self.train_cfg.ga_sampler else: ga_sampler_cfg = dict(type='PseudoSampler') self.ga_sampler = build_sampler(ga_sampler_cfg, context=self) self.fp16_enabled = False self._init_layers() def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) self.conv_shape = nn.Conv2d(self.in_channels, self.num_anchors * 2, 1) self.feature_adaption = FeatureAdaption( self.in_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = MaskedConv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 1) self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4, 1) def init_weights(self): normal_init(self.conv_cls, 
std=0.01) normal_init(self.conv_reg, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.conv_loc, std=0.01, bias=bias_cls) normal_init(self.conv_shape, std=0.01) self.feature_adaption.init_weights() def forward_single(self, x): loc_pred = self.conv_loc(x) shape_pred = self.conv_shape(x) x = self.feature_adaption(x, shape_pred) # masked conv is only used during inference for speed-up if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.conv_cls(x, mask) bbox_pred = self.conv_reg(x, mask) return cls_score, bbox_pred, shape_pred, loc_pred def forward(self, feats): return multi_apply(self.forward_single, feats) def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'): """Get sampled approxs and inside flags according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): device for returned tensors Returns: tuple: approxes of each image, inside flags of each image """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # approxes for one time multi_level_approxs = self.approx_anchor_generator.grid_anchors( featmap_sizes, device=device) approxs_list = [multi_level_approxs for _ in range(num_imgs)] # for each image, we compute inside flags of multi level approxes inside_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = [] multi_level_approxs = approxs_list[img_id] # obtain valid flags for each approx first multi_level_approx_flags = self.approx_anchor_generator \ .valid_flags(featmap_sizes, img_meta['pad_shape'], device=device) for i, flags in enumerate(multi_level_approx_flags): approxs = multi_level_approxs[i] inside_flags_list = [] for i in range(self.approxs_per_octave): split_valid_flags = flags[i::self.approxs_per_octave] split_approxs = approxs[i::self.approxs_per_octave, :] inside_flags = anchor_inside_flags( split_approxs, split_valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) inside_flags_list.append(inside_flags) # inside_flag for a position is true if any anchor in this # position is true inside_flags = ( torch.stack(inside_flags_list, 0).sum(dim=0) > 0) multi_level_flags.append(inside_flags) inside_flag_list.append(multi_level_flags) return approxs_list, inside_flag_list def get_anchors(self, featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=False, device='cuda'): """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. shape_preds (list[tensor]): Multi-level shape predictions. loc_preds (list[tensor]): Multi-level location predictions. img_metas (list[dict]): Image meta info. use_loc_filter (bool): Use loc filter or not. 
            device (torch.device | str): device for returned tensors

        Returns:
            tuple: square approxs of each image, guided anchors of each
                image, loc masks of each image
        """
        num_imgs = len(img_metas)
        num_levels = len(featmap_sizes)

        # since feature map sizes of all images are the same, we only compute
        # squares for one time
        multi_level_squares = self.square_anchor_generator.grid_anchors(
            featmap_sizes, device=device)
        squares_list = [multi_level_squares for _ in range(num_imgs)]

        # for each image, we compute multi level guided anchors
        guided_anchors_list = []
        loc_mask_list = []
        for img_id, img_meta in enumerate(img_metas):
            multi_level_guided_anchors = []
            multi_level_loc_mask = []
            for i in range(num_levels):
                squares = squares_list[img_id][i]
                shape_pred = shape_preds[i][img_id]
                loc_pred = loc_preds[i][img_id]
                guided_anchors, loc_mask = self._get_guided_anchors_single(
                    squares,
                    shape_pred,
                    loc_pred,
                    use_loc_filter=use_loc_filter)
                multi_level_guided_anchors.append(guided_anchors)
                multi_level_loc_mask.append(loc_mask)
            guided_anchors_list.append(multi_level_guided_anchors)
            loc_mask_list.append(multi_level_loc_mask)
        return squares_list, guided_anchors_list, loc_mask_list

    def _get_guided_anchors_single(self,
                                   squares,
                                   shape_pred,
                                   loc_pred,
                                   use_loc_filter=False):
        """Get guided anchors and loc masks for a single level.

        Args:
            squares (tensor): Squares of a single level.
            shape_pred (tensor): Shape predictions of a single level.
            loc_pred (tensor): Loc predictions of a single level.
            use_loc_filter (bool): Use loc filter or not.

        Returns:
            tuple: guided anchors, location masks
        """
        # calculate location filtering mask
        loc_pred = loc_pred.sigmoid().detach()
        if use_loc_filter:
            loc_mask = loc_pred >= self.loc_filter_thr
        else:
            loc_mask = loc_pred >= 0.0
        mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
        mask = mask.contiguous().view(-1)
        # calculate guided anchors
        squares = squares[mask]
        anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(
            -1, 2).detach()[mask]
        bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
        bbox_deltas[:, 2:] = anchor_deltas
        guided_anchors = self.anchor_coder.decode(
            squares, bbox_deltas, wh_ratio_clip=1e-6)
        return guided_anchors, mask

    def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):
        """Compute location targets for guided anchoring.

        Each feature map is divided into positive, negative and ignore
        regions.

        - positive regions: target 1, weight 1
        - ignore regions: target 0, weight 0
        - negative regions: target 0, weight 0.1

        Args:
            gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
            featmap_sizes (list[tuple]): Multi level sizes of each feature
                maps.

        Returns:
            tuple
        """
        anchor_scale = self.approx_anchor_generator.octave_base_scale
        anchor_strides = self.approx_anchor_generator.strides
        # Currently only supports same stride in x and y direction.
for stride in anchor_strides: assert (stride[0] == stride[1]) anchor_strides = [stride[0] for stride in anchor_strides] center_ratio = self.train_cfg.center_ratio ignore_ratio = self.train_cfg.ignore_ratio img_per_gpu = len(gt_bboxes_list) num_lvls = len(featmap_sizes) r1 = (1 - center_ratio) / 2 r2 = (1 - ignore_ratio) / 2 all_loc_targets = [] all_loc_weights = [] all_ignore_map = [] for lvl_id in range(num_lvls): h, w = featmap_sizes[lvl_id] loc_targets = torch.zeros( img_per_gpu, 1, h, w, device=gt_bboxes_list[0].device, dtype=torch.float32) loc_weights = torch.full_like(loc_targets, -1) ignore_map = torch.zeros_like(loc_targets) all_loc_targets.append(loc_targets) all_loc_weights.append(loc_weights) all_ignore_map.append(ignore_map) for img_id in range(img_per_gpu): gt_bboxes = gt_bboxes_list[img_id] scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) # assign gt bboxes to different feature levels w.r.t. their scales target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() for gt_id in range(gt_bboxes.size(0)): lvl = target_lvls[gt_id].item() # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] # calculate ignore regions ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[lvl]) # calculate positive (center) regions ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( gt_, r1, featmap_sizes[lvl]) all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0 all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 # calculate ignore map on nearby low level feature if lvl > 0: d_lvl = lvl - 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[d_lvl]) all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 # calculate ignore map on nearby high level feature if lvl < num_lvls - 1: u_lvl = lvl + 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[u_lvl]) all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 for lvl_id in range(num_lvls): # ignore negative regions w.r.t. ignore map all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0 # set negative regions with weight 0.1 all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 # loc average factor to balance loss loc_avg_factor = sum( [t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200 return all_loc_targets, all_loc_weights, loc_avg_factor def _ga_shape_target_single(self, flat_approxs, inside_flags, flat_squares, gt_bboxes, gt_bboxes_ignore, img_meta, unmap_outputs=True): """Compute guided anchoring targets. This function returns sampled anchors and gt bboxes directly rather than calculates regression targets. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_bboxes (Tensor): Ground truth bboxes of a single image. 
            gt_bboxes_ignore (Tensor): Ground truth bboxes of a single image
                to be ignored.
            img_meta (dict): Meta info of a single image.
            unmap_outputs (bool): unmap outputs or not.

        Returns:
            tuple
        """
        if not inside_flags.any():
            return (None, ) * 5
        # assign gt and sample anchors
        expand_inside_flags = inside_flags[:, None].expand(
            -1, self.approxs_per_octave).reshape(-1)
        approxs = flat_approxs[expand_inside_flags, :]
        squares = flat_squares[inside_flags, :]

        assign_result = self.ga_assigner.assign(approxs, squares,
                                                self.approxs_per_octave,
                                                gt_bboxes, gt_bboxes_ignore)
        sampling_result = self.ga_sampler.sample(assign_result, squares,
                                                 gt_bboxes)

        bbox_anchors = torch.zeros_like(squares)
        bbox_gts = torch.zeros_like(squares)
        bbox_weights = torch.zeros_like(squares)

        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        if len(pos_inds) > 0:
            bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes
            bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes
            bbox_weights[pos_inds, :] = 1.0

        # map up to original set of anchors
        if unmap_outputs:
            num_total_anchors = flat_squares.size(0)
            bbox_anchors = unmap(bbox_anchors, num_total_anchors,
                                 inside_flags)
            bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)
            bbox_weights = unmap(bbox_weights, num_total_anchors,
                                 inside_flags)

        return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)

    def ga_shape_targets(self,
                         approx_list,
                         inside_flag_list,
                         square_list,
                         gt_bboxes_list,
                         img_metas,
                         gt_bboxes_ignore_list=None,
                         unmap_outputs=True):
        """Compute guided anchoring targets.

        Args:
            approx_list (list[list]): Multi level approxs of each image.
            inside_flag_list (list[list]): Multi level inside flags of each
                image.
            square_list (list[list]): Multi level squares of each image.
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
            img_metas (list[dict]): Meta info of each image.
            gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
            unmap_outputs (bool): unmap outputs or not.

        Returns:
            tuple
        """
        num_imgs = len(img_metas)
        assert len(approx_list) == len(inside_flag_list) == len(
            square_list) == num_imgs
        # anchor number of multi levels
        num_level_squares = [squares.size(0) for squares in square_list[0]]
        # concat all level anchors and flags to a single tensor
        inside_flag_flat_list = []
        approx_flat_list = []
        square_flat_list = []
        for i in range(num_imgs):
            assert len(square_list[i]) == len(inside_flag_list[i])
            inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
            approx_flat_list.append(torch.cat(approx_list[i]))
            square_flat_list.append(torch.cat(square_list[i]))

        # compute targets for each image
        if gt_bboxes_ignore_list is None:
            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
        (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
         neg_inds_list) = multi_apply(
             self._ga_shape_target_single,
             approx_flat_list,
             inside_flag_flat_list,
             square_flat_list,
             gt_bboxes_list,
             gt_bboxes_ignore_list,
             img_metas,
             unmap_outputs=unmap_outputs)
        # no valid anchors
        if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
            return None
        # sampled anchors of all images
        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
        # split targets to a list w.r.t.
multiple levels bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares) bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares) return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos, num_total_neg) def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts, anchor_weights, anchor_total_num): shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) bbox_anchors = bbox_anchors.contiguous().view(-1, 4) bbox_gts = bbox_gts.contiguous().view(-1, 4) anchor_weights = anchor_weights.contiguous().view(-1, 4) bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) bbox_deltas[:, 2:] += shape_pred # filter out negative samples to speed-up weighted_bounded_iou_loss inds = torch.nonzero( anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1) bbox_deltas_ = bbox_deltas[inds] bbox_anchors_ = bbox_anchors[inds] bbox_gts_ = bbox_gts[inds] anchor_weights_ = anchor_weights[inds] pred_anchors_ = self.anchor_coder.decode( bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6) loss_shape = self.loss_shape( pred_anchors_, bbox_gts_, anchor_weights_, avg_factor=anchor_total_num) return loss_shape def loss_loc_single(self, loc_pred, loc_target, loc_weight, loc_avg_factor): loss_loc = self.loss_loc( loc_pred.reshape(-1, 1), loc_target.reshape(-1).long(), loc_weight.reshape(-1), avg_factor=loc_avg_factor) return loss_loc @force_fp32( apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) def loss(self, cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get loc targets loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets( gt_bboxes, featmap_sizes) # get sampled approxes approxs_list, inside_flag_list = self.get_sampled_approxs( featmap_sizes, img_metas, device=device) # get squares and guided anchors squares_list, guided_anchors_list, _ = self.get_anchors( featmap_sizes, shape_preds, loc_preds, img_metas, device=device) # get shape targets shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list, squares_list, gt_bboxes, img_metas) if shape_targets is None: return None (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num, anchor_bg_num) = shape_targets anchor_total_num = ( anchor_fg_num if not self.ga_sampling else anchor_fg_num + anchor_bg_num) # get anchor targets label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( guided_anchors_list, inside_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [ anchors.size(0) for anchors in guided_anchors_list[0] ] # concat all level anchors to a single tensor concat_anchor_list = [] for i in range(len(guided_anchors_list)): concat_anchor_list.append(torch.cat(guided_anchors_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) # get classification and bbox regression losses losses_cls, losses_bbox = multi_apply( 
self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) # get anchor location loss losses_loc = [] for i in range(len(loc_preds)): loss_loc = self.loss_loc_single( loc_preds[i], loc_targets[i], loc_weights[i], loc_avg_factor=loc_avg_factor) losses_loc.append(loss_loc) # get anchor shape loss losses_shape = [] for i in range(len(shape_preds)): loss_shape = self.loss_shape_single( shape_preds[i], bbox_anchors_list[i], bbox_gts_list[i], anchor_weights_list[i], anchor_total_num=anchor_total_num) losses_shape.append(loss_shape) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_shape=losses_shape, loss_loc=losses_loc) @force_fp32( apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) def get_bboxes(self, cls_scores, bbox_preds, shape_preds, loc_preds, img_metas, cfg=None, rescale=False): assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( loc_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device # get guided anchors _, guided_anchors, loc_masks = self.get_anchors( featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=not self.training, device=device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] guided_anchor_list = [ guided_anchors[img_id][i].detach() for i in range(num_levels) ] loc_mask_list = [ loc_masks[img_id][i].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, guided_anchor_list, loc_mask_list, img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, mlvl_masks, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, mlvl_anchors, mlvl_masks): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] # if no location is kept, end. if mask.sum() == 0: continue # reshape scores and bbox_pred cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask, :] bbox_pred = bbox_pred[mask, :] if scores.dim() == 0: anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) bbox_pred = bbox_pred.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. 
scores nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bboxes = self.bbox_coder.decode( anchors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) # multi class NMS det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels
36,268
41.370327
79
py
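The shape targets computed above are gathered per image and then regrouped level-first through images_to_levels before loss_shape_single runs per level. A minimal self-contained sketch of that regrouping, assuming all images share the same per-level anchor counts (the helper below mirrors the mmdet utility of the same name; it is an illustration, not the library code):

import torch

def images_to_levels_sketch(per_image_targets, num_level_anchors):
    # Stack the (num_anchors, ...) tensors from each image, then split the
    # anchor axis into per-level tensors of shape (num_images, n_lvl, ...).
    stacked = torch.stack(per_image_targets, dim=0)
    level_targets, start = [], 0
    for n in num_level_anchors:
        level_targets.append(stacked[:, start:start + n])
        start += n
    return level_targets

# two images, six anchors split across two levels of 4 and 2
targets = [torch.arange(6.).view(6, 1), torch.arange(6., 12.).view(6, 1)]
lvl0, lvl1 = images_to_levels_sketch(targets, [4, 2])
assert lvl0.shape == (2, 4, 1) and lvl1.shape == (2, 2, 1)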
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/sabl_retina_head.py
import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init from mmcv.runner import force_fp32 from mmdet.core import (build_anchor_generator, build_assigner, build_bbox_coder, build_sampler, images_to_levels, multi_apply, multiclass_nms, unmap) from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .guided_anchor_head import GuidedAnchorHead @HEADS.register_module() class SABLRetinaHead(BaseDenseHead): """Side-Aware Boundary Localization (SABL) for RetinaNet. The anchor generation, assigning and sampling in SABLRetinaHead are the same as GuidedAnchorHead for guided anchoring. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of Convs for classification \ and regression branches. Defaults to 4. feat_channels (int): Number of hidden channels. \ Defaults to 256. approx_anchor_generator (dict): Config dict for approx generator. square_anchor_generator (dict): Config dict for square generator. conv_cfg (dict): Config dict for ConvModule. Defaults to None. norm_cfg (dict): Config dict for Norm Layer. Defaults to None. bbox_coder (dict): Config dict for bbox coder. reg_decoded_bbox (bool): Whether to regress decoded bbox. \ Defaults to False. train_cfg (dict): Training config of SABLRetinaHead. test_cfg (dict): Testing config of SABLRetinaHead. loss_cls (dict): Config of classification loss. loss_bbox_cls (dict): Config of classification loss for bbox branch. loss_bbox_reg (dict): Config of regression loss for bbox branch. """ def __init__(self, num_classes, in_channels, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), conv_cfg=None, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), reg_decoded_bbox=False, train_cfg=None, test_cfg=None, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)): super(SABLRetinaHead, self).__init__() self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.num_buckets = bbox_coder['num_buckets'] self.side_num = int(np.ceil(self.num_buckets / 2)) assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = build_anchor_generator( approx_anchor_generator) self.square_anchor_generator = build_anchor_generator( square_anchor_generator) self.approxs_per_octave = ( self.approx_anchor_generator.num_base_anchors[0]) # one anchor per location self.num_anchors = 1 self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.sampling = loss_cls['type'] not in [ 'FocalLoss', 'GHMC', 'QualityFocalLoss' ] if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.bbox_coder = 
build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox_cls = build_loss(loss_bbox_cls) self.loss_bbox_reg = build_loss(loss_bbox_reg) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self._init_layers() def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.retina_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.retina_bbox_reg = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) self.retina_bbox_cls = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) def init_weights(self): for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.retina_cls, std=0.01, bias=bias_cls) normal_init(self.retina_bbox_reg, std=0.01) normal_init(self.retina_bbox_cls, std=0.01) def forward_single(self, x): cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_cls_pred = self.retina_bbox_cls(reg_feat) bbox_reg_pred = self.retina_bbox_reg(reg_feat) bbox_pred = (bbox_cls_pred, bbox_reg_pred) return cls_score, bbox_pred def forward(self, feats): return multi_apply(self.forward_single, feats) def get_anchors(self, featmap_sizes, img_metas, device='cuda'): """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): device for returned tensors Returns: list[list[Tensor]]: Squares of each image. """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = self.square_anchor_generator.grid_anchors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] return squares_list def get_target(self, approx_list, inside_flag_list, square_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=None, sampling=True, unmap_outputs=True): """Compute bucketing targets. Args: approx_list (list[list]): Multi level approxs of each image. inside_flag_list (list[list]): Multi level inside flags of each image. square_list (list[list]): Multi level squares of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. gt_labels_list (list[Tensor]): Gt labels of each image. label_channels (int): Channel of label. sampling (bool): Sample Anchors or not. unmap_outputs (bool): unmap outputs or not.
Returns: tuple: Returns a tuple containing learning targets. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each \ level. - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ each level. - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ each level. - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ each level. - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ each level. - num_total_pos (int): Number of positive samples in all \ images. - num_total_neg (int): Number of negative samples in all \ images. """ num_imgs = len(img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_cls_targets, all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_squares) label_weights_list = images_to_levels(all_label_weights, num_level_squares) bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, num_level_squares) bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, num_level_squares) bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, num_level_squares) bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, num_level_squares) return (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_approxs, inside_flags, flat_squares, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=None, sampling=True, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_bboxes (Tensor): Ground truth bboxes of a single image, \ shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. 
sampling (bool): Sample Anchors or not. unmap_outputs (bool): unmap outputs or not. Returns: tuple: - labels (Tensor): Labels of a single image. - label_weights (Tensor): Label weights of a single image. - bbox_cls_targets (Tensor): BBox cls targets of a single image. - bbox_cls_weights (Tensor): BBox cls weights of a single image. - bbox_reg_targets (Tensor): BBox reg targets of a single image. - bbox_reg_weights (Tensor): BBox reg weights of a single image. - pos_inds (Tensor): Indices of positive anchors in a single image. - neg_inds (Tensor): Indices of negative anchors in a single image. """ if not inside_flags.any(): return (None, ) * 8 # assign gt and sample anchors expand_inside_flags = inside_flags[:, None].expand( -1, self.approxs_per_octave).reshape(-1) approxs = flat_approxs[expand_inside_flags, :] squares = flat_squares[inside_flags, :] assign_result = self.assigner.assign(approxs, squares, self.approxs_per_octave, gt_bboxes, gt_bboxes_ignore) sampling_result = self.sampler.sample(assign_result, squares, gt_bboxes) num_valid_squares = squares.shape[0] bbox_cls_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_cls_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) labels = squares.new_full((num_valid_squares, ), self.num_classes, dtype=torch.long) label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, pos_bbox_cls_weights) = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, inside_flags) bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, inside_flags) bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, inside_flags) bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds) def loss_single(self, cls_score, bbox_pred, labels, label_weights, bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, bbox_reg_weights, num_total_samples): # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # regression loss bbox_cls_targets = bbox_cls_targets.reshape(-1,
self.side_num * 4) bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) (bbox_cls_pred, bbox_reg_pred) = bbox_pred bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) loss_bbox_cls = self.loss_bbox_cls( bbox_cls_pred, bbox_cls_targets.long(), bbox_cls_weights, avg_factor=num_total_samples * 4 * self.side_num) loss_bbox_reg = self.loss_bbox_reg( bbox_reg_pred, bbox_reg_targets, bbox_reg_weights, avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk) return loss_cls, loss_bbox_cls, loss_bbox_reg @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get sampled approxes approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( self, featmap_sizes, img_metas, device=device) square_list = self.get_anchors(featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_target( approxs_list, inside_flag_list, square_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.sampling) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( self.loss_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_samples=num_total_samples) return dict( loss_cls=losses_cls, loss_bbox_cls=losses_bbox_cls, loss_bbox_reg=losses_bbox_reg) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg=None, rescale=False): assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device mlvl_anchors = self.get_anchors( featmap_sizes, img_metas, device=device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_cls_pred_list = [ bbox_preds[i][0][img_id].detach() for i in range(num_levels) ] bbox_reg_pred_list = [ bbox_preds[i][1][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self.get_bboxes_single(cls_score_list, bbox_cls_pred_list, bbox_reg_pred_list, mlvl_anchors[img_id], img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def get_bboxes_single(self, cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg mlvl_bboxes = [] mlvl_scores = [] mlvl_confids = [] assert len(cls_scores) == len(bbox_cls_preds) == len( bbox_reg_preds) == len(mlvl_anchors) for cls_score, 
bbox_cls_pred, bbox_reg_pred, anchors in zip( cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): assert cls_score.size()[-2:] == bbox_cls_pred.size( )[-2:] == bbox_reg_pred.size()[-2::] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_cls_pred = bbox_cls_pred[topk_inds, :] bbox_reg_pred = bbox_reg_pred[topk_inds, :] scores = scores[topk_inds, :] bbox_preds = [ bbox_cls_pred.contiguous(), bbox_reg_pred.contiguous() ] bboxes, confids = self.bbox_coder.decode( anchors.contiguous(), bbox_preds, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_confids.append(confids) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) mlvl_confids = torch.cat(mlvl_confids) if self.use_sigmoid_cls: padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) det_bboxes, det_labels = multiclass_nms( mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=mlvl_confids) return det_bboxes, det_labels
26,915
42.483037
79
py
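SABLRetinaHead above predicts side_num = ceil(num_buckets / 2) bucket logits plus the same number of fine offsets per box side, which is where the side_num * 4 output channels of retina_bbox_cls and retina_bbox_reg come from. A rough sketch of the bucket centers for one side under a simplified reading of the bucketing scheme (the real geometry, including the scale_factor, lives in BucketingBBoxCoder):

import numpy as np

num_buckets = 14
side_num = int(np.ceil(num_buckets / 2))   # 7 buckets per side -> 7 * 4 = 28 channels

def left_side_bucket_centers(x1, x2, side_num, num_buckets):
    # Split the box width into num_buckets equal bins; the left boundary
    # is classified among the side_num bins closest to x1.
    bucket_w = (x2 - x1) / num_buckets
    return np.array([x1 + (i + 0.5) * bucket_w for i in range(side_num)])

print(left_side_bucket_centers(0., 140., side_num, num_buckets))
# -> [ 5. 15. 25. 35. 45. 55. 65.]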
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/fovea_head.py
import torch import torch.nn as nn from mmcv.cnn import ConvModule, normal_init from mmcv.ops import DeformConv2d from mmdet.core import multi_apply, multiclass_nms from ..builder import HEADS from .anchor_free_head import AnchorFreeHead INF = 1e8 class FeatureAlign(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, deform_groups=4): super(FeatureAlign, self).__init__() offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 4, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def init_weights(self): normal_init(self.conv_offset, std=0.1) normal_init(self.conv_adaption, std=0.01) def forward(self, x, shape): offset = self.conv_offset(shape) x = self.relu(self.conv_adaption(x, offset)) return x @HEADS.register_module() class FoveaHead(AnchorFreeHead): """FoveaBox: Beyond Anchor-based Object Detector https://arxiv.org/abs/1904.03797 """ def __init__(self, num_classes, in_channels, base_edge_list=(16, 32, 64, 128, 256), scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), sigma=0.4, with_deform=False, deform_groups=4, **kwargs): self.base_edge_list = base_edge_list self.scale_ranges = scale_ranges self.sigma = sigma self.with_deform = with_deform self.deform_groups = deform_groups super().__init__(num_classes, in_channels, **kwargs) def _init_layers(self): # box branch super()._init_reg_convs() self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) # cls branch if not self.with_deform: super()._init_cls_convs() self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) else: self.cls_convs = nn.ModuleList() self.cls_convs.append( ConvModule( self.feat_channels, (self.feat_channels * 4), 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.cls_convs.append( ConvModule((self.feat_channels * 4), (self.feat_channels * 4), 1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.feature_adaption = FeatureAlign( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = nn.Conv2d( int(self.feat_channels * 4), self.cls_out_channels, 3, padding=1) def init_weights(self): super().init_weights() if self.with_deform: self.feature_adaption.init_weights() def forward_single(self, x): cls_feat = x reg_feat = x for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) if self.with_deform: cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp()) for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) return cls_score, bbox_pred def _get_points_single(self, *args, **kwargs): y, x = super()._get_points_single(*args, **kwargs) return y + 0.5, x + 0.5 def loss(self, cls_scores, bbox_preds, gt_bbox_list, gt_label_list, img_metas, gt_bboxes_ignore=None): assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] points = self.get_points(featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) num_imgs = cls_scores[0].size(0) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = 
torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_labels, flatten_bbox_targets = self.get_targets( gt_bbox_list, gt_label_list, featmap_sizes, points) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < self.num_classes)).nonzero().view(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) if num_pos > 0: pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_weights = pos_bbox_targets.new_zeros( pos_bbox_targets.size()) + 1.0 loss_bbox = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, pos_weights, avg_factor=num_pos) else: loss_bbox = torch.tensor( 0, dtype=flatten_bbox_preds.dtype, device=flatten_bbox_preds.device) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points): label_list, bbox_target_list = multi_apply( self._get_target_single, gt_bbox_list, gt_label_list, featmap_size_list=featmap_sizes, point_list=points) flatten_labels = [ torch.cat([ labels_level_img.flatten() for labels_level_img in labels_level ]) for labels_level in zip(*label_list) ] flatten_bbox_targets = [ torch.cat([ bbox_targets_level_img.reshape(-1, 4) for bbox_targets_level_img in bbox_targets_level ]) for bbox_targets_level in zip(*bbox_target_list) ] flatten_labels = torch.cat(flatten_labels) flatten_bbox_targets = torch.cat(flatten_bbox_targets) return flatten_labels, flatten_bbox_targets def _get_target_single(self, gt_bboxes_raw, gt_labels_raw, featmap_size_list=None, point_list=None): gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])) label_list = [] bbox_target_list = [] # for each pyramid, find the cls and box target for base_len, (lower_bound, upper_bound), stride, featmap_size, \ (y, x) in zip(self.base_edge_list, self.scale_ranges, self.strides, featmap_size_list, point_list): # FG cat_id: [0, num_classes -1], BG cat_id: num_classes labels = gt_labels_raw.new_zeros(featmap_size) + self.num_classes bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1], 4) + 1 # scale assignment hit_indices = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(hit_indices) == 0: label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) continue _, hit_index_order = torch.sort(-gt_areas[hit_indices]) hit_indices = hit_indices[hit_index_order] gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride gt_labels = gt_labels_raw[hit_indices] half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) # valid fovea area: left, right, top, down pos_left = torch.ceil( gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long().\ clamp(0, featmap_size[1] - 1) pos_right = torch.floor( gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long().\ clamp(0, featmap_size[1] - 1) pos_top = torch.ceil( gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long().\ clamp(0, featmap_size[0] - 1) pos_down = torch.floor( gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long().\ clamp(0, featmap_size[0] - 1) for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \ zip(pos_left, pos_top, pos_right, pos_down, gt_labels, gt_bboxes_raw[hit_indices, :]): labels[py1:py2 + 1, px1:px2 + 1] = label bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \ (stride * x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len 
bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \ (stride * y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \ (gt_x2 - stride * x[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \ (gt_y2 - stride * y[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.) label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) return label_list, bbox_target_list def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg=None, rescale=None): assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] points = self.get_points( featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device, flatten=True) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] det_bboxes = self._get_bboxes_single(cls_score_list, bbox_pred_list, featmap_sizes, points, img_shape, scale_factor, cfg, rescale) result_list.append(det_bboxes) return result_list def _get_bboxes_single(self, cls_scores, bbox_preds, featmap_sizes, point_list, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(point_list) det_bboxes = [] det_scores = [] for cls_score, bbox_pred, featmap_size, stride, base_len, (y, x) \ in zip(cls_scores, bbox_preds, featmap_sizes, self.strides, self.base_edge_list, point_list): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).exp() nms_pre = cfg.get('nms_pre', -1) if (nms_pre > 0) and (scores.shape[0] > nms_pre): max_scores, _ = scores.max(dim=1) _, topk_inds = max_scores.topk(nms_pre) bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] y = y[topk_inds] x = x[topk_inds] x1 = (stride * x - base_len * bbox_pred[:, 0]).\ clamp(min=0, max=img_shape[1] - 1) y1 = (stride * y - base_len * bbox_pred[:, 1]).\ clamp(min=0, max=img_shape[0] - 1) x2 = (stride * x + base_len * bbox_pred[:, 2]).\ clamp(min=0, max=img_shape[1] - 1) y2 = (stride * y + base_len * bbox_pred[:, 3]).\ clamp(min=0, max=img_shape[0] - 1) bboxes = torch.stack([x1, y1, x2, y2], -1) det_bboxes.append(bboxes) det_scores.append(scores) det_bboxes = torch.cat(det_bboxes) if rescale: det_bboxes /= det_bboxes.new_tensor(scale_factor) det_scores = torch.cat(det_scores) padding = det_scores.new_zeros(det_scores.shape[0], 1) # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class det_scores = torch.cat([det_scores, padding], dim=1) det_bboxes, det_labels = multiclass_nms(det_bboxes, det_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels
14,405
41.122807
79
py
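The positive "fovea" assigned in _get_target_single above is the gt box's center region shrunk by sigma, snapped to the feature grid and clamped to the map. A standalone replay of that clamping arithmetic for one made-up box on a stride-8 level:

import torch

sigma, stride = 0.4, 8
featmap_h, featmap_w = 100, 152
gt = torch.tensor([32., 48., 160., 176.]) / stride   # x1, y1, x2, y2 on the feature grid
half_w = 0.5 * (gt[2] - gt[0])
half_h = 0.5 * (gt[3] - gt[1])
pos_left = torch.ceil(gt[0] + (1 - sigma) * half_w - 0.5).long().clamp(0, featmap_w - 1)
pos_right = torch.floor(gt[0] + (1 + sigma) * half_w - 0.5).long().clamp(0, featmap_w - 1)
pos_top = torch.ceil(gt[1] + (1 - sigma) * half_h - 0.5).long().clamp(0, featmap_h - 1)
pos_down = torch.floor(gt[1] + (1 + sigma) * half_h - 0.5).long().clamp(0, featmap_h - 1)
print(pos_left.item(), pos_right.item(), pos_top.item(), pos_down.item())   # 9 14 11 16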
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/dense_test_mixins.py
from inspect import signature import torch from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms class BBoxTestMixin(object): """Mixin class for test time augmentation of bboxes.""" def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_metas (list[list[dict]]): meta info of each augmented image, including img_shape, scale_factor, flip and flip_direction. Returns: tuple: (bboxes, scores) """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.cat(recovered_bboxes, dim=0) if aug_scores is None: return bboxes else: scores = torch.cat(aug_scores, dim=0) return bboxes, scores def aug_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes with test time augmentation. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[ndarray]: bbox results of each class """ # check with_nms argument gb_sig = signature(self.get_bboxes) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_bboxes_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_factors = [] # score_factors for NMS for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_inputs = outs + (img_meta, self.test_cfg, False, False) bbox_outputs = self.get_bboxes(*bbox_inputs)[0] aug_bboxes.append(bbox_outputs[0]) aug_scores.append(bbox_outputs[1]) # bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3) # contains additional element to adjust scores before NMS if len(bbox_outputs) >= 3: aug_factors.append(bbox_outputs[2]) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None det_bboxes, det_labels = multiclass_nms( merged_bboxes, merged_scores, self.test_cfg.score_thr, self.test_cfg.nms, self.test_cfg.max_per_img, score_factors=merged_factors) if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) bbox_results = bbox2result(_det_bboxes, det_labels, self.num_classes) return bbox_results
3,983
39.653061
79
py
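merge_aug_bboxes above leans on bbox_mapping_back to undo each augmentation's scaling and flipping before the boxes are concatenated. A minimal sketch of just the horizontal-flip inverse (the real helper also handles vertical flips and divides by the scale factor):

import torch

def hflip_back(bboxes, img_width):
    # Undo a horizontal flip: x1' = W - x2, x2' = W - x1, y untouched.
    out = bboxes.clone()
    out[:, 0] = img_width - bboxes[:, 2]
    out[:, 2] = img_width - bboxes[:, 0]
    return out

boxes = torch.tensor([[10., 20., 50., 60.]])
print(hflip_back(boxes, img_width=200))   # tensor([[150.,  20., 190.,  60.]])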
GFocalV2
GFocalV2-master/mmdet/models/utils/gaussian_target.py
from math import sqrt import torch def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): """Generate 2D gaussian kernel. Args: radius (int): Radius of gaussian kernel. sigma (int): Sigma of gaussian function. Default: 1. dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. device (str): Device of gaussian tensor. Default: 'cpu'. Returns: h (Tensor): Gaussian kernel with a ``(2 * radius + 1) * (2 * radius + 1)`` shape. """ x = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(1, -1) y = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() h[h < torch.finfo(h.dtype).eps * h.max()] = 0 return h def gen_gaussian_target(heatmap, center, radius, k=1): """Generate 2D gaussian heatmap. Args: heatmap (Tensor): Input heatmap, the gaussian kernel will cover on it and maintain the max value. center (list[int]): Coord of gaussian kernel's center. radius (int): Radius of gaussian kernel. k (int): Coefficient of gaussian kernel. Default: 1. Returns: out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. """ diameter = 2 * radius + 1 gaussian_kernel = gaussian2D( radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) x, y = center height, width = heatmap.shape[:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian_kernel[radius - top:radius + bottom, radius - left:radius + right] out_heatmap = heatmap torch.max( masked_heatmap, masked_gaussian * k, out=out_heatmap[y - top:y + bottom, x - left:x + right]) return out_heatmap def gaussian_radius(det_size, min_overlap): r"""Generate 2D gaussian radius. This function is modified from the `official github repo <https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/ utils.py#L65>`_. Given ``min_overlap``, radius could computed by a quadratic equation according to Vieta's formulas. There are 3 cases for computing gaussian radius, details are following: - Explanation of figure: ``lt`` and ``br`` indicates the left-top and bottom-right corner of ground truth box. ``x`` indicates the generated corner at the limited position when ``radius=r``. - Case1: one corner is inside the gt box and the other is outside. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x----------+--+ | | | | | | | | height | | overlap | | | | | | | | | | v +--+---------br--+ - | | | +----------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case2: both two corners are inside the gt box. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x-------+ | | | | | | |overlap| | height | | | | | +-------x--+ | | | v +----------+-br - To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case3: both two corners are outside the gt box. .. 
code:: text |< width >| x--+----------------+ | | | +-lt-------------+ | - | | | | ^ | | | | | | overlap | | height | | | | | | | | v | +------------br--+ - | | | +----------------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} Args: det_size (list[int]): Shape of object. min_overlap (float): Min IoU with ground truth for boxes generated by keypoints inside the gaussian kernel. Returns: radius (int): Radius of gaussian kernel. """ height, width = det_size a1 = 1 b1 = (height + width) c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = sqrt(b1**2 - 4 * a1 * c1) r1 = (b1 - sq1) / (2 * a1) a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = sqrt(b2**2 - 4 * a2 * c2) r2 = (b2 - sq2) / (2 * a2) a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = sqrt(b3**2 - 4 * a3 * c3) r3 = (b3 + sq3) / (2 * a3) return min(r1, r2, r3)
5,784
30.102151
79
py
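A short usage sketch tying the helpers above together: derive a radius from the box size, then splat the kernel onto a heatmap. The min_overlap value is an arbitrary choice for illustration; the import mirrors how corner_head pulls these helpers in:

import torch
from mmdet.models.utils import gaussian_radius, gen_gaussian_target

heatmap = torch.zeros(128, 128)
box_h, box_w = 24, 40
radius = max(0, int(gaussian_radius((box_h, box_w), min_overlap=0.7)))
heatmap = gen_gaussian_target(heatmap, center=[64, 64], radius=radius)
assert heatmap[64, 64] == 1.0   # kernel peak (k=1) lands on the center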
GFocalV2
GFocalV2-master/mmdet/models/utils/res_layer.py
from mmcv.cnn import build_conv_layer, build_norm_layer from torch import nn as nn class ResLayer(nn.Sequential): """ResLayer to build ResNet style backbone. Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. Default: False conv_cfg (dict): dictionary to construct and config conv layer. Default: None norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='BN') downsample_first (bool): Downsample at the first block or last block. False for Hourglass, True for ResNet. Default: True """ def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), downsample_first=True, **kwargs): self.block = block downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride if avg_down and stride != 1: conv_stride = 1 downsample.append( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)) downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] if downsample_first: layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) inplanes = planes * block.expansion for _ in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) else: # downsample_first=False is for HourglassModule for _ in range(num_blocks - 1): layers.append( block( inplanes=inplanes, planes=inplanes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) super(ResLayer, self).__init__(*layers)
3,655
34.495146
77
py
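A usage sketch for ResLayer with mmdet's Bottleneck block (import paths assumed from this repo's layout): a stride-2 first block triggers the automatic downsample branch, and output channels are planes * block.expansion.

import torch
from mmdet.models.backbones.resnet import Bottleneck
from mmdet.models.utils import ResLayer

layer = ResLayer(Bottleneck, inplanes=256, planes=128, num_blocks=4, stride=2)
x = torch.randn(1, 256, 56, 56)
out = layer(x)
# Bottleneck.expansion == 4 -> 128 * 4 channels; stride 2 halves the spatial size
assert out.shape == (1, 512, 28, 28)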
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/standard_roi_head.py
import torch from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler from ..builder import HEADS, build_head, build_roi_extractor from .base_roi_head import BaseRoIHead from .test_mixins import BBoxTestMixin, MaskTestMixin @HEADS.register_module() class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): """Simplest base roi head including one bbox head and one mask head.""" def init_assigner_sampler(self): """Initialize assigner and sampler.""" self.bbox_assigner = None self.bbox_sampler = None if self.train_cfg: self.bbox_assigner = build_assigner(self.train_cfg.assigner) self.bbox_sampler = build_sampler( self.train_cfg.sampler, context=self) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize ``bbox_head``""" self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor) self.bbox_head = build_head(bbox_head) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor self.mask_head = build_head(mask_head) def init_weights(self, pretrained): """Initialize the weights in head. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ if self.with_shared_head: self.shared_head.init_weights(pretrained=pretrained) if self.with_bbox: self.bbox_roi_extractor.init_weights() self.bbox_head.init_weights() if self.with_mask: self.mask_head.init_weights() if not self.share_roi_extractor: self.mask_roi_extractor.init_weights() def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. 
Returns: dict[str, Tensor]: a dictionary of loss components """ # assign gts and sample proposals if self.with_bbox or self.with_mask: num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = self.bbox_assigner.assign( proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = self.bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) losses = dict() # bbox head forward and loss if self.with_bbox: bbox_results = self._bbox_forward_train(x, sampling_results, gt_bboxes, gt_labels, img_metas) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train(x, sampling_results, bbox_results['bbox_feats'], gt_masks, img_metas) losses.update(mask_results['loss_mask']) return losses def _bbox_forward(self, x, rois): """Box head forward function used in both training and testing.""" # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): """Run forward function and calculate loss for box head in training.""" rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, self.train_cfg) loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update(loss_bbox=loss_bbox) return bbox_results def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): """Run forward function and calculate loss for mask head in training.""" if not self.share_roi_extractor: pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_results = self._mask_forward(x, pos_rois) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_results = self._mask_forward( x, pos_inds=pos_inds, bbox_feats=bbox_feats) mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, self.train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets) return mask_results def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): """Mask head forward function used in both training and testing.""" assert ((rois is not None) ^ (pos_inds is not None and bbox_feats is not None)) if rois is not None: mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: assert bbox_feats is not None mask_feats = bbox_feats[pos_inds] mask_pred = self.mask_head(mask_feats) mask_results = dict(mask_pred=mask_pred, 
mask_feats=mask_feats) return mask_results async def async_simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Async test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = await self.async_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=rescale) bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) if not self.with_mask: return bbox_results else: segm_results = await self.async_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale, mask_test_cfg=self.test_cfg.get('mask')) return bbox_results, segm_results def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = self.simple_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=rescale) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head.num_classes) for i in range(len(det_bboxes)) ] if not self.with_mask: return bbox_results else: segm_results = self.simple_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale) return list(zip(bbox_results, segm_results)) def aug_test(self, x, proposal_list, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas, proposal_list, self.test_cfg) if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes) # det_bboxes always keep the original scale if self.with_mask: segm_results = self.aug_test_mask(x, img_metas, det_bboxes, det_labels) return [(bbox_results, segm_results)] else: return [bbox_results]
12,006
40.690972
79
py
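Every path above funnels boxes through bbox2roi, which prefixes each box with its image index so a single RoI tensor can serve the whole batch. A minimal re-implementation sketch of that convention (mirroring, not replacing, the mmdet helper):

import torch

def bbox2roi_sketch(bbox_list):
    # Convert per-image (n_i, 4) boxes into one (sum n_i, 5) tensor whose
    # first column is the index of the source image within the batch.
    rois = []
    for img_id, bboxes in enumerate(bbox_list):
        inds = bboxes.new_full((bboxes.size(0), 1), img_id)
        rois.append(torch.cat([inds, bboxes], dim=-1))
    return torch.cat(rois, dim=0)

rois = bbox2roi_sketch([torch.rand(3, 4), torch.rand(2, 4)])
assert rois.shape == (5, 5) and rois[3, 0] == 1   # fourth roi comes from image 1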
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/grid_roi_head.py
import torch from mmdet.core import bbox2result, bbox2roi from ..builder import HEADS, build_head, build_roi_extractor from .standard_roi_head import StandardRoIHead @HEADS.register_module() class GridRoIHead(StandardRoIHead): """Grid roi head for Grid R-CNN. https://arxiv.org/abs/1811.12030 """ def __init__(self, grid_roi_extractor, grid_head, **kwargs): assert grid_head is not None super(GridRoIHead, self).__init__(**kwargs) if grid_roi_extractor is not None: self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.grid_roi_extractor = self.bbox_roi_extractor self.grid_head = build_head(grid_head) def init_weights(self, pretrained): """Initialize the weights in head. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ super(GridRoIHead, self).init_weights(pretrained) self.grid_head.init_weights() if not self.share_roi_extractor: self.grid_roi_extractor.init_weights() def _random_jitter(self, sampling_results, img_metas, amplitude=0.15): """Randomly jitter positive proposals for training.""" for sampling_result, img_meta in zip(sampling_results, img_metas): bboxes = sampling_result.pos_bboxes random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_( -amplitude, amplitude) # before jittering cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2 wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs() # after jittering new_cxcy = cxcy + wh * random_offsets[:, :2] new_wh = wh * (1 + random_offsets[:, 2:]) # xywh to xyxy new_x1y1 = (new_cxcy - new_wh / 2) new_x2y2 = (new_cxcy + new_wh / 2) new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1) # clip bboxes max_shape = img_meta['img_shape'] if max_shape is not None: new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1) new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1) sampling_result.pos_bboxes = new_bboxes return sampling_results def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # grid head grid_rois = rois[:100] grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], grid_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) grid_pred = self.grid_head(grid_feats) outs = outs + (grid_pred, ) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): """Run forward function and calculate loss for box head in training.""" bbox_results = super(GridRoIHead, self)._bbox_forward_train(x, sampling_results, gt_bboxes, gt_labels, img_metas) # Grid head forward and loss sampling_results = self._random_jitter(sampling_results, img_metas) pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) # GN in head does not support zero shape input if pos_rois.shape[0] == 0: return bbox_results grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) # Accelerate training max_sample_num_grid = self.train_cfg.get('max_num_grid', 192) sample_idx = torch.randperm( grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid )] grid_feats = grid_feats[sample_idx] grid_pred = self.grid_head(grid_feats) grid_targets =
self.grid_head.get_targets(sampling_results, self.train_cfg) grid_targets = grid_targets[sample_idx] loss_grid = self.grid_head.loss(grid_pred, grid_targets) bbox_results['loss_bbox'].update(loss_grid) return bbox_results def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = self.simple_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=False) # pack rois into bboxes grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes]) if grid_rois.shape[0] != 0: grid_feats = self.grid_roi_extractor( x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) self.grid_head.test_mode = True grid_pred = self.grid_head(grid_feats) # split batch grid head prediction back to each image num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes) grid_pred = { k: v.split(num_roi_per_img, 0) for k, v in grid_pred.items() } # apply bbox post-processing to each image individually bbox_results = [] num_imgs = len(det_bboxes) for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: bbox_results.append(grid_rois.new_tensor([])) else: det_bbox = self.grid_head.get_bboxes( det_bboxes[i], grid_pred['fused'][i], [img_metas[i]]) if rescale: det_bbox[:, :4] /= img_metas[i]['scale_factor'] bbox_results.append( bbox2result(det_bbox, det_labels[i], self.bbox_head.num_classes)) else: bbox_results = [ grid_rois.new_tensor([]) for _ in range(len(det_bboxes)) ] if not self.with_mask: return bbox_results else: segm_results = self.simple_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale) return list(zip(bbox_results, segm_results))
7,100
39.118644
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/cascade_roi_head.py
import torch import torch.nn as nn from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from ..builder import HEADS, build_head, build_roi_extractor from .base_roi_head import BaseRoIHead from .test_mixins import BBoxTestMixin, MaskTestMixin @HEADS.register_module() class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): """Cascade roi head including one bbox head and one mask head. https://arxiv.org/abs/1712.00726 """ def __init__(self, num_stages, stage_loss_weights, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, shared_head=None, train_cfg=None, test_cfg=None): assert bbox_roi_extractor is not None assert bbox_head is not None assert shared_head is None, \ 'Shared head is not supported in Cascade RCNN anymore' self.num_stages = num_stages self.stage_loss_weights = stage_loss_weights super(CascadeRoIHead, self).__init__( bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, shared_head=shared_head, train_cfg=train_cfg, test_cfg=test_cfg) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize box head and box roi extractor. Args: bbox_roi_extractor (dict): Config of box roi extractor. bbox_head (dict): Config of box in box head. """ self.bbox_roi_extractor = nn.ModuleList() self.bbox_head = nn.ModuleList() if not isinstance(bbox_roi_extractor, list): bbox_roi_extractor = [ bbox_roi_extractor for _ in range(self.num_stages) ] if not isinstance(bbox_head, list): bbox_head = [bbox_head for _ in range(self.num_stages)] assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor)) self.bbox_head.append(build_head(head)) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize mask head and mask roi extractor. Args: mask_roi_extractor (dict): Config of mask roi extractor. mask_head (dict): Config of mask in mask head. """ self.mask_head = nn.ModuleList() if not isinstance(mask_head, list): mask_head = [mask_head for _ in range(self.num_stages)] assert len(mask_head) == self.num_stages for head in mask_head: self.mask_head.append(build_head(head)) if mask_roi_extractor is not None: self.share_roi_extractor = False self.mask_roi_extractor = nn.ModuleList() if not isinstance(mask_roi_extractor, list): mask_roi_extractor = [ mask_roi_extractor for _ in range(self.num_stages) ] assert len(mask_roi_extractor) == self.num_stages for roi_extractor in mask_roi_extractor: self.mask_roi_extractor.append( build_roi_extractor(roi_extractor)) else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor def init_assigner_sampler(self): """Initialize assigner and sampler for each stage.""" self.bbox_assigner = [] self.bbox_sampler = [] if self.train_cfg is not None: for idx, rcnn_train_cfg in enumerate(self.train_cfg): self.bbox_assigner.append( build_assigner(rcnn_train_cfg.assigner)) self.current_stage = idx self.bbox_sampler.append( build_sampler(rcnn_train_cfg.sampler, context=self)) def init_weights(self, pretrained): """Initialize the weights in head. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. 
""" if self.with_shared_head: self.shared_head.init_weights(pretrained=pretrained) for i in range(self.num_stages): if self.with_bbox: self.bbox_roi_extractor[i].init_weights() self.bbox_head[i].init_weights() if self.with_mask: if not self.share_roi_extractor: self.mask_roi_extractor[i].init_weights() self.mask_head[i].init_weights() def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask heads if self.with_mask: mask_rois = rois[:100] for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def _bbox_forward(self, stage, x, rois): """Box head forward function used in both training and testing.""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore cls_score, bbox_pred = bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg): """Run forward function and calculate loss for box head in training.""" rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(stage, x, rois) bbox_targets = self.bbox_head[stage].get_targets( sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update( loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) return bbox_results def _mask_forward(self, stage, x, rois): """Mask head forward function used in both training and testing.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore mask_pred = mask_head(mask_feats) mask_results = dict(mask_pred=mask_pred) return mask_results def _mask_forward_train(self, stage, x, sampling_results, gt_masks, rcnn_train_cfg, bbox_feats=None): """Run forward function and calculate loss for mask head in training.""" pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_results = self._mask_forward(stage, x, pos_rois) mask_targets = self.mask_head[stage].get_targets( sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results.update(loss_mask=loss_mask) return mask_results def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ losses = dict() for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg[i] lw = self.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] if self.with_bbox or self.with_mask: bbox_assigner = self.bbox_assigner[i] bbox_sampler = self.bbox_sampler[i] num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign( proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_results = self._bbox_forward_train(i, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) for name, value in bbox_results['loss_bbox'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train( i, x, sampling_results, gt_masks, rcnn_train_cfg, bbox_results['bbox_feats']) for name, value in mask_results['loss_mask'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # refine bboxes if i < self.num_stages - 1: pos_is_gts = [res.pos_is_gt for res in sampling_results] # bbox_targets is a tuple roi_labels = bbox_results['bbox_targets'][0] with torch.no_grad(): roi_labels = torch.where( roi_labels == self.bbox_head[i].num_classes, bbox_results['cls_score'][:, :-1].argmax(1), roi_labels) proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) return losses def simple_test(self, x, proposal_list, img_metas, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' 
num_imgs = len(proposal_list) img_shapes = tuple(meta['img_shape'] for meta in img_metas) ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # "ms" in variable names means multi-stage ms_bbox_result = {} ms_segm_result = {} ms_scores = [] rcnn_test_cfg = self.test_cfg rois = bbox2roi(proposal_list) for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple( len(proposals) for proposals in proposal_list) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) if isinstance(bbox_pred, torch.Tensor): bbox_pred = bbox_pred.split(num_proposals_per_img, 0) else: bbox_pred = self.bbox_head[i].bbox_pred_split( bbox_pred, num_proposals_per_img) ms_scores.append(cls_score) if i < self.num_stages - 1: bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score] rois = torch.cat([ self.bbox_head[i].regress_by_class(rois[j], bbox_label[j], bbox_pred[j], img_metas[j]) for j in range(num_imgs) ]) # average scores of each image by stages cls_score = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(num_imgs) ] # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(num_imgs): det_bbox, det_label = self.bbox_head[-1].get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head[-1].num_classes) for i in range(num_imgs) ] ms_bbox_result['ensemble'] = bbox_results if self.with_mask: if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] else: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_rois = bbox2roi(_bboxes) num_mask_rois_per_img = tuple( _bbox.size(0) for _bbox in _bboxes) aug_masks = [] for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image mask_pred = mask_pred.split(num_mask_rois_per_img, 0) aug_masks.append( [m.sigmoid().cpu().numpy() for m in mask_pred]) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head[-1].num_classes)]) else: aug_mask = [mask[i] for mask in aug_masks] merged_masks = merge_aug_masks( aug_mask, [[img_metas[i]]] * self.num_stages, rcnn_test_cfg) segm_result = self.mask_head[-1].get_seg_masks( merged_masks, _bboxes[i], det_labels[i], rcnn_test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) ms_segm_result['ensemble'] = segm_results if self.with_mask: results = list( zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) else: results = ms_bbox_result['ensemble'] return results def aug_test(self, features, proposal_list, img_metas, rescale=False): """Test with augmentations. 
If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ rcnn_test_cfg = self.test_cfg aug_bboxes = [] aug_scores = [] for x, img_meta in zip(features, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) ms_scores.append(bbox_results['cls_score']) if i < self.num_stages - 1: bbox_label = bbox_results['cls_score'][:, :-1].argmax( dim=1) rois = self.bbox_head[i].regress_by_class( rois, bbox_label, bbox_results['bbox_pred'], img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_bboxes( rois, cls_score, bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: segm_result = [[[] for _ in range(self.mask_head[-1].num_classes)] ] else: aug_masks = [] aug_img_metas = [] for x, img_meta in zip(features, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip, flip_direction) mask_rois = bbox2roi([_bboxes]) for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) aug_img_metas.append(img_meta) merged_masks = merge_aug_masks(aug_masks, aug_img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] segm_result = self.mask_head[-1].get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=1.0, rescale=False) return [(bbox_result, segm_result)] else: return [bbox_result]
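# --- Illustrative sketch (editor's addition, not part of cascade_roi_head.py)
# `simple_test` above averages the per-stage classification scores before the
# final post-processing. A toy version of that step; all sizes are made up.
import torch

num_stages, num_rois, num_classes = 3, 4, 81
ms_scores = [
    torch.randn(num_rois, num_classes).softmax(-1) for _ in range(num_stages)
]  # one score tensor per cascade stage
cls_score = sum(ms_scores) / float(len(ms_scores))  # stage-averaged scores
print(cls_score.shape)  # torch.Size([4, 81])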
21,967
42.50099
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/dynamic_roi_head.py
import numpy as np
import torch

from mmdet.core import bbox2roi
from mmdet.models.losses import SmoothL1Loss
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead


@HEADS.register_module()
class DynamicRoIHead(StandardRoIHead):
    """RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_."""

    def __init__(self, **kwargs):
        super(DynamicRoIHead, self).__init__(**kwargs)
        assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
        # the IoU history of the past `update_iter_interval` iterations
        self.iou_history = []
        # the beta history of the past `update_iter_interval` iterations
        self.beta_history = []

    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None):
        """Forward function for training.

        Args:
            x (list[Tensor]): list of multi-level img features.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensors]): list of region proposals.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # assign gts and sample proposals
        if self.with_bbox or self.with_mask:
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            sampling_results = []
            cur_iou = []
            for i in range(num_imgs):
                assign_result = self.bbox_assigner.assign(
                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
                    gt_labels[i])
                sampling_result = self.bbox_sampler.sample(
                    assign_result,
                    proposal_list[i],
                    gt_bboxes[i],
                    gt_labels[i],
                    feats=[lvl_feat[i][None] for lvl_feat in x])
                # record the `iou_topk`-th largest IoU in an image
                iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
                               len(assign_result.max_overlaps))
                ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
                cur_iou.append(ious[-1].item())
                sampling_results.append(sampling_result)
            # average the current IoUs over images
            cur_iou = np.mean(cur_iou)
            self.iou_history.append(cur_iou)

        losses = dict()
        # bbox head forward and loss
        if self.with_bbox:
            bbox_results = self._bbox_forward_train(x, sampling_results,
                                                    gt_bboxes, gt_labels,
                                                    img_metas)
            losses.update(bbox_results['loss_bbox'])

        # mask head forward and loss
        if self.with_mask:
            mask_results = self._mask_forward_train(x, sampling_results,
                                                    bbox_results['bbox_feats'],
                                                    gt_masks, img_metas)
            losses.update(mask_results['loss_mask'])

        # update IoU threshold and SmoothL1 beta
        update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
        if len(self.iou_history) % update_iter_interval == 0:
            new_iou_thr, new_beta = self.update_hyperparameters()

        return losses

    def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
                            img_metas):
        num_imgs = len(img_metas)
        rois = bbox2roi([res.bboxes for res in sampling_results])
        bbox_results = self._bbox_forward(x, rois)

        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                                  gt_labels, self.train_cfg)
        # record the `beta_topk`-th smallest target
        # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
        # and bbox_weights, respectively
        pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
        num_pos = len(pos_inds)
        cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
        beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
                        num_pos)
        cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
        self.beta_history.append(cur_target)
        loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
                                        bbox_results['bbox_pred'], rois,
                                        *bbox_targets)

        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results

    def update_hyperparameters(self):
        """Update hyperparameters like IoU thresholds for assigner and beta
        for SmoothL1 loss based on the training statistics.

        Returns:
            tuple[float]: the updated ``iou_thr`` and ``beta``.
        """
        new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
                          np.mean(self.iou_history))
        self.iou_history = []
        self.bbox_assigner.pos_iou_thr = new_iou_thr
        self.bbox_assigner.neg_iou_thr = new_iou_thr
        self.bbox_assigner.min_pos_iou = new_iou_thr
        new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
                       np.median(self.beta_history))
        self.beta_history = []
        self.bbox_head.loss_bbox.beta = new_beta
        return new_iou_thr, new_beta
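# --- Illustrative sketch (editor's addition, not part of dynamic_roi_head.py)
# The schedule implemented by `update_hyperparameters` above, run on made-up
# statistics: the assigner IoU threshold only moves up from `initial_iou`, and
# the SmoothL1 beta only moves down from `initial_beta`.
import numpy as np

iou_history = [0.42, 0.47, 0.51, 0.55]  # per-interval IoU records
beta_history = [0.9, 0.6, 0.5, 0.4]  # per-interval regression-error records
initial_iou, initial_beta = 0.4, 1.0

new_iou_thr = max(initial_iou, np.mean(iou_history))
new_beta = min(initial_beta, np.median(beta_history))
print(new_iou_thr, new_beta)  # 0.4875 0.55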
6,415
42.060403
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/point_rend_roi_head.py
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa import torch import torch.nn.functional as F from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks from .. import builder from ..builder import HEADS from .standard_roi_head import StandardRoIHead @HEADS.register_module() class PointRendRoIHead(StandardRoIHead): """`PointRend <https://arxiv.org/abs/1912.08193>`_.""" def __init__(self, point_head, *args, **kwargs): super().__init__(*args, **kwargs) assert self.with_bbox and self.with_mask self.init_point_head(point_head) def init_point_head(self, point_head): """Initialize ``point_head``""" self.point_head = builder.build_head(point_head) def init_weights(self, pretrained): """Initialize the weights in head. Args: pretrained (str, optional): Path to pre-trained weights. """ super().init_weights(pretrained) self.point_head.init_weights() def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): """Run forward function and calculate loss for mask head and point head in training.""" mask_results = super()._mask_forward_train(x, sampling_results, bbox_feats, gt_masks, img_metas) if mask_results['loss_mask'] is not None: loss_point = self._mask_point_forward_train( x, sampling_results, mask_results['mask_pred'], gt_masks, img_metas) mask_results['loss_mask'].update(loss_point) return mask_results def _mask_point_forward_train(self, x, sampling_results, mask_pred, gt_masks, img_metas): """Run forward function and calculate loss for point head in training.""" pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) rel_roi_points = self.point_head.get_roi_rel_points_train( mask_pred, pos_labels, cfg=self.train_cfg) rois = bbox2roi([res.pos_bboxes for res in sampling_results]) fine_grained_point_feats = self._get_fine_grained_point_feats( x, rois, rel_roi_points, img_metas) coarse_point_feats = point_sample(mask_pred, rel_roi_points) mask_point_pred = self.point_head(fine_grained_point_feats, coarse_point_feats) mask_point_target = self.point_head.get_targets( rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg) loss_mask_point = self.point_head.loss(mask_point_pred, mask_point_target, pos_labels) return loss_mask_point def _get_fine_grained_point_feats(self, x, rois, rel_roi_points, img_metas): """Sample fine grained feats from each level feature map and concatenate them together.""" num_imgs = len(img_metas) fine_grained_feats = [] for idx in range(self.mask_roi_extractor.num_inputs): feats = x[idx] spatial_scale = 1. 
/ float( self.mask_roi_extractor.featmap_strides[idx]) point_feats = [] for batch_ind in range(num_imgs): # unravel batch dim feat = feats[batch_ind].unsqueeze(0) inds = (rois[:, 0].long() == batch_ind) if inds.any(): rel_img_points = rel_roi_point_to_rel_img_point( rois[inds], rel_roi_points[inds], feat.shape[2:], spatial_scale).unsqueeze(0) point_feat = point_sample(feat, rel_img_points) point_feat = point_feat.squeeze(0).transpose(0, 1) point_feats.append(point_feat) fine_grained_feats.append(torch.cat(point_feats, dim=0)) return torch.cat(fine_grained_feats, dim=1) def _mask_point_forward_test(self, x, rois, label_pred, mask_pred, img_metas): """Mask refining process with point head in testing.""" refined_mask_pred = mask_pred.clone() for subdivision_step in range(self.test_cfg.subdivision_steps): refined_mask_pred = F.interpolate( refined_mask_pred, scale_factor=self.test_cfg.scale_factor, mode='bilinear', align_corners=False) # If `subdivision_num_points` is larger or equal to the # resolution of the next step, then we can skip this step num_rois, channels, mask_height, mask_width = \ refined_mask_pred.shape if (self.test_cfg.subdivision_num_points >= self.test_cfg.scale_factor**2 * mask_height * mask_width and subdivision_step < self.test_cfg.subdivision_steps - 1): continue point_indices, rel_roi_points = \ self.point_head.get_roi_rel_points_test( refined_mask_pred, label_pred, cfg=self.test_cfg) fine_grained_point_feats = self._get_fine_grained_point_feats( x, rois, rel_roi_points, img_metas) coarse_point_feats = point_sample(mask_pred, rel_roi_points) mask_point_pred = self.point_head(fine_grained_point_feats, coarse_point_feats) point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) refined_mask_pred = refined_mask_pred.reshape( num_rois, channels, mask_height * mask_width) refined_mask_pred = refined_mask_pred.scatter_( 2, point_indices, mask_point_pred) refined_mask_pred = refined_mask_pred.view(num_rois, channels, mask_height, mask_width) return refined_mask_pred def simple_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False): """Obtain mask prediction without augmentation.""" ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) num_imgs = len(det_bboxes) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): segm_results = [[[] for _ in range(self.mask_head.num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_rois = bbox2roi(_bboxes) mask_results = self._mask_forward(x, mask_rois) # split batch mask prediction back to each image mask_pred = mask_results['mask_pred'] num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] mask_preds = mask_pred.split(num_mask_roi_per_img, 0) mask_rois = mask_rois.split(num_mask_roi_per_img, 0) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: x_i = [xx[[i]] for xx in x] mask_rois_i = mask_rois[i] mask_rois_i[:, 0] = 0 # TODO: remove this hack mask_pred_i = self._mask_point_forward_test( x_i, mask_rois_i, det_labels[i], mask_preds[i], [img_metas]) segm_result = self.mask_head.get_seg_masks( mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) return segm_results def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): """Test for mask head with test time augmentation.""" if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta in zip(feats, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip) mask_rois = bbox2roi([_bboxes]) mask_results = self._mask_forward(x, mask_rois) mask_results['mask_pred'] = self._mask_point_forward_test( x, mask_rois, det_labels, mask_results['mask_pred'], img_metas) # convert to numpy array to save memory aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] segm_result = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, self.test_cfg, ori_shape, scale_factor=1.0, rescale=False) return segm_result
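# --- Illustrative sketch (editor's addition, not part of
# point_rend_roi_head.py). The core of `_mask_point_forward_test` above:
# upsample the coarse mask, pick the most uncertain points, and scatter
# refined logits back in. Shapes are made up and the point head is replaced
# by random values.
import torch
import torch.nn.functional as F

mask = torch.randn(2, 1, 7, 7)  # coarse mask logits for two RoIs
mask = F.interpolate(
    mask, scale_factor=2, mode='bilinear', align_corners=False)
n, c, h, w = mask.shape
flat = mask.reshape(n, c, h * w)
uncertainty = -(flat.sigmoid() - 0.5).abs()  # highest near probability 0.5
point_indices = uncertainty.topk(16, dim=2).indices  # (n, c, 16)
refined = torch.randn(n, c, 16)  # stand-in for the point head output
flat = flat.scatter_(2, point_indices, refined)
mask = flat.view(n, c, h, w)
print(mask.shape)  # torch.Size([2, 1, 14, 14])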
10,311
46.086758
101
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/base_roi_head.py
from abc import ABCMeta, abstractmethod

import torch.nn as nn

from ..builder import build_shared_head


class BaseRoIHead(nn.Module, metaclass=ABCMeta):
    """Base class for RoIHeads."""

    def __init__(self,
                 bbox_roi_extractor=None,
                 bbox_head=None,
                 mask_roi_extractor=None,
                 mask_head=None,
                 shared_head=None,
                 train_cfg=None,
                 test_cfg=None):
        super(BaseRoIHead, self).__init__()
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        if shared_head is not None:
            self.shared_head = build_shared_head(shared_head)

        if bbox_head is not None:
            self.init_bbox_head(bbox_roi_extractor, bbox_head)

        if mask_head is not None:
            self.init_mask_head(mask_roi_extractor, mask_head)

        self.init_assigner_sampler()

    @property
    def with_bbox(self):
        """bool: whether the RoI head contains a `bbox_head`"""
        return hasattr(self, 'bbox_head') and self.bbox_head is not None

    @property
    def with_mask(self):
        """bool: whether the RoI head contains a `mask_head`"""
        return hasattr(self, 'mask_head') and self.mask_head is not None

    @property
    def with_shared_head(self):
        """bool: whether the RoI head contains a `shared_head`"""
        return hasattr(self, 'shared_head') and self.shared_head is not None

    @abstractmethod
    def init_weights(self, pretrained):
        """Initialize the weights in head.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        pass

    @abstractmethod
    def init_bbox_head(self, bbox_roi_extractor, bbox_head):
        """Initialize ``bbox_head``"""
        pass

    @abstractmethod
    def init_mask_head(self, mask_roi_extractor, mask_head):
        """Initialize ``mask_head``"""
        pass

    @abstractmethod
    def init_assigner_sampler(self):
        """Initialize assigner and sampler."""
        pass

    @abstractmethod
    def forward_train(self,
                      x,
                      img_meta,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      **kwargs):
        """Forward function during training."""
        pass

    async def async_simple_test(self, x, img_meta, **kwargs):
        """Asynchronized test function."""
        raise NotImplementedError

    def simple_test(self,
                    x,
                    proposal_list,
                    img_meta,
                    proposals=None,
                    rescale=False,
                    **kwargs):
        """Test without augmentation."""
        pass

    def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        pass
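# --- Illustrative sketch (editor's addition, not part of base_roi_head.py) --
# A hypothetical minimal subclass showing which hooks BaseRoIHead requires of
# concrete heads; all bodies are stubs and the import assumes mmdet is
# installed.
from mmdet.models.roi_heads import BaseRoIHead


class ToyRoIHead(BaseRoIHead):

    def init_weights(self, pretrained):
        pass  # nothing to load in this stub

    def init_bbox_head(self, bbox_roi_extractor, bbox_head):
        self.bbox_head = None  # a real head would be built from the configs

    def init_mask_head(self, mask_roi_extractor, mask_head):
        self.mask_head = None

    def init_assigner_sampler(self):
        pass  # no assigner/sampler needed for the stub

    def forward_train(self, x, img_meta, proposal_list, gt_bboxes, gt_labels,
                      gt_bboxes_ignore=None, gt_masks=None, **kwargs):
        return dict()  # no losses


head = ToyRoIHead()  # instantiable once every abstract method is overridden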
3,060
27.607477
78
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/mask_scoring_roi_head.py
import torch from mmdet.core import bbox2roi from ..builder import HEADS, build_head from .standard_roi_head import StandardRoIHead @HEADS.register_module() class MaskScoringRoIHead(StandardRoIHead): """Mask Scoring RoIHead for Mask Scoring RCNN. https://arxiv.org/abs/1903.00241 """ def __init__(self, mask_iou_head, **kwargs): assert mask_iou_head is not None super(MaskScoringRoIHead, self).__init__(**kwargs) self.mask_iou_head = build_head(mask_iou_head) def init_weights(self, pretrained): """Initialize the weights in head. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ super(MaskScoringRoIHead, self).init_weights(pretrained) self.mask_iou_head.init_weights() def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): """Run forward function and calculate loss for Mask head in training.""" pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) mask_results = super(MaskScoringRoIHead, self)._mask_forward_train(x, sampling_results, bbox_feats, gt_masks, img_metas) if mask_results['loss_mask'] is None: return mask_results # mask iou head forward and loss pos_mask_pred = mask_results['mask_pred'][ range(mask_results['mask_pred'].size(0)), pos_labels] mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'], pos_mask_pred) pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels] mask_iou_targets = self.mask_iou_head.get_targets( sampling_results, gt_masks, pos_mask_pred, mask_results['mask_targets'], self.train_cfg) loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets) mask_results['loss_mask'].update(loss_mask_iou) return mask_results def simple_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False): """Obtain mask prediction without augmentation.""" # image shapes of images in the batch ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) num_imgs = len(det_bboxes) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): num_classes = self.mask_head.num_classes segm_results = [[[] for _ in range(num_classes)] for _ in range(num_imgs)] mask_scores = [[[] for _ in range(num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i] for i in range(num_imgs) ] mask_rois = bbox2roi(_bboxes) mask_results = self._mask_forward(x, mask_rois) concat_det_labels = torch.cat(det_labels) # get mask scores with mask iou head mask_feats = mask_results['mask_feats'] mask_pred = mask_results['mask_pred'] mask_iou_pred = self.mask_iou_head( mask_feats, mask_pred[range(concat_det_labels.size(0)), concat_det_labels]) # split batch mask prediction back to each image num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes) mask_preds = mask_pred.split(num_bboxes_per_img, 0) mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0) # apply mask post-processing to each image individually segm_results = [] mask_scores = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) mask_scores.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) # get mask scores with mask iou head mask_score = self.mask_iou_head.get_mask_scores( mask_iou_preds[i], det_bboxes[i], det_labels[i]) segm_results.append(segm_result) mask_scores.append(mask_score) return list(zip(segm_results, mask_scores))
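# --- Illustrative sketch (editor's addition, not part of
# mask_scoring_roi_head.py). Mask Scoring R-CNN reweights each detection score
# by the predicted mask IoU; a toy version of that reweighting, with made-up
# numbers and the exact combination inside `get_mask_scores` assumed.
import torch

cls_scores = torch.tensor([0.90, 0.80])  # detection scores
mask_iou_pred = torch.tensor([0.70, 0.95])  # mask IoU head outputs
mask_scores = cls_scores * mask_iou_pred  # score used to rank the masks
print(mask_scores)  # tensor([0.6300, 0.7600])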
5,503
43.747967
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/htc_roi_head.py
import torch import torch.nn.functional as F from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from ..builder import HEADS, build_head, build_roi_extractor from .cascade_roi_head import CascadeRoIHead @HEADS.register_module() class HybridTaskCascadeRoIHead(CascadeRoIHead): """Hybrid task cascade roi head including one bbox head and one mask head. https://arxiv.org/abs/1901.07518 """ def __init__(self, num_stages, stage_loss_weights, semantic_roi_extractor=None, semantic_head=None, semantic_fusion=('bbox', 'mask'), interleaved=True, mask_info_flow=True, **kwargs): super(HybridTaskCascadeRoIHead, self).__init__(num_stages, stage_loss_weights, **kwargs) assert self.with_bbox and self.with_mask assert not self.with_shared_head # shared head is not supported if semantic_head is not None: self.semantic_roi_extractor = build_roi_extractor( semantic_roi_extractor) self.semantic_head = build_head(semantic_head) self.semantic_fusion = semantic_fusion self.interleaved = interleaved self.mask_info_flow = mask_info_flow def init_weights(self, pretrained): """Initialize the weights in head. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ super(HybridTaskCascadeRoIHead, self).init_weights(pretrained) if self.with_semantic: self.semantic_head.init_weights() @property def with_semantic(self): """bool: whether the head has semantic head""" if hasattr(self, 'semantic_head') and self.semantic_head is not None: return True else: return False def forward_dummy(self, x, proposals): """Dummy forward function.""" outs = () # semantic head if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None # bbox heads rois = bbox2roi([proposals]) for i in range(self.num_stages): bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask heads if self.with_mask: mask_rois = rois[:100] mask_roi_extractor = self.mask_roi_extractor[-1] mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor( [semantic_feat], mask_rois) mask_feats += mask_semantic_feat last_feat = None for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_pred, last_feat = mask_head(mask_feats, last_feat) else: mask_pred = mask_head(mask_feats) outs = outs + (mask_pred, ) return outs def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat=None): """Run forward function and calculate loss for box head in training.""" bbox_head = self.bbox_head[stage] rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward( stage, x, rois, semantic_feat=semantic_feat) bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update( loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets, ) return bbox_results def _mask_forward_train(self, stage, x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat=None): """Run forward function and calculate loss for mask head in training.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_feats = 
mask_roi_extractor(x[:mask_roi_extractor.num_inputs], pos_rois) # semantic feature fusion # element-wise sum for original features and pooled semantic features if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], pos_rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat # mask information flow # forward all previous mask heads to obtain last_feat, and fuse it # with the normal mask feature if self.mask_info_flow: last_feat = None for i in range(stage): last_feat = self.mask_head[i]( mask_feats, last_feat, return_logits=False) mask_pred = mask_head(mask_feats, last_feat, return_feat=False) else: mask_pred = mask_head(mask_feats, return_feat=False) mask_targets = mask_head.get_targets(sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) mask_results = dict(loss_mask=loss_mask) return mask_results def _bbox_forward(self, stage, x, rois, semantic_feat=None): """Box head forward function used in both training and testing.""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) if self.with_semantic and 'bbox' in self.semantic_fusion: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: bbox_semantic_feat = F.adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats += bbox_semantic_feat cls_score, bbox_pred = bbox_head(bbox_feats) bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred) return bbox_results def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None): """Mask head forward function for testing.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_rois = bbox2roi([bboxes]) mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], mask_rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat if self.mask_info_flow: last_feat = None last_pred = None for i in range(stage): mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat) if last_pred is not None: mask_pred = mask_pred + last_pred last_pred = mask_pred mask_pred = mask_head(mask_feats, last_feat, return_feat=False) if last_pred is not None: mask_pred = mask_pred + last_pred else: mask_pred = mask_head(mask_feats) return mask_pred def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, gt_semantic_seg=None): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposal_list (list[Tensors]): list of region proposals. 
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None, list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None, Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. gt_semantic_seg (None, list[Tensor]): semantic segmentation masks used if the architecture supports semantic segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ # semantic segmentation part # 2 outputs: segmentation prediction and embedded features losses = dict() if self.with_semantic: semantic_pred, semantic_feat = self.semantic_head(x) loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) losses['loss_semantic_seg'] = loss_seg else: semantic_feat = None for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg[i] lw = self.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] bbox_assigner = self.bbox_assigner[i] bbox_sampler = self.bbox_sampler[i] num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_results = \ self._bbox_forward_train( i, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat) roi_labels = bbox_results['bbox_targets'][0] for name, value in bbox_results['loss_bbox'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # mask head forward and loss if self.with_mask: # interleaved execution: use regressed bboxes by the box branch # to train the mask branch if self.interleaved: pos_is_gts = [res.pos_is_gt for res in sampling_results] with torch.no_grad(): proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) # re-assign and sample 512 RoIs from 512 RoIs sampling_results = [] for j in range(num_imgs): assign_result = bbox_assigner.assign( proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) mask_results = self._mask_forward_train( i, x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat) for name, value in mask_results['loss_mask'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # refine bboxes (same as Cascade R-CNN) if i < self.num_stages - 1 and not self.interleaved: pos_is_gts = [res.pos_is_gt for res in sampling_results] with torch.no_grad(): proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) return losses def simple_test(self, x, proposal_list, img_metas, rescale=False): """Test without augmentation.""" if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None num_imgs = len(proposal_list) img_shapes = tuple(meta['img_shape'] for meta in img_metas) ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) 
scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # "ms" in variable names means multi-stage ms_bbox_result = {} ms_segm_result = {} ms_scores = [] rcnn_test_cfg = self.test_cfg rois = bbox2roi(proposal_list) for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposal_list) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) bbox_pred = bbox_pred.split(num_proposals_per_img, 0) ms_scores.append(cls_score) if i < self.num_stages - 1: bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score] rois = torch.cat([ bbox_head.regress_by_class(rois[i], bbox_label[i], bbox_pred[i], img_metas[i]) for i in range(num_imgs) ]) # average scores of each image by stages cls_score = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(num_imgs) ] # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(num_imgs): det_bbox, det_label = self.bbox_head[-1].get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) bbox_result = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head[-1].num_classes) for i in range(num_imgs) ] ms_bbox_result['ensemble'] = bbox_result if self.with_mask: if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] else: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i] for i in range(num_imgs) ] mask_rois = bbox2roi(_bboxes) aug_masks = [] mask_roi_extractor = self.mask_roi_extractor[-1] mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor( [semantic_feat], mask_rois) mask_feats += mask_semantic_feat last_feat = None num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_pred, last_feat = mask_head(mask_feats, last_feat) else: mask_pred = mask_head(mask_feats) # split batch mask prediction back to each image mask_pred = mask_pred.split(num_bbox_per_img, 0) aug_masks.append( [mask.sigmoid().cpu().numpy() for mask in mask_pred]) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head[-1].num_classes)]) else: aug_mask = [mask[i] for mask in aug_masks] merged_mask = merge_aug_masks( aug_mask, [[img_metas[i]]] * self.num_stages, rcnn_test_cfg) segm_result = self.mask_head[-1].get_seg_masks( merged_mask, _bboxes[i], det_labels[i], rcnn_test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) ms_segm_result['ensemble'] = segm_results if self.with_mask: results = list( zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) else: results = ms_bbox_result['ensemble'] return 
results

    def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        if self.with_semantic:
            semantic_feats = [
                self.semantic_head(feat)[1] for feat in img_feats
            ]
        else:
            semantic_feats = [None] * len(img_metas)

        rcnn_test_cfg = self.test_cfg
        aug_bboxes = []
        aug_scores = []
        for x, img_meta, semantic in zip(img_feats, img_metas,
                                         semantic_feats):
            # only one image in the batch
            img_shape = img_meta[0]['img_shape']
            scale_factor = img_meta[0]['scale_factor']
            flip = img_meta[0]['flip']
            flip_direction = img_meta[0]['flip_direction']

            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                     scale_factor, flip, flip_direction)
            # "ms" in variable names means multi-stage
            ms_scores = []

            rois = bbox2roi([proposals])
            for i in range(self.num_stages):
                bbox_head = self.bbox_head[i]
                bbox_results = self._bbox_forward(
                    i, x, rois, semantic_feat=semantic)
                ms_scores.append(bbox_results['cls_score'])

                if i < self.num_stages - 1:
                    # exclude the background column when picking the label
                    # used for box refinement
                    bbox_label = bbox_results['cls_score'][:, :-1].argmax(
                        dim=1)
                    rois = bbox_head.regress_by_class(
                        rois, bbox_label, bbox_results['bbox_pred'],
                        img_meta[0])

            cls_score = sum(ms_scores) / float(len(ms_scores))
            bboxes, scores = self.bbox_head[-1].get_bboxes(
                rois,
                cls_score,
                bbox_results['bbox_pred'],
                img_shape,
                scale_factor,
                rescale=False,
                cfg=None)
            aug_bboxes.append(bboxes)
            aug_scores.append(scores)

        # after merging, bboxes will be rescaled to the original image size
        merged_bboxes, merged_scores = merge_aug_bboxes(
            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                                rcnn_test_cfg.score_thr,
                                                rcnn_test_cfg.nms,
                                                rcnn_test_cfg.max_per_img)

        bbox_result = bbox2result(det_bboxes, det_labels,
                                  self.bbox_head[-1].num_classes)

        if self.with_mask:
            if det_bboxes.shape[0] == 0:
                # num_classes already excludes the background class
                segm_result = [[[]
                                for _ in range(self.mask_head[-1].num_classes)]
                               ]
            else:
                aug_masks = []
                aug_img_metas = []
                for x, img_meta, semantic in zip(img_feats, img_metas,
                                                 semantic_feats):
                    img_shape = img_meta[0]['img_shape']
                    scale_factor = img_meta[0]['scale_factor']
                    flip = img_meta[0]['flip']
                    flip_direction = img_meta[0]['flip_direction']
                    _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                           scale_factor, flip, flip_direction)
                    mask_rois = bbox2roi([_bboxes])
                    mask_feats = self.mask_roi_extractor[-1](
                        x[:len(self.mask_roi_extractor[-1].featmap_strides)],
                        mask_rois)
                    if self.with_semantic:
                        semantic_feat = semantic
                        mask_semantic_feat = self.semantic_roi_extractor(
                            [semantic_feat], mask_rois)
                        if mask_semantic_feat.shape[-2:] != mask_feats.shape[
                                -2:]:
                            mask_semantic_feat = F.adaptive_avg_pool2d(
                                mask_semantic_feat, mask_feats.shape[-2:])
                        mask_feats += mask_semantic_feat
                    last_feat = None
                    for i in range(self.num_stages):
                        mask_head = self.mask_head[i]
                        if self.mask_info_flow:
                            mask_pred, last_feat = mask_head(
                                mask_feats, last_feat)
                        else:
                            mask_pred = mask_head(mask_feats)
                        aug_masks.append(mask_pred.sigmoid().cpu().numpy())
                        aug_img_metas.append(img_meta)
                merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
                                               self.test_cfg)

                ori_shape = img_metas[0][0]['ori_shape']
                segm_result = self.mask_head[-1].get_seg_masks(
                    merged_masks,
                    det_bboxes,
                    det_labels,
                    rcnn_test_cfg,
                    ori_shape,
                    scale_factor=1.0,
                    rescale=False)
            return [(bbox_result, segm_result)]
        else:
            return [bbox_result]
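# --- Illustrative sketch (editor's addition, not part of htc_roi_head.py) ---
# The semantic fusion used throughout this head: pooled semantic features are
# resized to the RoI feature size and added element-wise. Shapes are made up.
import torch
import torch.nn.functional as F

mask_feats = torch.randn(4, 256, 14, 14)  # RoI features
mask_semantic_feat = torch.randn(4, 256, 28, 28)  # pooled semantic features
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
    mask_semantic_feat = F.adaptive_avg_pool2d(mask_semantic_feat,
                                               mask_feats.shape[-2:])
mask_feats = mask_feats + mask_semantic_feat  # element-wise sum fusion
print(mask_feats.shape)  # torch.Size([4, 256, 14, 14])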
25,919
42.932203
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/test_mixins.py
import logging import sys import torch from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms) logger = logging.getLogger(__name__) if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import completed class BBoxTestMixin(object): if sys.version_info >= (3, 7): async def async_test_bboxes(self, x, img_metas, proposals, rcnn_test_cfg, rescale=False, bbox_semaphore=None, global_lock=None): """Asynchronized test for box head without augmentation.""" rois = bbox2roi(proposals) roi_feats = self.bbox_roi_extractor( x[:len(self.bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: roi_feats = self.shared_head(roi_feats) sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017) async with completed( __name__, 'bbox_head_forward', sleep_interval=sleep_interval): cls_score, bbox_pred = self.bbox_head(roi_feats) img_shape = img_metas[0]['img_shape'] scale_factor = img_metas[0]['scale_factor'] det_bboxes, det_labels = self.bbox_head.get_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) return det_bboxes, det_labels def simple_test_bboxes(self, x, img_metas, proposals, rcnn_test_cfg, rescale=False): """Test only det bboxes without augmentation.""" rois = bbox2roi(proposals) bbox_results = self._bbox_forward(x, rois) img_shapes = tuple(meta['img_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposals) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) # some detector with_reg is False, bbox_pred will be None if bbox_pred is not None: # the bbox prediction of some detectors like SABL is not Tensor if isinstance(bbox_pred, torch.Tensor): bbox_pred = bbox_pred.split(num_proposals_per_img, 0) else: bbox_pred = self.bbox_head.bbox_pred_split( bbox_pred, num_proposals_per_img) else: bbox_pred = (None, ) * len(proposals) # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(len(proposals)): det_bbox, det_label = self.bbox_head.get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) return det_bboxes, det_labels def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): """Test det bboxes with test time augmentation.""" aug_bboxes = [] aug_scores = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] # TODO more flexible proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) rois = bbox2roi([proposals]) bbox_results = self._bbox_forward(x, rois) bboxes, scores = self.bbox_head.get_bboxes( rois, bbox_results['cls_score'], bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, 
rcnn_test_cfg.max_per_img) return det_bboxes, det_labels class MaskTestMixin(object): if sys.version_info >= (3, 7): async def async_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False, mask_test_cfg=None): """Asynchronized test for mask head without augmentation.""" # image shape of the first image in the batch (only one) ori_shape = img_metas[0]['ori_shape'] scale_factor = img_metas[0]['scale_factor'] if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor( x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'): sleep_interval = mask_test_cfg['async_sleep_interval'] else: sleep_interval = 0.035 async with completed( __name__, 'mask_head_forward', sleep_interval=sleep_interval): mask_pred = self.mask_head(mask_feats) segm_result = self.mask_head.get_seg_masks( mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape, scale_factor, rescale) return segm_result def simple_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False): """Simple test for mask head without augmentation.""" # image shapes of images in the batch ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) num_imgs = len(det_bboxes) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): segm_results = [[[] for _ in range(self.mask_head.num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_rois = bbox2roi(_bboxes) mask_results = self._mask_forward(x, mask_rois) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] mask_preds = mask_pred.split(num_mask_roi_per_img, 0) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) return segm_results def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): """Test for mask head with test time augmentation.""" if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta in zip(feats, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip, flip_direction) mask_rois = bbox2roi([_bboxes]) mask_results = self._mask_forward(x, mask_rois) # convert to numpy array to save memory aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] segm_result = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, self.test_cfg, ori_shape, scale_factor=1.0, rescale=False) return segm_result
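# A minimal standalone sketch of the per-image split performed in
# simple_test_bboxes above: batched RoI outputs are cut back into per-image
# chunks with torch.split. The tensors below are random stand-ins (assumed
# shapes: two images with 3 and 2 proposals, 81 classes), not real detector
# outputs.
import torch

proposals = [torch.rand(3, 4), torch.rand(2, 4)]  # two images in the batch
num_proposals_per_img = tuple(len(p) for p in proposals)  # (3, 2)
cls_score = torch.rand(sum(num_proposals_per_img), 81)  # batched scores
per_img_scores = cls_score.split(num_proposals_per_img, 0)
assert [s.size(0) for s in per_img_scores] == [3, 2]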
10,852
41.897233
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
from abc import ABCMeta, abstractmethod import torch import torch.nn as nn from mmcv import ops class BaseRoIExtractor(nn.Module, metaclass=ABCMeta): """Base class for RoI extractor. Args: roi_layer (dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (int): Strides of input feature maps. """ def __init__(self, roi_layer, out_channels, featmap_strides): super(BaseRoIExtractor, self).__init__() self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) self.out_channels = out_channels self.featmap_strides = featmap_strides self.fp16_enabled = False @property def num_inputs(self): """int: Number of input feature maps.""" return len(self.featmap_strides) def init_weights(self): pass def build_roi_layers(self, layer_cfg, featmap_strides): """Build RoI operator to extract feature from each level feature map. Args: layer_cfg (dict): Dictionary to construct and config RoI layer operation. Options are modules under ``mmcv/ops`` such as ``RoIAlign``. featmap_strides (int): The stride of input feature map w.r.t to the original image size, which would be used to scale RoI coordinate (original image coordinate system) to feature coordinate system. Returns: nn.ModuleList: The RoI extractor modules for each level feature map. """ cfg = layer_cfg.copy() layer_type = cfg.pop('type') assert hasattr(ops, layer_type) layer_cls = getattr(ops, layer_type) roi_layers = nn.ModuleList( [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) return roi_layers def roi_rescale(self, rois, scale_factor): """Scale RoI coordinates by scale factor. Args: rois (torch.Tensor): RoI (Region of Interest), shape (n, 5) scale_factor (float): Scale factor that RoI will be multiplied by. Returns: torch.Tensor: Scaled RoI. """ cx = (rois[:, 1] + rois[:, 3]) * 0.5 cy = (rois[:, 2] + rois[:, 4]) * 0.5 w = rois[:, 3] - rois[:, 1] h = rois[:, 4] - rois[:, 2] new_w = w * scale_factor new_h = h * scale_factor x1 = cx - new_w * 0.5 x2 = cx + new_w * 0.5 y1 = cy - new_h * 0.5 y2 = cy + new_h * 0.5 new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1) return new_rois @abstractmethod def forward(self, feats, rois, roi_scale_factor=None): pass
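# A minimal standalone sketch of the roi_rescale arithmetic above: each RoI is
# scaled about its own center while the batch index in column 0 is preserved.
# The RoI value is a made-up example.
import torch

rois = torch.tensor([[0., 10., 10., 30., 50.]])  # (batch_idx, x1, y1, x2, y2)
scale_factor = 2.0
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
new_w = (rois[:, 3] - rois[:, 1]) * scale_factor
new_h = (rois[:, 4] - rois[:, 2]) * scale_factor
new_rois = torch.stack((rois[:, 0], cx - new_w * 0.5, cy - new_h * 0.5,
                        cx + new_w * 0.5, cy + new_h * 0.5), dim=-1)
# the 20x40 box centered at (20, 30) doubles to 40x80 around the same center
assert new_rois.tolist() == [[0., 0., -10., 40., 70.]]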
2,760
31.869048
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
import torch from mmcv.runner import force_fp32 from mmdet.models.builder import ROI_EXTRACTORS from .base_roi_extractor import BaseRoIExtractor @ROI_EXTRACTORS.register_module() class SingleRoIExtractor(BaseRoIExtractor): """Extract RoI features from a single level feature map. If there are multiple input feature levels, each RoI is mapped to a level according to its scale. The mapping rule is proposed in `FPN <https://arxiv.org/abs/1612.03144>`_. Args: roi_layer (dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (int): Strides of input feature maps. finest_scale (int): Scale threshold of mapping to level 0. Default: 56. """ def __init__(self, roi_layer, out_channels, featmap_strides, finest_scale=56): super(SingleRoIExtractor, self).__init__(roi_layer, out_channels, featmap_strides) self.finest_scale = finest_scale def map_roi_levels(self, rois, num_levels): """Map rois to corresponding feature levels by scales. - scale < finest_scale * 2: level 0 - finest_scale * 2 <= scale < finest_scale * 4: level 1 - finest_scale * 4 <= scale < finest_scale * 8: level 2 - scale >= finest_scale * 8: level 3 Args: rois (Tensor): Input RoIs, shape (k, 5). num_levels (int): Total level number. Returns: Tensor: Level index (0-based) of each RoI, shape (k, ) """ scale = torch.sqrt( (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2])) target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6)) target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long() return target_lvls @force_fp32(apply_to=('feats', ), out_fp16=True) def forward(self, feats, rois, roi_scale_factor=None): """Forward function.""" out_size = self.roi_layers[0].output_size num_levels = len(feats) roi_feats = feats[0].new_zeros( rois.size(0), self.out_channels, *out_size) # TODO: remove this when parrots supports if torch.__version__ == 'parrots': roi_feats.requires_grad = True if num_levels == 1: if len(rois) == 0: return roi_feats return self.roi_layers[0](feats[0], rois) target_lvls = self.map_roi_levels(rois, num_levels) if roi_scale_factor is not None: rois = self.roi_rescale(rois, roi_scale_factor) for i in range(num_levels): inds = target_lvls == i if inds.any(): rois_ = rois[inds, :] roi_feats_t = self.roi_layers[i](feats[i], rois_) roi_feats[inds] = roi_feats_t else: roi_feats += sum( x.view(-1)[0] for x in self.parameters()) * 0. + feats[i].sum() * 0. return roi_feats
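# A minimal standalone sketch of the map_roi_levels rule above, assuming the
# default finest_scale=56 and a 4-level FPN: a RoI of scale s lands on
# floor(log2(s / 56)), clamped to [0, num_levels - 1].
import torch

finest_scale, num_levels = 56, 4
rois = torch.tensor([[0., 0., 0., 50., 50.],     # scale 50  -> level 0
                     [0., 0., 0., 150., 150.],   # scale 150 -> level 1
                     [0., 0., 0., 600., 600.]])  # scale 600 -> level 3
scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
assert target_lvls.tolist() == [0, 1, 3]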
3,114
36.53012
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/bbox_heads/bbox_head.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.runner import auto_fp16, force_fp32 from torch.nn.modules.utils import _pair from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms from mmdet.models.builder import HEADS, build_loss from mmdet.models.losses import accuracy @HEADS.register_module() class BBoxHead(nn.Module): """Simplest RoI head, with only two fc layers for classification and regression respectively.""" def __init__(self, with_avg_pool=False, with_cls=True, with_reg=True, roi_feat_size=7, in_channels=256, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, reg_decoded_bbox=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.0)): super(BBoxHead, self).__init__() assert with_cls or with_reg self.with_avg_pool = with_avg_pool self.with_cls = with_cls self.with_reg = with_reg self.roi_feat_size = _pair(roi_feat_size) self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1] self.in_channels = in_channels self.num_classes = num_classes self.reg_class_agnostic = reg_class_agnostic self.reg_decoded_bbox = reg_decoded_bbox self.fp16_enabled = False self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) in_channels = self.in_channels if self.with_avg_pool: self.avg_pool = nn.AvgPool2d(self.roi_feat_size) else: in_channels *= self.roi_feat_area if self.with_cls: # need to add background class self.fc_cls = nn.Linear(in_channels, num_classes + 1) if self.with_reg: out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes self.fc_reg = nn.Linear(in_channels, out_dim_reg) self.debug_imgs = None def init_weights(self): # conv layers are already initialized by ConvModule if self.with_cls: nn.init.normal_(self.fc_cls.weight, 0, 0.01) nn.init.constant_(self.fc_cls.bias, 0) if self.with_reg: nn.init.normal_(self.fc_reg.weight, 0, 0.001) nn.init.constant_(self.fc_reg.bias, 0) @auto_fp16() def forward(self, x): if self.with_avg_pool: x = self.avg_pool(x) x = x.view(x.size(0), -1) cls_score = self.fc_cls(x) if self.with_cls else None bbox_pred = self.fc_reg(x) if self.with_reg else None return cls_score, bbox_pred def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes, pos_gt_labels, cfg): num_pos = pos_bboxes.size(0) num_neg = neg_bboxes.size(0) num_samples = num_pos + num_neg # original implementation uses new_zeros since BG are set to be 0 # now use empty & fill because BG cat_id = num_classes, # FG cat_id = [0, num_classes-1] labels = pos_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_bboxes.new_zeros(num_samples) bbox_targets = pos_bboxes.new_zeros(num_samples, 4) bbox_weights = pos_bboxes.new_zeros(num_samples, 4) if num_pos > 0: labels[:num_pos] = pos_gt_labels pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight label_weights[:num_pos] = pos_weight if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( pos_bboxes, pos_gt_bboxes) else: pos_bbox_targets = pos_gt_bboxes bbox_targets[:num_pos, :] = pos_bbox_targets bbox_weights[:num_pos, :] = 1 if num_neg > 0: label_weights[-num_neg:] = 1.0 return labels, label_weights, bbox_targets, bbox_weights def get_targets(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, concat=True): pos_bboxes_list = [res.pos_bboxes for res in sampling_results] 
neg_bboxes_list = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] labels, label_weights, bbox_targets, bbox_weights = multi_apply( self._get_target_single, pos_bboxes_list, neg_bboxes_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights @force_fp32(apply_to=('cls_score', 'bbox_pred')) def loss(self, cls_score, bbox_pred, rois, labels, label_weights, bbox_targets, bbox_weights, reduction_override=None): losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) if cls_score.numel() > 0: losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['acc'] = accuracy(cls_score, labels) if bbox_pred is not None: bg_class_ind = self.num_classes # 0~self.num_classes-1 are FG, self.num_classes is BG pos_inds = (labels >= 0) & (labels < bg_class_ind) # do not perform bounding box regression for BG anymore. if pos_inds.any(): if self.reg_decoded_bbox: bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred) if self.reg_class_agnostic: pos_bbox_pred = bbox_pred.view( bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] else: pos_bbox_pred = bbox_pred.view( bbox_pred.size(0), -1, 4)[pos_inds.type(torch.bool), labels[pos_inds.type(torch.bool)]] losses['loss_bbox'] = self.loss_bbox( pos_bbox_pred, bbox_targets[pos_inds.type(torch.bool)], bbox_weights[pos_inds.type(torch.bool)], avg_factor=bbox_targets.size(0), reduction_override=reduction_override) else: losses['loss_bbox'] = bbox_pred[pos_inds].sum() return losses @force_fp32(apply_to=('cls_score', 'bbox_pred')) def get_bboxes(self, rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None): if isinstance(cls_score, list): cls_score = sum(cls_score) / float(len(cls_score)) scores = F.softmax(cls_score, dim=1) if cls_score is not None else None if bbox_pred is not None: bboxes = self.bbox_coder.decode( rois[:, 1:], bbox_pred, max_shape=img_shape) else: bboxes = rois[:, 1:].clone() if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1]) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0]) if rescale and bboxes.size(0) > 0: if isinstance(scale_factor, float): bboxes /= scale_factor else: scale_factor = bboxes.new_tensor(scale_factor) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(bboxes.size()[0], -1) if cfg is None: return bboxes, scores else: det_bboxes, det_labels = multiclass_nms(bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels @force_fp32(apply_to=('bbox_preds', )) def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): """Refine bboxes during training. Args: rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, and bs is the sampled RoIs per image. The first column is the image id and the next 4 columns are x1, y1, x2, y2. labels (Tensor): Shape (n*bs, ). bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class). pos_is_gts (list[Tensor]): Flags indicating if each positive bbox is a gt bbox. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Refined bboxes of each image in a mini-batch. 
Example: >>> # xdoctest: +REQUIRES(module:kwarray) >>> import kwarray >>> import numpy as np >>> from mmdet.core.bbox.demodata import random_boxes >>> self = BBoxHead(reg_class_agnostic=True) >>> n_roi = 2 >>> n_img = 4 >>> scale = 512 >>> rng = np.random.RandomState(0) >>> img_metas = [{'img_shape': (scale, scale)} ... for _ in range(n_img)] >>> # Create rois in the expected format >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng) >>> img_ids = torch.randint(0, n_img, (n_roi,)) >>> img_ids = img_ids.float() >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1) >>> # Create other args >>> labels = torch.randint(0, 2, (n_roi,)).long() >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng) >>> # For each image, pretend random positive boxes are gts >>> is_label_pos = (labels.numpy() > 0).astype(np.int) >>> lbl_per_img = kwarray.group_items(is_label_pos, ... img_ids.numpy()) >>> pos_per_img = [sum(lbl_per_img.get(gid, [])) ... for gid in range(n_img)] >>> pos_is_gts = [ >>> torch.randint(0, 2, (npos,)).byte().sort( >>> descending=True)[0] >>> for npos in pos_per_img >>> ] >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds, >>> pos_is_gts, img_metas) >>> print(bboxes_list) """ img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() <= len(img_metas) bboxes_list = [] for i in range(len(img_metas)): inds = torch.nonzero( rois[:, 0] == i, as_tuple=False).squeeze(dim=1) num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] bbox_pred_ = bbox_preds[inds] img_meta_ = img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) return bboxes_list @force_fp32(apply_to=('bbox_pred', )) def regress_by_class(self, rois, label, bbox_pred, img_meta): """Regress the bbox for the predicted class. Used in Cascade R-CNN. Args: rois (Tensor): shape (n, 4) or (n, 5) label (Tensor): shape (n, ) bbox_pred (Tensor): shape (n, 4*(#class)) or (n, 4) img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. """ assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape) if not self.reg_class_agnostic: label = label * 4 inds = torch.stack((label, label + 1, label + 2, label + 3), 1) bbox_pred = torch.gather(bbox_pred, 1, inds) assert bbox_pred.size(1) == 4 if rois.size(1) == 4: new_rois = self.bbox_coder.decode( rois, bbox_pred, max_shape=img_meta['img_shape']) else: bboxes = self.bbox_coder.decode( rois[:, 1:], bbox_pred, max_shape=img_meta['img_shape']) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois
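# A minimal standalone sketch of the label layout built by _get_target_single
# above: positives come first and keep their gt labels, negatives are filled
# with the background id num_classes (not 0). The class ids are hypothetical.
import torch

num_classes, num_pos, num_neg = 80, 2, 3
labels = torch.full((num_pos + num_neg, ), num_classes, dtype=torch.long)
labels[:num_pos] = torch.tensor([12, 7])  # hypothetical gt classes
assert labels.tolist() == [12, 7, 80, 80, 80]
# loss() later selects positives exactly this way
pos_inds = (labels >= 0) & (labels < num_classes)
assert pos_inds.sum().item() == num_pos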
13,645
39.734328
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/bbox_heads/sabl_head.py
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init from mmcv.runner import force_fp32 from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms from mmdet.models.builder import HEADS, build_loss from mmdet.models.losses import accuracy @HEADS.register_module() class SABLHead(nn.Module): """Side-Aware Boundary Localization (SABL) for RoI-Head. Side-Aware features are extracted by conv layers with an attention mechanism. Boundary Localization with Bucketing and Bucketing Guided Rescoring are implemented in BucketingBBoxCoder. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: cls_in_channels (int): Input channels of cls RoI feature. \ Defaults to 256. reg_in_channels (int): Input channels of reg RoI feature. \ Defaults to 256. roi_feat_size (int): Size of RoI features. Defaults to 7. reg_feat_up_ratio (int): Upsample ratio of reg features. \ Defaults to 2. reg_pre_kernel (int): Kernel of 2D conv layers before \ attention pooling. Defaults to 3. reg_post_kernel (int): Kernel of 1D conv layers after \ attention pooling. Defaults to 3. reg_pre_num (int): Number of pre convs. Defaults to 2. reg_post_num (int): Number of post convs. Defaults to 1. num_classes (int): Number of classes in dataset. Defaults to 80. cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024. reg_offset_out_channels (int): Hidden and output channel \ of reg offset branch. Defaults to 256. reg_cls_out_channels (int): Hidden and output channel \ of reg cls branch. Defaults to 256. num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1. num_reg_fcs (int): Number of fcs for reg branch. Defaults to 0. reg_class_agnostic (bool): Class agnostic regression or not. \ Defaults to True. norm_cfg (dict): Config of norm layers. Defaults to None. bbox_coder (dict): Config of bbox coder. Defaults to 'BucketingBBoxCoder'. loss_cls (dict): Config of classification loss. loss_bbox_cls (dict): Config of classification loss for bbox branch. loss_bbox_reg (dict): Config of regression loss for bbox branch.
""" def __init__(self, num_classes, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict( type='SmoothL1Loss', beta=0.1, loss_weight=1.0)): super(SABLHead, self).__init__() self.cls_in_channels = cls_in_channels self.reg_in_channels = reg_in_channels self.roi_feat_size = roi_feat_size self.reg_feat_up_ratio = int(reg_feat_up_ratio) self.num_buckets = bbox_coder['num_buckets'] assert self.reg_feat_up_ratio // 2 >= 1 self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio assert self.up_reg_feat_size == bbox_coder['num_buckets'] self.reg_pre_kernel = reg_pre_kernel self.reg_post_kernel = reg_post_kernel self.reg_pre_num = reg_pre_num self.reg_post_num = reg_post_num self.num_classes = num_classes self.cls_out_channels = cls_out_channels self.reg_offset_out_channels = reg_offset_out_channels self.reg_cls_out_channels = reg_cls_out_channels self.num_cls_fcs = num_cls_fcs self.num_reg_fcs = num_reg_fcs self.reg_class_agnostic = reg_class_agnostic assert self.reg_class_agnostic self.norm_cfg = norm_cfg self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox_cls = build_loss(loss_bbox_cls) self.loss_bbox_reg = build_loss(loss_bbox_reg) self.cls_fcs = self._add_fc_branch(self.num_cls_fcs, self.cls_in_channels, self.roi_feat_size, self.cls_out_channels) self.side_num = int(np.ceil(self.num_buckets / 2)) if self.reg_feat_up_ratio > 1: self.upsample_x = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.upsample_y = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.reg_pre_convs = nn.ModuleList() for i in range(self.reg_pre_num): reg_pre_conv = ConvModule( reg_in_channels, reg_in_channels, kernel_size=reg_pre_kernel, padding=reg_pre_kernel // 2, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_pre_convs.append(reg_pre_conv) self.reg_post_conv_xs = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_x = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(1, reg_post_kernel), padding=(0, reg_post_kernel // 2), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_xs.append(reg_post_conv_x) self.reg_post_conv_ys = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_y = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(reg_post_kernel, 1), padding=(reg_post_kernel // 2, 0), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_ys.append(reg_post_conv_y) self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1) self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1) self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1) self.relu = nn.ReLU(inplace=True) self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_cls_out_channels) self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_offset_out_channels) self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1) self.fc_reg_offset = 
nn.Linear(self.reg_offset_out_channels, 1) def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size, fc_out_channels): in_channels = in_channels * roi_feat_size * roi_feat_size branch_fcs = nn.ModuleList() for i in range(num_branch_fcs): fc_in_channels = (in_channels if i == 0 else fc_out_channels) branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels)) return branch_fcs def init_weights(self): for module_list in [ self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs ]: for m in module_list.modules(): if isinstance(m, nn.Linear): xavier_init(m, distribution='uniform') if self.reg_feat_up_ratio > 1: kaiming_init(self.upsample_x, distribution='normal') kaiming_init(self.upsample_y, distribution='normal') normal_init(self.reg_conv_att_x, 0, 0.01) normal_init(self.reg_conv_att_y, 0, 0.01) normal_init(self.fc_reg_offset, 0, 0.001) normal_init(self.fc_reg_cls, 0, 0.01) normal_init(self.fc_cls, 0, 0.01) def cls_forward(self, cls_x): cls_x = cls_x.view(cls_x.size(0), -1) for fc in self.cls_fcs: cls_x = self.relu(fc(cls_x)) cls_score = self.fc_cls(cls_x) return cls_score def attention_pool(self, reg_x): """Extract direction-specific features fx and fy with attention mechanism.""" reg_fx = reg_x reg_fy = reg_x reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid() reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid() reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2) reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3) reg_fx = (reg_fx * reg_fx_att).sum(dim=2) reg_fy = (reg_fy * reg_fy_att).sum(dim=3) return reg_fx, reg_fy def side_aware_feature_extractor(self, reg_x): """Refine and extract side-aware features without splitting them.""" for reg_pre_conv in self.reg_pre_convs: reg_x = reg_pre_conv(reg_x) reg_fx, reg_fy = self.attention_pool(reg_x) if self.reg_post_num > 0: reg_fx = reg_fx.unsqueeze(2) reg_fy = reg_fy.unsqueeze(3) for i in range(self.reg_post_num): reg_fx = self.reg_post_conv_xs[i](reg_fx) reg_fy = self.reg_post_conv_ys[i](reg_fy) reg_fx = reg_fx.squeeze(2) reg_fy = reg_fy.squeeze(3) if self.reg_feat_up_ratio > 1: reg_fx = self.relu(self.upsample_x(reg_fx)) reg_fy = self.relu(self.upsample_y(reg_fy)) reg_fx = torch.transpose(reg_fx, 1, 2) reg_fy = torch.transpose(reg_fy, 1, 2) return reg_fx.contiguous(), reg_fy.contiguous() def reg_pred(self, x, offset_fcs, cls_fcs): """Predict bucketing estimation (cls_pred) and fine regression (offset_pred) with side-aware features.""" x_offset = x.view(-1, self.reg_in_channels) x_cls = x.view(-1, self.reg_in_channels) for fc in offset_fcs: x_offset = self.relu(fc(x_offset)) for fc in cls_fcs: x_cls = self.relu(fc(x_cls)) offset_pred = self.fc_reg_offset(x_offset) cls_pred = self.fc_reg_cls(x_cls) offset_pred = offset_pred.view(x.size(0), -1) cls_pred = cls_pred.view(x.size(0), -1) return offset_pred, cls_pred def side_aware_split(self, feat): """Split side-aware features aligned with orders of bucketing targets.""" l_end = int(np.ceil(self.up_reg_feat_size / 2)) r_start = int(np.floor(self.up_reg_feat_size / 2)) feat_fl = feat[:, :l_end] feat_fr = feat[:, r_start:].flip(dims=(1, )) feat_fl = feat_fl.contiguous() feat_fr = feat_fr.contiguous() feat = torch.cat([feat_fl, feat_fr], dim=-1) return feat def bbox_pred_split(self, bbox_pred, num_proposals_per_img): """Split batch bbox prediction back to each image.""" bucket_cls_preds, bucket_offset_preds = bbox_pred bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0) bucket_offset_preds = bucket_offset_preds.split( num_proposals_per_img, 0) bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds)) return bbox_pred def reg_forward(self, reg_x): outs = self.side_aware_feature_extractor(reg_x) edge_offset_preds = [] edge_cls_preds = [] reg_fx = outs[0] reg_fy = outs[1] offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs, self.reg_cls_fcs) offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs, self.reg_cls_fcs) offset_pred_x = self.side_aware_split(offset_pred_x) offset_pred_y = self.side_aware_split(offset_pred_y) cls_pred_x = self.side_aware_split(cls_pred_x) cls_pred_y = self.side_aware_split(cls_pred_y) edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1) edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1) return (edge_cls_preds, edge_offset_preds) def forward(self, x): bbox_pred = self.reg_forward(x) cls_score = self.cls_forward(x) return cls_score, bbox_pred def get_targets(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg): pos_proposals = [res.pos_bboxes for res in sampling_results] neg_proposals = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels = [res.pos_gt_labels for res in sampling_results] cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels, rcnn_train_cfg) (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) = cls_reg_targets return (labels, label_weights, (bucket_cls_targets, bucket_offset_targets), (bucket_cls_weights, bucket_offset_weights)) def bucket_target(self, pos_proposals_list, neg_proposals_list, pos_gt_bboxes_list, pos_gt_labels_list, rcnn_train_cfg, concat=True): (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) = multi_apply( self._bucket_target_single, pos_proposals_list, neg_proposals_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bucket_cls_targets = torch.cat(bucket_cls_targets, 0) bucket_cls_weights = torch.cat(bucket_cls_weights, 0) bucket_offset_targets = torch.cat(bucket_offset_targets, 0) bucket_offset_weights = torch.cat(bucket_offset_weights, 0) return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def _bucket_target_single(self, pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels, cfg): """Compute bucketing estimation targets and fine regression targets for a single image. Args: pos_proposals (Tensor): positive proposals of a single image, Shape (n_pos, 4). neg_proposals (Tensor): negative proposals of a single image, Shape (n_neg, 4). pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals of a single image, Shape (n_pos, 4). pos_gt_labels (Tensor): gt labels assigned to positive proposals of a single image, Shape (n_pos, ). cfg (dict): Config of calculating targets. Returns: tuple: - labels (Tensor): Labels in a single image. \ Shape (n,). - label_weights (Tensor): Label weights in a single image.\ Shape (n,) - bucket_cls_targets (Tensor): Bucket cls targets in \ a single image. Shape (n, num_buckets*2). - bucket_cls_weights (Tensor): Bucket cls weights in \ a single image. Shape (n, num_buckets*2). - bucket_offset_targets (Tensor): Bucket offset targets \ in a single image. Shape (n, num_buckets*2). - bucket_offset_weights (Tensor): Bucket offset weights \ in a single image. Shape (n, num_buckets*2).
""" num_pos = pos_proposals.size(0) num_neg = neg_proposals.size(0) num_samples = num_pos + num_neg labels = pos_gt_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_proposals.new_zeros(num_samples) bucket_cls_targets = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_cls_weights = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_offset_targets = pos_proposals.new_zeros( num_samples, 4 * self.side_num) bucket_offset_weights = pos_proposals.new_zeros( num_samples, 4 * self.side_num) if num_pos > 0: labels[:num_pos] = pos_gt_labels label_weights[:num_pos] = 1.0 (pos_bucket_offset_targets, pos_bucket_offset_weights, pos_bucket_cls_targets, pos_bucket_cls_weights) = self.bbox_coder.encode( pos_proposals, pos_gt_bboxes) bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights if num_neg > 0: label_weights[-num_neg:] = 1.0 return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def loss(self, cls_score, bbox_pred, rois, labels, label_weights, bbox_targets, bbox_weights, reduction_override=None): losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['acc'] = accuracy(cls_score, labels) if bbox_pred is not None: bucket_cls_preds, bucket_offset_preds = bbox_pred bucket_cls_targets, bucket_offset_targets = bbox_targets bucket_cls_weights, bucket_offset_weights = bbox_weights # edge cls bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num) bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num) bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num) losses['loss_bbox_cls'] = self.loss_bbox_cls( bucket_cls_preds, bucket_cls_targets, bucket_cls_weights, avg_factor=bucket_cls_targets.size(0), reduction_override=reduction_override) losses['loss_bbox_reg'] = self.loss_bbox_reg( bucket_offset_preds, bucket_offset_targets, bucket_offset_weights, avg_factor=bucket_offset_targets.size(0), reduction_override=reduction_override) return losses @force_fp32(apply_to=('cls_score', 'bbox_pred')) def get_bboxes(self, rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None): if isinstance(cls_score, list): cls_score = sum(cls_score) / float(len(cls_score)) scores = F.softmax(cls_score, dim=1) if cls_score is not None else None if bbox_pred is not None: bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred, img_shape) else: bboxes = rois[:, 1:].clone() confids = None if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) if rescale and bboxes.size(0) > 0: if isinstance(scale_factor, float): bboxes /= scale_factor else: bboxes /= torch.from_numpy(scale_factor).to(bboxes.device) if cfg is None: return bboxes, scores else: det_bboxes, det_labels = multiclass_nms( bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=confids) return det_bboxes, det_labels @force_fp32(apply_to=('bbox_preds', )) def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): """Refine bboxes during training. 
Args: rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, and bs is the sampled RoIs per image. labels (Tensor): Shape (n*bs, ). bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \ (n*bs, num_buckets*2)]. pos_is_gts (list[Tensor]): Flags indicating if each positive bbox is a gt bbox. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Refined bboxes of each image in a mini-batch. """ img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() == len(img_metas) bboxes_list = [] for i in range(len(img_metas)): inds = torch.nonzero( rois[:, 0] == i, as_tuple=False).squeeze(dim=1) num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] edge_cls_preds, edge_offset_preds = bbox_preds edge_cls_preds_ = edge_cls_preds[inds] edge_offset_preds_ = edge_offset_preds[inds] bbox_pred_ = [edge_cls_preds_, edge_offset_preds_] img_meta_ = img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) return bboxes_list @force_fp32(apply_to=('bbox_pred', )) def regress_by_class(self, rois, label, bbox_pred, img_meta): """Regress the bbox for the predicted class. Used in Cascade R-CNN. Args: rois (Tensor): shape (n, 4) or (n, 5) label (Tensor): shape (n, ) bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \ (n, num_buckets *2)] img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. """ assert rois.size(1) == 4 or rois.size(1) == 5 if rois.size(1) == 4: new_rois, _ = self.bbox_coder.decode(rois, bbox_pred, img_meta['img_shape']) else: bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred, img_meta['img_shape']) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois
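# A minimal standalone sketch of side_aware_split above: a 1-D response of
# length up_reg_feat_size is cut into a left half and a flipped right half,
# so both halves run boundary -> center as the bucketing targets expect.
# The feature values are toy numbers.
import numpy as np
import torch

up_reg_feat_size = 6
feat = torch.arange(up_reg_feat_size, dtype=torch.float).unsqueeze(0)
l_end = int(np.ceil(up_reg_feat_size / 2))     # 3
r_start = int(np.floor(up_reg_feat_size / 2))  # 3
out = torch.cat([feat[:, :l_end], feat[:, r_start:].flip(dims=(1, ))], dim=-1)
assert out.tolist() == [[0., 1., 2., 5., 4., 3.]]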
24,585
41.907504
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
import torch.nn as nn from mmcv.cnn import ConvModule from mmdet.models.builder import HEADS from .bbox_head import BBoxHead @HEADS.register_module() class ConvFCBBoxHead(BBoxHead): r"""More general bbox head, with shared conv and fc layers and two optional separated branches. .. code-block:: none /-> cls convs -> cls fcs -> cls shared convs -> shared fcs \-> reg convs -> reg fcs -> reg """ # noqa: W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, conv_out_channels=256, fc_out_channels=1024, conv_cfg=None, norm_cfg=None, *args, **kwargs): super(ConvFCBBoxHead, self).__init__(*args, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_shared_convs = num_shared_convs self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg # add shared convs and fcs self.shared_convs, self.shared_fcs, last_layer_dim = \ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim # add cls specific branch self.cls_convs, self.cls_fcs, self.cls_last_dim = \ self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add reg specific branch self.reg_convs, self.reg_fcs, self.reg_last_dim = \ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area self.relu = nn.ReLU(inplace=True) # reconstruct fc_cls and fc_reg since input channels are changed if self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False): """Add shared or separable branch. 
convs -> avg pool (optional) -> fcs """ last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def init_weights(self): super(ConvFCBBoxHead, self).init_weights() # conv layers are already initialized by ConvModule for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: for m in module_list.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) def forward(self, x): # shared part if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) # separate branches x_cls = x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else None return cls_score, bbox_pred @HEADS.register_module() class Shared2FCBBoxHead(ConvFCBBoxHead): def __init__(self, fc_out_channels=1024, *args, **kwargs): super(Shared2FCBBoxHead, self).__init__( num_shared_convs=0, num_shared_fcs=2, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, *args, **kwargs) @HEADS.register_module() class Shared4Conv1FCBBoxHead(ConvFCBBoxHead): def __init__(self, fc_out_channels=1024, *args, **kwargs): super(Shared4Conv1FCBBoxHead, self).__init__( num_shared_convs=4, num_shared_fcs=1, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, *args, **kwargs)
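# A hypothetical config snippet (following the mmdet config convention)
# showing how the Shared2FCBBoxHead shortcut above would typically be selected
# through the registry; the values mirror the class defaults rather than any
# particular repo config.
bbox_head = dict(
    type='Shared2FCBBoxHead',
    in_channels=256,
    fc_out_channels=1024,
    roi_feat_size=7,
    num_classes=80,
    bbox_coder=dict(
        type='DeltaXYWHBBoxCoder',
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2]),
    reg_class_agnostic=False)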
7,436
35.101942
79
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
import torch.nn as nn from mmcv.cnn import ConvModule, normal_init, xavier_init from mmdet.models.backbones.resnet import Bottleneck from mmdet.models.builder import HEADS from .bbox_head import BBoxHead class BasicResBlock(nn.Module): """Basic residual block. This block is a little different from the block in the ResNet backbone. The kernel size of conv2 is 1 in this block, while it is 3 in the ResNet BasicBlock. Args: in_channels (int): Channels of the input feature map. out_channels (int): Channels of the output feature map. conv_cfg (dict): The config dict for convolution layers. norm_cfg (dict): The config dict for normalization layers. """ def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN')): super(BasicResBlock, self).__init__() # main path self.conv1 = ConvModule( in_channels, in_channels, kernel_size=3, padding=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg) self.conv2 = ConvModule( in_channels, out_channels, kernel_size=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) # identity path self.conv_identity = ConvModule( in_channels, out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) self.relu = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) identity = self.conv_identity(identity) out = x + identity out = self.relu(out) return out @HEADS.register_module() class DoubleConvFCBBoxHead(BBoxHead): r"""Bbox head used in Double-Head R-CNN. .. code-block:: none /-> cls /-> shared convs -> \-> reg roi features /-> cls \-> shared fc -> \-> reg """ # noqa: W605 def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), **kwargs): kwargs.setdefault('with_avg_pool', True) super(DoubleConvFCBBoxHead, self).__init__(**kwargs) assert self.with_avg_pool assert num_convs > 0 assert num_fcs > 0 self.num_convs = num_convs self.num_fcs = num_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg # increase the channel of input features self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels) # add conv heads self.conv_branch = self._add_conv_branch() # add fc heads self.fc_branch = self._add_fc_branch() out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg) self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1) self.relu = nn.ReLU(inplace=True) def _add_conv_branch(self): """Add the conv branch, which consists of a sequence of conv layers.""" branch_convs = nn.ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs def _add_fc_branch(self): """Add the fc branch, which consists of a sequence of fc layers.""" branch_fcs = nn.ModuleList() for i in range(self.num_fcs): fc_in_channels = ( self.in_channels * self.roi_feat_area if i == 0 else self.fc_out_channels) branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) return branch_fcs def init_weights(self): # conv layers are already initialized by ConvModule normal_init(self.fc_cls, std=0.01) normal_init(self.fc_reg, std=0.001) for m in self.fc_branch.modules(): if isinstance(m, nn.Linear): xavier_init(m, distribution='uniform') def forward(self, x_cls, x_reg): # conv head x_conv = self.res_block(x_reg) for conv in
self.conv_branch: x_conv = conv(x_conv) if self.with_avg_pool: x_conv = self.avg_pool(x_conv) x_conv = x_conv.view(x_conv.size(0), -1) bbox_pred = self.fc_reg(x_conv) # fc head x_fc = x_cls.view(x_cls.size(0), -1) for fc in self.fc_branch: x_fc = self.relu(fc(x_fc)) cls_score = self.fc_cls(x_fc) return cls_score, bbox_pred
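# A minimal standalone sketch of the BasicResBlock topology above, using plain
# torch.nn stand-ins (no ConvModule / BN) just to show the wiring: 3x3 then
# 1x1 on the main path, a 1x1 projection on the identity path, summed before
# the ReLU.
import torch
import torch.nn as nn

in_c, out_c = 8, 16
conv1 = nn.Conv2d(in_c, in_c, 3, padding=1, bias=False)
conv2 = nn.Conv2d(in_c, out_c, 1, bias=False)
conv_identity = nn.Conv2d(in_c, out_c, 1)
x = torch.rand(2, in_c, 7, 7)
out = torch.relu(conv2(conv1(x)) + conv_identity(x))
assert out.shape == (2, out_c, 7, 7)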
5,380
30.104046
78
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/shared_heads/res_layer.py
import torch.nn as nn from mmcv.cnn import constant_init, kaiming_init from mmcv.runner import auto_fp16, load_checkpoint from mmdet.models.backbones import ResNet from mmdet.models.builder import SHARED_HEADS from mmdet.models.utils import ResLayer as _ResLayer from mmdet.utils import get_root_logger @SHARED_HEADS.register_module() class ResLayer(nn.Module): def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch', norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=False, dcn=None): super(ResLayer, self).__init__() self.norm_eval = norm_eval self.norm_cfg = norm_cfg self.stage = stage self.fp16_enabled = False block, stage_blocks = ResNet.arch_settings[depth] stage_block = stage_blocks[stage] planes = 64 * 2**stage inplanes = 64 * 2**(stage - 1) * block.expansion res_layer = _ResLayer( block, inplanes, planes, stage_block, stride=stride, dilation=dilation, style=style, with_cp=with_cp, norm_cfg=self.norm_cfg, dcn=dcn) self.add_module(f'layer{stage + 1}', res_layer) def init_weights(self, pretrained=None): """Initialize the weights in the module. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, nn.BatchNorm2d): constant_init(m, 1) else: raise TypeError('pretrained must be a str or None') @auto_fp16() def forward(self, x): res_layer = getattr(self, f'layer{self.stage + 1}') out = res_layer(x) return out def train(self, mode=True): super(ResLayer, self).train(mode) if self.norm_eval: for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval()
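# A minimal standalone sketch of the channel bookkeeping in ResLayer above for
# a Bottleneck-based backbone (expansion=4, e.g. ResNet-50) at the default
# stage=3: the layer consumes stage-2 outputs and emits 512 * 4 channels.
expansion = 4  # Bottleneck expansion for ResNet depths >= 50
stage = 3
planes = 64 * 2**stage                      # 512
inplanes = 64 * 2**(stage - 1) * expansion  # 1024, channels entering layer4
assert (inplanes, planes * expansion) == (1024, 2048)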
2,454
30.474359
74
py
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/mask_heads/grid_head.py
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, kaiming_init, normal_init from mmdet.models.builder import HEADS, build_loss @HEADS.register_module() class GridHead(nn.Module): def __init__(self, grid_points=9, num_convs=8, roi_feat_size=14, in_channels=256, conv_kernel_size=3, point_feat_channels=64, deconv_kernel_size=4, class_agnostic=False, loss_grid=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15), conv_cfg=None, norm_cfg=dict(type='GN', num_groups=36)): super(GridHead, self).__init__() self.grid_points = grid_points self.num_convs = num_convs self.roi_feat_size = roi_feat_size self.in_channels = in_channels self.conv_kernel_size = conv_kernel_size self.point_feat_channels = point_feat_channels self.conv_out_channels = self.point_feat_channels * self.grid_points self.class_agnostic = class_agnostic self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN': assert self.conv_out_channels % norm_cfg['num_groups'] == 0 assert self.grid_points >= 4 self.grid_size = int(np.sqrt(self.grid_points)) if self.grid_size * self.grid_size != self.grid_points: raise ValueError('grid_points must be a square number') # the predicted heatmap is half of whole_map_size if not isinstance(self.roi_feat_size, int): raise ValueError('Only square RoIs are supported in Grid R-CNN') self.whole_map_size = self.roi_feat_size * 4 # compute point-wise sub-regions self.sub_regions = self.calc_sub_regions() self.convs = [] for i in range(self.num_convs): in_channels = ( self.in_channels if i == 0 else self.conv_out_channels) stride = 2 if i == 0 else 1 padding = (self.conv_kernel_size - 1) // 2 self.convs.append( ConvModule( in_channels, self.conv_out_channels, self.conv_kernel_size, stride=stride, padding=padding, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=True)) self.convs = nn.Sequential(*self.convs) self.deconv1 = nn.ConvTranspose2d( self.conv_out_channels, self.conv_out_channels, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points) self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels) self.deconv2 = nn.ConvTranspose2d( self.conv_out_channels, grid_points, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points) # find the 4-neighbor of each grid point self.neighbor_points = [] grid_size = self.grid_size for i in range(grid_size): # i-th column for j in range(grid_size): # j-th row neighbors = [] if i > 0: # left: (i - 1, j) neighbors.append((i - 1) * grid_size + j) if j > 0: # up: (i, j - 1) neighbors.append(i * grid_size + j - 1) if j < grid_size - 1: # down: (i, j + 1) neighbors.append(i * grid_size + j + 1) if i < grid_size - 1: # right: (i + 1, j) neighbors.append((i + 1) * grid_size + j) self.neighbor_points.append(tuple(neighbors)) # total edges in the grid self.num_edges = sum([len(p) for p in self.neighbor_points]) self.forder_trans = nn.ModuleList() # first-order feature transition self.sorder_trans = nn.ModuleList() # second-order feature transition for neighbors in self.neighbor_points: fo_trans = nn.ModuleList() so_trans = nn.ModuleList() for _ in range(len(neighbors)): # each transition module consists of a 5x5 depth-wise conv and # 1x1 conv.
fo_trans.append( nn.Sequential( nn.Conv2d( self.point_feat_channels, self.point_feat_channels, 5, stride=1, padding=2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1))) so_trans.append( nn.Sequential( nn.Conv2d( self.point_feat_channels, self.point_feat_channels, 5, 1, 2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1))) self.forder_trans.append(fo_trans) self.sorder_trans.append(so_trans) self.loss_grid = build_loss(loss_grid) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): # TODO: compare mode = "fan_in" or "fan_out" kaiming_init(m) for m in self.modules(): if isinstance(m, nn.ConvTranspose2d): normal_init(m, std=0.001) nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01)) def forward(self, x): assert x.shape[-1] == x.shape[-2] == self.roi_feat_size # RoI feature transformation, downsample 2x x = self.convs(x) c = self.point_feat_channels # first-order fusion x_fo = [None for _ in range(self.grid_points)] for i, points in enumerate(self.neighbor_points): x_fo[i] = x[:, i * c:(i + 1) * c] for j, point_idx in enumerate(points): x_fo[i] = x_fo[i] + self.forder_trans[i][j]( x[:, point_idx * c:(point_idx + 1) * c]) # second-order fusion x_so = [None for _ in range(self.grid_points)] for i, points in enumerate(self.neighbor_points): x_so[i] = x[:, i * c:(i + 1) * c] for j, point_idx in enumerate(points): x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx]) # predicted heatmap with fused features x2 = torch.cat(x_so, dim=1) x2 = self.deconv1(x2) x2 = F.relu(self.norm1(x2), inplace=True) heatmap = self.deconv2(x2) # predicted heatmap with original features (applicable during training) if self.training: x1 = x x1 = self.deconv1(x1) x1 = F.relu(self.norm1(x1), inplace=True) heatmap_unfused = self.deconv2(x1) else: heatmap_unfused = heatmap return dict(fused=heatmap, unfused=heatmap_unfused) def calc_sub_regions(self): """Compute point specific representation regions. See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details. """ # to make it consistent with the original implementation, half_size # is computed as 2 * quarter_size, which is smaller half_size = self.whole_map_size // 4 * 2 sub_regions = [] for i in range(self.grid_points): x_idx = i // self.grid_size y_idx = i % self.grid_size if x_idx == 0: sub_x1 = 0 elif x_idx == self.grid_size - 1: sub_x1 = half_size else: ratio = x_idx / (self.grid_size - 1) - 0.25 sub_x1 = max(int(ratio * self.whole_map_size), 0) if y_idx == 0: sub_y1 = 0 elif y_idx == self.grid_size - 1: sub_y1 = half_size else: ratio = y_idx / (self.grid_size - 1) - 0.25 sub_y1 = max(int(ratio * self.whole_map_size), 0) sub_regions.append( (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size)) return sub_regions def get_targets(self, sampling_results, rcnn_train_cfg): # mix all samples (across images) together. 
pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results], dim=0).cpu() pos_gt_bboxes = torch.cat( [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu() assert pos_bboxes.shape == pos_gt_bboxes.shape # expand pos_bboxes to 2x of original size x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1) pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1) num_rois = pos_bboxes.shape[0] map_size = self.whole_map_size # this is not the final target shape targets = torch.zeros((num_rois, self.grid_points, map_size, map_size), dtype=torch.float) # pre-compute interpolation factors for all grid points. # the first item is the factor of x-dim, and the second is y-dim. # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1) factors = [] for j in range(self.grid_points): x_idx = j // self.grid_size y_idx = j % self.grid_size factors.append((1 - x_idx / (self.grid_size - 1), 1 - y_idx / (self.grid_size - 1))) radius = rcnn_train_cfg.pos_radius radius2 = radius**2 for i in range(num_rois): # ignore small bboxes if (pos_bbox_ws[i] <= self.grid_size or pos_bbox_hs[i] <= self.grid_size): continue # for each grid point, mark a small circle as positive for j in range(self.grid_points): factor_x, factor_y = factors[j] gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + ( 1 - factor_x) * pos_gt_bboxes[i, 2] gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + ( 1 - factor_y) * pos_gt_bboxes[i, 3] cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] * map_size) cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] * map_size) for x in range(cx - radius, cx + radius + 1): for y in range(cy - radius, cy + radius + 1): if x >= 0 and x < map_size and y >= 0 and y < map_size: if (x - cx)**2 + (y - cy)**2 <= radius2: targets[i, j, y, x] = 1 # reduce the target heatmap size by a half # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688). 
sub_targets = [] for i in range(self.grid_points): sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i] sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2]) sub_targets = torch.cat(sub_targets, dim=1) sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device) return sub_targets def loss(self, grid_pred, grid_targets): loss_fused = self.loss_grid(grid_pred['fused'], grid_targets) loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets) loss_grid = loss_fused + loss_unfused return dict(loss_grid=loss_grid) def get_bboxes(self, det_bboxes, grid_pred, img_metas): # TODO: refactoring assert det_bboxes.shape[0] == grid_pred.shape[0] det_bboxes = det_bboxes.cpu() cls_scores = det_bboxes[:, [4]] det_bboxes = det_bboxes[:, :4] grid_pred = grid_pred.sigmoid().cpu() R, c, h, w = grid_pred.shape half_size = self.whole_map_size // 4 * 2 assert h == w == half_size assert c == self.grid_points # find the point with max scores in the half-sized heatmap grid_pred = grid_pred.view(R * c, h * w) pred_scores, pred_position = grid_pred.max(dim=1) xs = pred_position % w ys = pred_position // w # get the position in the whole heatmap instead of half-sized heatmap for i in range(self.grid_points): xs[i::self.grid_points] += self.sub_regions[i][0] ys[i::self.grid_points] += self.sub_regions[i][1] # reshape to (num_rois, grid_points) pred_scores, xs, ys = tuple( map(lambda x: x.view(R, c), [pred_scores, xs, ys])) # get expanded pos_bboxes widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1) heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1) x1 = (det_bboxes[:, 0, None] - widths / 2) y1 = (det_bboxes[:, 1, None] - heights / 2) # map the grid point to the absolute coordinates abs_xs = (xs.float() + 0.5) / w * widths + x1 abs_ys = (ys.float() + 0.5) / h * heights + y1 # get the grid points indices that fall on the bbox boundaries x1_inds = [i for i in range(self.grid_size)] y1_inds = [i * self.grid_size for i in range(self.grid_size)] x2_inds = [ self.grid_points - self.grid_size + i for i in range(self.grid_size) ] y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)] # voting of all grid points on some boundary bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, x1_inds].sum(dim=1, keepdim=True)) bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, y1_inds].sum(dim=1, keepdim=True)) bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, x2_inds].sum(dim=1, keepdim=True)) bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, y2_inds].sum(dim=1, keepdim=True)) bbox_res = torch.cat( [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1) bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1]) bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0]) return bbox_res
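# A minimal standalone sketch of the neighbour table built in __init__ above
# for the default 3x3 grid (grid_points=9); index k = i * 3 + j with i the
# column and j the row, matching the loops there.
grid_size = 3
neighbor_points = []
for i in range(grid_size):
    for j in range(grid_size):
        neighbors = []
        if i > 0:
            neighbors.append((i - 1) * grid_size + j)  # left
        if j > 0:
            neighbors.append(i * grid_size + j - 1)    # up
        if j < grid_size - 1:
            neighbors.append(i * grid_size + j + 1)    # down
        if i < grid_size - 1:
            neighbors.append((i + 1) * grid_size + j)  # right
        neighbor_points.append(tuple(neighbors))
assert neighbor_points[4] == (1, 3, 5, 7)  # the center touches 4 neighbours
assert sum(len(p) for p in neighbor_points) == 24  # num_edges on a 3x3 grid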
15,432
41.869444
79
py
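A minimal standalone sketch (not repository code) of the grid-point bookkeeping used by the grid head above, assuming the default 3x3 grid (grid_size=3, grid_points=9): the interpolation factors place each grid point inside the gt box, and the four index sets pick out the points that vote for each bbox boundary in get_bboxes.

# Sketch only: reproduces the index arithmetic above for grid_size = 3.
grid_size = 3
grid_points = grid_size ** 2

# Interpolation factors per grid point; (1, 1) is the gt box's top-left
# corner, (0, 0) its bottom-right, (0.5, 0.5) its center.
factors = []
for j in range(grid_points):
    x_idx = j // grid_size
    y_idx = j % grid_size
    factors.append((1 - x_idx / (grid_size - 1),
                    1 - y_idx / (grid_size - 1)))

# Grid points lying on each bbox edge, as used in the boundary voting.
x1_inds = [i for i in range(grid_size)]                             # left
y1_inds = [i * grid_size for i in range(grid_size)]                 # top
x2_inds = [grid_points - grid_size + i for i in range(grid_size)]   # right
y2_inds = [(i + 1) * grid_size - 1 for i in range(grid_size)]       # bottom

print(factors[:3])   # [(1.0, 1.0), (1.0, 0.5), (1.0, 0.0)]
print(x1_inds, y1_inds, x2_inds, y2_inds)  # [0,1,2] [0,3,6] [6,7,8] [2,5,8]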
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
import torch.nn as nn from mmcv.cnn import ConvModule, Linear, constant_init, xavier_init from mmcv.runner import auto_fp16 from mmdet.models.builder import HEADS from .fcn_mask_head import FCNMaskHead @HEADS.register_module() class CoarseMaskHead(FCNMaskHead): """Coarse mask head used in PointRend. Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample the input feature map instead of upsample it. Args: num_convs (int): Number of conv layers in the head. Default: 0. num_fcs (int): Number of fc layers in the head. Default: 2. fc_out_channels (int): Number of output channels of fc layer. Default: 1024. downsample_factor (int): The factor that feature map is downsampled by. Default: 2. """ def __init__(self, num_convs=0, num_fcs=2, fc_out_channels=1024, downsample_factor=2, *arg, **kwarg): super(CoarseMaskHead, self).__init__( *arg, num_convs=num_convs, upsample_cfg=dict(type=None), **kwarg) self.num_fcs = num_fcs assert self.num_fcs > 0 self.fc_out_channels = fc_out_channels self.downsample_factor = downsample_factor assert self.downsample_factor >= 1 # remove conv_logit delattr(self, 'conv_logits') if downsample_factor > 1: downsample_in_channels = ( self.conv_out_channels if self.num_convs > 0 else self.in_channels) self.downsample_conv = ConvModule( downsample_in_channels, self.conv_out_channels, kernel_size=downsample_factor, stride=downsample_factor, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) else: self.downsample_conv = None self.output_size = (self.roi_feat_size[0] // downsample_factor, self.roi_feat_size[1] // downsample_factor) self.output_area = self.output_size[0] * self.output_size[1] last_layer_dim = self.conv_out_channels * self.output_area self.fcs = nn.ModuleList() for i in range(num_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) self.fcs.append(Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels output_channels = self.num_classes * self.output_area self.fc_logits = Linear(last_layer_dim, output_channels) def init_weights(self): for m in self.fcs.modules(): if isinstance(m, nn.Linear): xavier_init(m) constant_init(self.fc_logits, 0.001) @auto_fp16() def forward(self, x): for conv in self.convs: x = conv(x) if self.downsample_conv is not None: x = self.downsample_conv(x) x = x.flatten(1) for fc in self.fcs: x = self.relu(fc(x)) mask_pred = self.fc_logits(x).view( x.size(0), self.num_classes, *self.output_size) return mask_pred
3,233
34.152174
79
py
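A shape-only sketch of the CoarseMaskHead arithmetic above, assuming the defaults roi_feat_size=14, downsample_factor=2, num_classes=80 and a batch of 4 RoIs; plain nn modules stand in for the mmcv wrappers, so this is illustrative rather than the actual class.

import torch
import torch.nn as nn

roi_feat, factor, num_classes, channels = 14, 2, 80, 256
output_size = (roi_feat // factor, roi_feat // factor)   # (7, 7)
output_area = output_size[0] * output_size[1]            # 49

# kernel_size == stride == downsample_factor halves the feature map
downsample = nn.Conv2d(channels, channels, kernel_size=factor, stride=factor)
fc = nn.Linear(channels * output_area, 1024)
fc_logits = nn.Linear(1024, num_classes * output_area)

x = torch.randn(4, channels, roi_feat, roi_feat)         # 4 RoIs
x = downsample(x).flatten(1)                             # (4, 256 * 49)
mask_pred = fc_logits(torch.relu(fc(x))).view(4, num_classes, *output_size)
print(mask_pred.shape)  # torch.Size([4, 80, 7, 7])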
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/mask_heads/maskiou_head.py
import numpy as np import torch import torch.nn as nn from mmcv.cnn import Conv2d, Linear, MaxPool2d, kaiming_init, normal_init from mmcv.runner import force_fp32 from torch.nn.modules.utils import _pair from mmdet.models.builder import HEADS, build_loss @HEADS.register_module() class MaskIoUHead(nn.Module): """Mask IoU Head. This head predicts the IoU of predicted masks and corresponding gt masks. """ def __init__(self, num_convs=4, num_fcs=2, roi_feat_size=14, in_channels=256, conv_out_channels=256, fc_out_channels=1024, num_classes=80, loss_iou=dict(type='MSELoss', loss_weight=0.5)): super(MaskIoUHead, self).__init__() self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.num_classes = num_classes self.fp16_enabled = False self.convs = nn.ModuleList() for i in range(num_convs): if i == 0: # concatenation of mask feature and mask prediction in_channels = self.in_channels + 1 else: in_channels = self.conv_out_channels stride = 2 if i == num_convs - 1 else 1 self.convs.append( Conv2d( in_channels, self.conv_out_channels, 3, stride=stride, padding=1)) roi_feat_size = _pair(roi_feat_size) pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2) self.fcs = nn.ModuleList() for i in range(num_fcs): in_channels = ( self.conv_out_channels * pooled_area if i == 0 else self.fc_out_channels) self.fcs.append(Linear(in_channels, self.fc_out_channels)) self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes) self.relu = nn.ReLU() self.max_pool = MaxPool2d(2, 2) self.loss_iou = build_loss(loss_iou) def init_weights(self): for conv in self.convs: kaiming_init(conv) for fc in self.fcs: kaiming_init( fc, a=1, mode='fan_in', nonlinearity='leaky_relu', distribution='uniform') normal_init(self.fc_mask_iou, std=0.01) def forward(self, mask_feat, mask_pred): mask_pred = mask_pred.sigmoid() mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1)) x = torch.cat((mask_feat, mask_pred_pooled), 1) for conv in self.convs: x = self.relu(conv(x)) x = x.flatten(1) for fc in self.fcs: x = self.relu(fc(x)) mask_iou = self.fc_mask_iou(x) return mask_iou @force_fp32(apply_to=('mask_iou_pred', )) def loss(self, mask_iou_pred, mask_iou_targets): pos_inds = mask_iou_targets > 0 if pos_inds.sum() > 0: loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds], mask_iou_targets[pos_inds]) else: loss_mask_iou = mask_iou_pred.sum() * 0 return dict(loss_mask_iou=loss_mask_iou) @force_fp32(apply_to=('mask_pred', )) def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets, rcnn_train_cfg): """Compute target of mask IoU. Mask IoU target is the IoU of the predicted mask (inside a bbox) and the gt mask of corresponding gt mask (the whole instance). The intersection area is computed inside the bbox, and the gt mask area is computed with two steps, firstly we compute the gt area inside the bbox, then divide it by the area ratio of gt area inside the bbox and the gt area of the whole instance. Args: sampling_results (list[:obj:`SamplingResult`]): sampling results. gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance) of each image, with the same shape of the input image. mask_pred (Tensor): Predicted masks of each positive proposal, shape (num_pos, h, w). mask_targets (Tensor): Gt mask of each positive proposal, binary map of the shape (num_pos, h, w). rcnn_train_cfg (dict): Training config for R-CNN part. Returns: Tensor: mask iou target (length == num positive). 
""" pos_proposals = [res.pos_bboxes for res in sampling_results] pos_assigned_gt_inds = [ res.pos_assigned_gt_inds for res in sampling_results ] # compute the area ratio of gt areas inside the proposals and # the whole instance area_ratios = map(self._get_area_ratio, pos_proposals, pos_assigned_gt_inds, gt_masks) area_ratios = torch.cat(list(area_ratios)) assert mask_targets.size(0) == area_ratios.size(0) mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float() mask_pred_areas = mask_pred.sum((-1, -2)) # mask_pred and mask_targets are binary maps overlap_areas = (mask_pred * mask_targets).sum((-1, -2)) # compute the mask area of the whole instance gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7) mask_iou_targets = overlap_areas / ( mask_pred_areas + gt_full_areas - overlap_areas) return mask_iou_targets def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks): """Compute area ratio of the gt mask inside the proposal and the gt mask of the corresponding instance.""" num_pos = pos_proposals.size(0) if num_pos > 0: area_ratios = [] proposals_np = pos_proposals.cpu().numpy() pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() # compute mask areas of gt instances (batch processing for speedup) gt_instance_mask_area = gt_masks.areas for i in range(num_pos): gt_mask = gt_masks[pos_assigned_gt_inds[i]] # crop the gt mask inside the proposal bbox = proposals_np[i, :].astype(np.int32) gt_mask_in_proposal = gt_mask.crop(bbox) ratio = gt_mask_in_proposal.areas[0] / ( gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7) area_ratios.append(ratio) area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to( pos_proposals.device) else: area_ratios = pos_proposals.new_zeros((0, )) return area_ratios @force_fp32(apply_to=('mask_iou_pred', )) def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels): """Get the mask scores. mask_score = bbox_score * mask_iou """ inds = range(det_labels.size(0)) mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1] mask_scores = mask_scores.cpu().numpy() det_labels = det_labels.cpu().numpy() return [mask_scores[det_labels == i] for i in range(self.num_classes)]
7,332
38.213904
79
py
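A numeric sketch of the mask-IoU target computed in get_targets above: the gt mask area outside the proposal is recovered by dividing the in-box gt area by the precomputed area ratio, so the IoU can be formed without ever rasterizing the full-image gt mask. All values here are made up for illustration.

import torch

mask_pred = (torch.rand(2, 28, 28) > 0.5).float()    # binarized predictions
mask_targets = (torch.rand(2, 28, 28) > 0.5).float() # in-box gt masks
area_ratios = torch.tensor([0.8, 0.5])  # fraction of each gt inside its box

pred_areas = mask_pred.sum((-1, -2))
overlap = (mask_pred * mask_targets).sum((-1, -2))
gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)
mask_iou_targets = overlap / (pred_areas + gt_full_areas - overlap)
print(mask_iou_targets)  # one IoU target per positive RoI, in [0, 1]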
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d, ConvModule, build_upsample_layer from mmcv.ops.carafe import CARAFEPack from mmcv.runner import auto_fp16, force_fp32 from torch.nn.modules.utils import _pair from mmdet.core import mask_target from mmdet.models.builder import HEADS, build_loss BYTES_PER_FLOAT = 4 # TODO: This memory limit may be too much or too little. It would be better to # determine it based on available resources. GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit @HEADS.register_module() class FCNMaskHead(nn.Module): def __init__(self, num_convs=4, roi_feat_size=14, in_channels=256, conv_kernel_size=3, conv_out_channels=256, num_classes=80, class_agnostic=False, upsample_cfg=dict(type='deconv', scale_factor=2), conv_cfg=None, norm_cfg=None, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)): super(FCNMaskHead, self).__init__() self.upsample_cfg = upsample_cfg.copy() if self.upsample_cfg['type'] not in [ None, 'deconv', 'nearest', 'bilinear', 'carafe' ]: raise ValueError( f'Invalid upsample method {self.upsample_cfg["type"]}, ' 'accepted methods are "deconv", "nearest", "bilinear", ' '"carafe"') self.num_convs = num_convs # WARN: roi_feat_size is reserved and not used self.roi_feat_size = _pair(roi_feat_size) self.in_channels = in_channels self.conv_kernel_size = conv_kernel_size self.conv_out_channels = conv_out_channels self.upsample_method = self.upsample_cfg.get('type') self.scale_factor = self.upsample_cfg.pop('scale_factor', None) self.num_classes = num_classes self.class_agnostic = class_agnostic self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self.loss_mask = build_loss(loss_mask) self.convs = nn.ModuleList() for i in range(self.num_convs): in_channels = ( self.in_channels if i == 0 else self.conv_out_channels) padding = (self.conv_kernel_size - 1) // 2 self.convs.append( ConvModule( in_channels, self.conv_out_channels, self.conv_kernel_size, padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg)) upsample_in_channels = ( self.conv_out_channels if self.num_convs > 0 else in_channels) upsample_cfg_ = self.upsample_cfg.copy() if self.upsample_method is None: self.upsample = None elif self.upsample_method == 'deconv': upsample_cfg_.update( in_channels=upsample_in_channels, out_channels=self.conv_out_channels, kernel_size=self.scale_factor, stride=self.scale_factor) self.upsample = build_upsample_layer(upsample_cfg_) elif self.upsample_method == 'carafe': upsample_cfg_.update( channels=upsample_in_channels, scale_factor=self.scale_factor) self.upsample = build_upsample_layer(upsample_cfg_) else: # suppress warnings align_corners = (None if self.upsample_method == 'nearest' else False) upsample_cfg_.update( scale_factor=self.scale_factor, mode=self.upsample_method, align_corners=align_corners) self.upsample = build_upsample_layer(upsample_cfg_) out_channels = 1 if self.class_agnostic else self.num_classes logits_in_channel = ( self.conv_out_channels if self.upsample_method == 'deconv' else upsample_in_channels) self.conv_logits = Conv2d(logits_in_channel, out_channels, 1) self.relu = nn.ReLU(inplace=True) self.debug_imgs = None def init_weights(self): for m in [self.upsample, self.conv_logits]: if m is None: continue elif isinstance(m, CARAFEPack): m.init_weights() else: nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu') nn.init.constant_(m.bias, 0) @auto_fp16() def forward(self, x): for conv in self.convs: x = conv(x) if self.upsample is 
not None: x = self.upsample(x) if self.upsample_method == 'deconv': x = self.relu(x) mask_pred = self.conv_logits(x) return mask_pred def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): pos_proposals = [res.pos_bboxes for res in sampling_results] pos_assigned_gt_inds = [ res.pos_assigned_gt_inds for res in sampling_results ] mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, gt_masks, rcnn_train_cfg) return mask_targets @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, mask_targets, labels): loss = dict() if mask_pred.size(0) == 0: loss_mask = mask_pred.sum() else: if self.class_agnostic: loss_mask = self.loss_mask(mask_pred, mask_targets, torch.zeros_like(labels)) else: loss_mask = self.loss_mask(mask_pred, mask_targets, labels) loss['loss_mask'] = loss_mask return loss def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale): """Get segmentation masks from mask_pred and bboxes. Args: mask_pred (Tensor or ndarray): shape (n, #class, h, w). For single-scale testing, mask_pred is the direct output of model, whose type is Tensor, while for multi-scale testing, it will be converted to numpy array outside of this method. det_bboxes (Tensor): shape (n, 4/5) det_labels (Tensor): shape (n, ) img_shape (Tensor): shape (3, ) rcnn_test_cfg (dict): rcnn testing config ori_shape: original image size Returns: list[list]: encoded masks """ if isinstance(mask_pred, torch.Tensor): mask_pred = mask_pred.sigmoid() else: mask_pred = det_bboxes.new_tensor(mask_pred) device = mask_pred.device cls_segms = [[] for _ in range(self.num_classes) ] # BG is not included in num_classes bboxes = det_bboxes[:, :4] labels = det_labels if rescale: img_h, img_w = ori_shape[:2] else: if isinstance(scale_factor, float): img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32) img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32) else: w_scale, h_scale = scale_factor[0], scale_factor[1] img_h = np.round(ori_shape[0] * h_scale.item()).astype( np.int32) img_w = np.round(ori_shape[1] * w_scale.item()).astype( np.int32) scale_factor = 1.0 if not isinstance(scale_factor, (float, torch.Tensor)): scale_factor = bboxes.new_tensor(scale_factor) bboxes = bboxes / scale_factor N = len(mask_pred) # The actual implementation split the input into chunks, # and paste them chunk by chunk. if device.type == 'cpu': # CPU is most efficient when they are pasted one by one with # skip_empty=True, so that it performs minimal number of # operations. 
num_chunks = N else: # GPU benefits from parallelism for larger chunks, # but may have memory issue num_chunks = int( np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) assert (num_chunks <= N), 'Default GPU_MEM_LIMIT is too small; try increasing it' chunks = torch.chunk(torch.arange(N, device=device), num_chunks) threshold = rcnn_test_cfg.mask_thr_binary im_mask = torch.zeros( N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8) if not self.class_agnostic: mask_pred = mask_pred[range(N), labels][:, None] for inds in chunks: masks_chunk, spatial_inds = _do_paste_mask( mask_pred[inds], bboxes[inds], img_h, img_w, skip_empty=device.type == 'cpu') if threshold >= 0: masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) else: # for visualization and debugging masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) im_mask[(inds, ) + spatial_inds] = masks_chunk for i in range(N): cls_segms[labels[i]].append(im_mask[i].cpu().numpy()) return cls_segms def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): """Paste instance masks acoording to boxes. This implementation is modified from https://github.com/facebookresearch/detectron2/ Args: masks (Tensor): N, 1, H, W boxes (Tensor): N, 4 img_h (int): Height of the image to be pasted. img_w (int): Width of the image to be pasted. skip_empty (bool): Only paste masks within the region that tightly bound all boxes, and returns the results this region only. An important optimization for CPU. Returns: tuple: (Tensor, tuple). The first item is mask tensor, the second one is the slice object. If skip_empty == False, the whole image will be pasted. It will return a mask of shape (N, img_h, img_w) and an empty tuple. If skip_empty == True, only area around the mask will be pasted. A mask of shape (N, h', w') and its start and end coordinates in the original image will be returned. """ # On GPU, paste all masks together (up to chunk size) # by using the entire image to sample the masks # Compared to pasting them one by one, # this has more operations but is faster on COCO-scale dataset. device = masks.device if skip_empty: x0_int, y0_int = torch.clamp( boxes.min(dim=0).values.floor()[:2] - 1, min=0).to(dtype=torch.int32) x1_int = torch.clamp( boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) y1_int = torch.clamp( boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) else: x0_int, y0_int = 0, 0 x1_int, y1_int = img_w, img_h x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 N = masks.shape[0] img_y = torch.arange( y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 img_x = torch.arange( x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 img_y = (img_y - y0) / (y1 - y0) * 2 - 1 img_x = (img_x - x0) / (x1 - x0) * 2 - 1 # img_x, img_y have shapes (N, w), (N, h) if torch.isinf(img_x).any(): inds = torch.where(torch.isinf(img_x)) img_x[inds] = 0 if torch.isinf(img_y).any(): inds = torch.where(torch.isinf(img_y)) img_y[inds] = 0 gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) grid = torch.stack([gx, gy], dim=3) img_masks = F.grid_sample( masks.to(dtype=torch.float32), grid, align_corners=False) if skip_empty: return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) else: return img_masks[:, 0], ()
12,399
38.240506
79
py
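A back-of-the-envelope sketch of the chunking rule in get_seg_masks above: on GPU, masks are pasted in chunks sized so that each chunk of float32 paste buffers stays under GPU_MEM_LIMIT. The detection count and image size below are hypothetical.

import numpy as np

BYTES_PER_FLOAT = 4
GPU_MEM_LIMIT = 1024 ** 3  # 1 GB

N, img_h, img_w = 100, 800, 1333  # hypothetical detections / image size
num_chunks = int(np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
print(num_chunks)       # 1 here; grows with N and image area
assert num_chunks <= N  # otherwise GPU_MEM_LIMIT would be too small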
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, kaiming_init from mmcv.runner import auto_fp16, force_fp32 from mmdet.models.builder import HEADS @HEADS.register_module() class FusedSemanticHead(nn.Module): r"""Multi-level fused semantic segmentation head. .. code-block:: none in_1 -> 1x1 conv --- | in_2 -> 1x1 conv -- | || in_3 -> 1x1 conv - || ||| /-> 1x1 conv (mask prediction) in_4 -> 1x1 conv -----> 3x3 convs (*4) | \-> 1x1 conv (feature) in_5 -> 1x1 conv --- """ # noqa: W605 def __init__(self, num_ins, fusion_level, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=183, ignore_label=255, loss_weight=0.2, conv_cfg=None, norm_cfg=None): super(FusedSemanticHead, self).__init__() self.num_ins = num_ins self.fusion_level = fusion_level self.num_convs = num_convs self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.num_classes = num_classes self.ignore_label = ignore_label self.loss_weight = loss_weight self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self.lateral_convs = nn.ModuleList() for i in range(self.num_ins): self.lateral_convs.append( ConvModule( self.in_channels, self.in_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) self.convs = nn.ModuleList() for i in range(self.num_convs): in_channels = self.in_channels if i == 0 else conv_out_channels self.convs.append( ConvModule( in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_embedding = ConvModule( conv_out_channels, conv_out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label) def init_weights(self): kaiming_init(self.conv_logits) @auto_fp16() def forward(self, feats): x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) fused_size = tuple(x.shape[-2:]) for i, feat in enumerate(feats): if i != self.fusion_level: feat = F.interpolate( feat, size=fused_size, mode='bilinear', align_corners=True) x += self.lateral_convs[i](feat) for i in range(self.num_convs): x = self.convs[i](x) mask_pred = self.conv_logits(x) x = self.conv_embedding(x) return mask_pred, x @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, labels): labels = labels.squeeze(1).long() loss_semantic_seg = self.criterion(mask_pred, labels) loss_semantic_seg *= self.loss_weight return loss_semantic_seg
3,610
32.435185
79
py
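A minimal sketch of the multi-level fusion performed in forward() above: every level is resized to the fusion level's spatial size and summed after a lateral 1x1 conv. Plain nn.Conv2d layers stand in for the repo's ConvModule laterals, and the five feature sizes are assumed FPN-like values, not repo defaults.

import torch
import torch.nn as nn
import torch.nn.functional as F

feats = [torch.randn(1, 256, s, s) for s in (64, 32, 16, 8, 4)]
fusion_level = 1
laterals = nn.ModuleList(nn.Conv2d(256, 256, 1) for _ in feats)

x = laterals[fusion_level](feats[fusion_level])
size = x.shape[-2:]
for i, feat in enumerate(feats):
    if i != fusion_level:
        x = x + laterals[i](F.interpolate(
            feat, size=size, mode='bilinear', align_corners=True))
print(x.shape)  # torch.Size([1, 256, 32, 32])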
GFocalV2
GFocalV2-master/mmdet/models/roi_heads/mask_heads/mask_point_head.py
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa import torch import torch.nn as nn from mmcv.cnn import ConvModule, normal_init from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point from mmdet.models.builder import HEADS, build_loss @HEADS.register_module() class MaskPointHead(nn.Module): """A mask point head use in PointRend. ``MaskPointHead`` use shared multi-layer perceptron (equivalent to nn.Conv1d) to predict the logit of input points. The fine-grained feature and coarse feature will be concatenate together for predication. Args: num_fcs (int): Number of fc layers in the head. Default: 3. in_channels (int): Number of input channels. Default: 256. fc_channels (int): Number of fc channels. Default: 256. num_classes (int): Number of classes for logits. Default: 80. class_agnostic (bool): Whether use class agnostic classification. If so, the output channels of logits will be 1. Default: False. coarse_pred_each_layer (bool): Whether concatenate coarse feature with the output of each fc layer. Default: True. conv_cfg (dict | None): Dictionary to construct and config conv layer. Default: dict(type='Conv1d')) norm_cfg (dict | None): Dictionary to construct and config norm layer. Default: None. loss_point (dict): Dictionary to construct and config loss layer of point head. Default: dict(type='CrossEntropyLoss', use_mask=True, loss_weight=1.0). """ def __init__(self, num_classes, num_fcs=3, in_channels=256, fc_channels=256, class_agnostic=False, coarse_pred_each_layer=True, conv_cfg=dict(type='Conv1d'), norm_cfg=None, act_cfg=dict(type='ReLU'), loss_point=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)): super().__init__() self.num_fcs = num_fcs self.in_channels = in_channels self.fc_channles = fc_channels self.num_classes = num_classes self.class_agnostic = class_agnostic self.coarse_pred_each_layer = coarse_pred_each_layer self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.loss_point = build_loss(loss_point) fc_in_channels = in_channels + num_classes self.fcs = nn.ModuleList() for _ in range(num_fcs): fc = ConvModule( fc_in_channels, fc_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.fcs.append(fc) fc_in_channels = fc_channels fc_in_channels += num_classes if self.coarse_pred_each_layer else 0 out_channels = 1 if self.class_agnostic else self.num_classes self.fc_logits = nn.Conv1d( fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0) def init_weights(self): """Initialize last classification layer of MaskPointHead, conv layers are already initialized by ConvModule.""" normal_init(self.fc_logits, std=0.001) def forward(self, fine_grained_feats, coarse_feats): """Classify each point base on fine grained and coarse feats. Args: fine_grained_feats (Tensor): Fine grained feature sampled from FPN, shape (num_rois, in_channels, num_points). coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead, shape (num_rois, num_classes, num_points). Returns: Tensor: Point classification results, shape (num_rois, num_class, num_points). """ x = torch.cat([fine_grained_feats, coarse_feats], dim=1) for fc in self.fcs: x = fc(x) if self.coarse_pred_each_layer: x = torch.cat((x, coarse_feats), dim=1) return self.fc_logits(x) def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks, cfg): """Get training targets of MaskPointHead for all images. Args: rois (Tensor): Region of Interest, shape (num_rois, 5). 
rel_roi_points: Points coordinates relative to RoI, shape (num_rois, num_points, 2). sampling_results (:obj:`SamplingResult`): Sampling result after sampling and assignment. gt_masks (Tensor) : Ground truth segmentation masks of corresponding boxes, shape (num_rois, height, width). cfg (dict): Training cfg. Returns: Tensor: Point target, shape (num_rois, num_points). """ num_imgs = len(sampling_results) rois_list = [] rel_roi_points_list = [] for batch_ind in range(num_imgs): inds = (rois[:, 0] == batch_ind) rois_list.append(rois[inds]) rel_roi_points_list.append(rel_roi_points[inds]) pos_assigned_gt_inds_list = [ res.pos_assigned_gt_inds for res in sampling_results ] cfg_list = [cfg for _ in range(num_imgs)] point_targets = map(self._get_target_single, rois_list, rel_roi_points_list, pos_assigned_gt_inds_list, gt_masks, cfg_list) point_targets = list(point_targets) if len(point_targets) > 0: point_targets = torch.cat(point_targets) return point_targets def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds, gt_masks, cfg): """Get training target of MaskPointHead for each image.""" num_pos = rois.size(0) num_points = cfg.num_points if num_pos > 0: gt_masks_th = ( gt_masks.to_tensor(rois.dtype, rois.device).index_select( 0, pos_assigned_gt_inds)) gt_masks_th = gt_masks_th.unsqueeze(1) rel_img_points = rel_roi_point_to_rel_img_point( rois, rel_roi_points, gt_masks_th.shape[2:]) point_targets = point_sample(gt_masks_th, rel_img_points).squeeze(1) else: point_targets = rois.new_zeros((0, num_points)) return point_targets def loss(self, point_pred, point_targets, labels): """Calculate loss for MaskPointHead. Args: point_pred (Tensor): Point predication result, shape (num_rois, num_classes, num_points). point_targets (Tensor): Point targets, shape (num_roi, num_points). labels (Tensor): Class label of corresponding boxes, shape (num_rois, ) Returns: dict[str, Tensor]: a dictionary of point loss components """ loss = dict() if self.class_agnostic: loss_point = self.loss_point(point_pred, point_targets, torch.zeros_like(labels)) else: loss_point = self.loss_point(point_pred, point_targets, labels) loss['loss_point'] = loss_point return loss def _get_uncertainty(self, mask_pred, labels): """Estimate uncertainty based on pred logits. We estimate uncertainty as L1 distance between 0.0 and the logits prediction in 'mask_pred' for the foreground class in `classes`. Args: mask_pred (Tensor): mask predication logits, shape (num_rois, num_classes, mask_height, mask_width). labels (list[Tensor]): Either predicted or ground truth label for each predicted mask, of length num_rois. Returns: scores (Tensor): Uncertainty scores with the most uncertain locations having the highest uncertainty score, shape (num_rois, 1, mask_height, mask_width) """ if mask_pred.shape[1] == 1: gt_class_logits = mask_pred.clone() else: inds = torch.arange(mask_pred.shape[0], device=mask_pred.device) gt_class_logits = mask_pred[inds, labels].unsqueeze(1) return -torch.abs(gt_class_logits) def get_roi_rel_points_train(self, mask_pred, labels, cfg): """Get ``num_points`` most uncertain points with random points during train. Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using '_get_uncertainty()' function that takes point's logit prediction as input. Args: mask_pred (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. 
labels (list): The ground truth class for each instance. cfg (dict): Training config of point head. Returns: point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains the coordinates sampled points. """ num_points = cfg.num_points oversample_ratio = cfg.oversample_ratio importance_sample_ratio = cfg.importance_sample_ratio assert oversample_ratio >= 1 assert 0 <= importance_sample_ratio <= 1 batch_size = mask_pred.shape[0] num_sampled = int(num_points * oversample_ratio) point_coords = torch.rand( batch_size, num_sampled, 2, device=mask_pred.device) point_logits = point_sample(mask_pred, point_coords) # It is crucial to calculate uncertainty based on the sampled # prediction value for the points. Calculating uncertainties of the # coarse predictions first and sampling them for points leads to # incorrect results. To illustrate this: assume uncertainty func( # logits)=-abs(logits), a sampled point between two coarse # predictions with -1 and 1 logits has 0 logits, and therefore 0 # uncertainty value. However, if we calculate uncertainties for the # coarse predictions first, both will have -1 uncertainty, # and sampled point will get -1 uncertainty. point_uncertainties = self._get_uncertainty(point_logits, labels) num_uncertain_points = int(importance_sample_ratio * num_points) num_random_points = num_points - num_uncertain_points idx = torch.topk( point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] shift = num_sampled * torch.arange( batch_size, dtype=torch.long, device=mask_pred.device) idx += shift[:, None] point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( batch_size, num_uncertain_points, 2) if num_random_points > 0: rand_roi_coords = torch.rand( batch_size, num_random_points, 2, device=mask_pred.device) point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) return point_coords def get_roi_rel_points_test(self, mask_pred, pred_label, cfg): """Get ``num_points`` most uncertain points during test. Args: mask_pred (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. pred_label (list): The predication class for each instance. cfg (dict): Testing config of point head. Returns: point_indices (Tensor): A tensor of shape (num_rois, num_points) that contains indices from [0, mask_height x mask_width) of the most uncertain points. point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the [mask_height, mask_width] grid . """ num_points = cfg.subdivision_num_points uncertainty_map = self._get_uncertainty(mask_pred, pred_label) num_rois, _, mask_height, mask_width = uncertainty_map.shape h_step = 1.0 / mask_height w_step = 1.0 / mask_width uncertainty_map = uncertainty_map.view(num_rois, mask_height * mask_width) num_points = min(mask_height * mask_width, num_points) point_indices = uncertainty_map.topk(num_points, dim=1)[1] point_coords = uncertainty_map.new_zeros(num_rois, num_points, 2) point_coords[:, :, 0] = w_step / 2.0 + (point_indices % mask_width).float() * w_step point_coords[:, :, 1] = h_step / 2.0 + (point_indices // mask_width).float() * h_step return point_indices, point_coords
13,190
42.82392
126
py
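A sketch of the test-time point selection in get_roi_rel_points_test above: uncertainty is -|logit|, the top-k most uncertain cells of the coarse mask are selected, and their flat indices are converted to normalized (x, y) coordinates at cell centers. The 7x7 class-agnostic logits below are random placeholders.

import torch

num_points = 4
mask_pred = torch.randn(2, 1, 7, 7)   # coarse logits for 2 RoIs
uncertainty = -mask_pred.abs()

num_rois, _, h, w = uncertainty.shape
flat = uncertainty.view(num_rois, h * w)
point_indices = flat.topk(num_points, dim=1)[1]

point_coords = torch.zeros(num_rois, num_points, 2)
point_coords[:, :, 0] = 1 / (2 * w) + (point_indices % w).float() / w
point_coords[:, :, 1] = 1 / (2 * h) + (point_indices // w).float() / h
print(point_coords)  # normalized coords in [0, 1] x [0, 1]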
GFocalV2
GFocalV2-master/mmdet/models/losses/ghm_loss.py
import torch import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero( (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 bin_label_weights = label_weights.view(-1, 1).expand( label_weights.size(0), label_channels) return bin_labels, bin_label_weights # TODO: code refactoring to make it consistent with other losses @LOSSES.register_module() class GHMC(nn.Module): """GHM Classification Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector <https://arxiv.org/abs/1811.05181>`_. Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. """ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): super(GHMC, self).__init__() self.bins = bins self.momentum = momentum edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] += 1e-6 if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight def forward(self, pred, target, label_weight, *args, **kwargs): """Calculate the GHM-C loss. Args: pred (float tensor of size [batch_num, class_num]): The direct prediction of classification fc layer. target (float tensor of size [batch_num, class_num]): Binary class target for each sample. label_weight (float tensor of size [batch_num, class_num]): the value is 1 if the sample is valid and 0 if ignored. Returns: The gradient harmonized loss. """ # the target should be binary class label if pred.dim() != target.dim(): target, label_weight = _expand_onehot_labels( target, label_weight, pred.size(-1)) target, label_weight = target.float(), label_weight.float() edges = self.edges mmt = self.momentum weights = torch.zeros_like(pred) # gradient length g = torch.abs(pred.sigmoid().detach() - target) valid = label_weight > 0 tot = max(valid.float().sum().item(), 1.0) n = 0 # n valid bins for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] \ + (1 - mmt) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin n += 1 if n > 0: weights = weights / n loss = F.binary_cross_entropy_with_logits( pred, target, weights, reduction='sum') / tot return loss * self.loss_weight # TODO: code refactoring to make it consistent with other losses @LOSSES.register_module() class GHMR(nn.Module): """GHM Regression Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector <https://arxiv.org/abs/1811.05181>`_. Args: mu (float): The parameter for the Authentic Smooth L1 loss. bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. loss_weight (float): The weight of the total GHM-R loss. 
""" def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0): super(GHMR, self).__init__() self.mu = mu self.bins = bins edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] = 1e3 self.momentum = momentum if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.loss_weight = loss_weight # TODO: support reduction parameter def forward(self, pred, target, label_weight, avg_factor=None): """Calculate the GHM-R loss. Args: pred (float tensor of size [batch_num, 4 (* class_num)]): The prediction of box regression layer. Channel number can be 4 or 4 * class_num depending on whether it is class-agnostic. target (float tensor of size [batch_num, 4 (* class_num)]): The target regression values with the same size of pred. label_weight (float tensor of size [batch_num, 4 (* class_num)]): The weight of each sample, 0 if ignored. Returns: The gradient harmonized loss. """ mu = self.mu edges = self.edges mmt = self.momentum # ASL1 loss diff = pred - target loss = torch.sqrt(diff * diff + mu * mu) - mu # gradient length g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() weights = torch.zeros_like(g) valid = label_weight > 0 tot = max(label_weight.float().sum().item(), 1.0) n = 0 # n: valid bins for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: n += 1 if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] \ + (1 - mmt) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin if n > 0: weights /= n loss = loss * weights loss = loss.sum() / tot return loss * self.loss_weight
6,365
35.797688
79
py
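A hand-computed sketch of GHM-C's density weighting above: samples whose gradient norm g = |sigmoid(pred) - target| falls into a crowded bin are down-weighted by tot / num_in_bin, so easy, abundant samples contribute less. Toy logits, no momentum, all samples valid.

import torch

pred = torch.tensor([2.0, 1.5, -1.0, 0.1])
target = torch.tensor([1.0, 1.0, 0.0, 1.0])
bins = 10
edges = torch.arange(bins + 1).float() / bins
edges[-1] += 1e-6

g = (pred.sigmoid() - target).abs()   # gradient length per sample
weights = torch.zeros_like(pred)
tot = float(pred.numel())
n = 0  # number of non-empty bins
for i in range(bins):
    inds = (g >= edges[i]) & (g < edges[i + 1])
    num_in_bin = inds.sum().item()
    if num_in_bin > 0:
        weights[inds] = tot / num_in_bin
        n += 1
weights = weights / n
print(g, weights)  # samples in rarer g-bins receive larger weights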
GFocalV2
GFocalV2-master/mmdet/models/losses/mse_loss.py
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES
from .utils import weighted_loss


@weighted_loss
def mse_loss(pred, target):
    """Wrapper of MSE loss."""
    return F.mse_loss(pred, target, reduction='none')


@LOSSES.register_module()
class MSELoss(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to
            1.0.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Weight of the loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        loss = self.loss_weight * mse_loss(
            pred,
            target,
            weight,
            reduction=self.reduction,
            avg_factor=avg_factor)
        return loss
1,463
28.28
78
py
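A sketch of what the MSELoss above evaluates to, assuming the weighted_loss decorator follows mmdet's usual convention of multiplying the per-element loss by `weight` and dividing the sum by `avg_factor` when one is given; that convention is an assumption here, not shown in this file.

import torch
import torch.nn.functional as F

pred = torch.tensor([0.0, 1.0, 2.0])
target = torch.tensor([0.0, 0.0, 0.0])
weight = torch.tensor([1.0, 1.0, 0.0])  # masks out the last sample
avg_factor = 2.0

elem = F.mse_loss(pred, target, reduction='none') * weight
loss = elem.sum() / avg_factor
print(loss)  # tensor(0.5000): (0 + 1 + 0) / 2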
GFocalV2
GFocalV2-master/mmdet/models/losses/pisa_loss.py
import torch from mmdet.core import bbox_overlaps def isr_p(cls_score, bbox_pred, bbox_targets, rois, sampling_results, loss_cls, bbox_coder, k=2, bias=0, num_class=80): """Importance-based Sample Reweighting (ISR_P), positive part. Args: cls_score (Tensor): Predicted classification scores. bbox_pred (Tensor): Predicted bbox deltas. bbox_targets (tuple[Tensor]): A tuple of bbox targets, the are labels, label_weights, bbox_targets, bbox_weights, respectively. rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs (two_stage) in shape (n, 5). sampling_results (obj): Sampling results. loss_cls (func): Classification loss func of the head. bbox_coder (obj): BBox coder of the head. k (float): Power of the non-linear mapping. bias (float): Shift of the non-linear mapping. num_class (int): Number of classes, default: 80. Return: tuple([Tensor]): labels, imp_based_label_weights, bbox_targets, bbox_target_weights """ labels, label_weights, bbox_targets, bbox_weights = bbox_targets pos_label_inds = ((labels >= 0) & (labels < num_class)).nonzero().reshape(-1) pos_labels = labels[pos_label_inds] # if no positive samples, return the original targets num_pos = float(pos_label_inds.size(0)) if num_pos == 0: return labels, label_weights, bbox_targets, bbox_weights # merge pos_assigned_gt_inds of per image to a single tensor gts = list() last_max_gt = 0 for i in range(len(sampling_results)): gt_i = sampling_results[i].pos_assigned_gt_inds gts.append(gt_i + last_max_gt) if len(gt_i) != 0: last_max_gt = gt_i.max() + 1 gts = torch.cat(gts) assert len(gts) == num_pos cls_score = cls_score.detach() bbox_pred = bbox_pred.detach() # For single stage detectors, rois here indicate anchors, in shape (N, 4) # For two stage detectors, rois are in shape (N, 5) if rois.size(-1) == 5: pos_rois = rois[pos_label_inds][:, 1:] else: pos_rois = rois[pos_label_inds] if bbox_pred.size(-1) > 4: bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4) else: pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4) # compute iou of the predicted bbox and the corresponding GT pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4) pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred) target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target) ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True) pos_imp_weights = label_weights[pos_label_inds] # Two steps to compute IoU-HLR. 
Samples are first sorted by IoU locally, # then sorted again within the same-rank group max_l_num = pos_labels.bincount().max() for label in pos_labels.unique(): l_inds = (pos_labels == label).nonzero().view(-1) l_gts = gts[l_inds] for t in l_gts.unique(): t_inds = l_inds[l_gts == t] t_ious = ious[t_inds] _, t_iou_rank_idx = t_ious.sort(descending=True) _, t_iou_rank = t_iou_rank_idx.sort() ious[t_inds] += max_l_num - t_iou_rank.float() l_ious = ious[l_inds] _, l_iou_rank_idx = l_ious.sort(descending=True) _, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR # linearly map HLR to label weights pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k) # normalize to make the new weighted loss value equal to the original loss pos_loss_cls = loss_cls( cls_score[pos_label_inds], pos_labels, reduction_override='none') if pos_loss_cls.dim() > 1: ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:, None] new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None] else: ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds] new_pos_loss_cls = pos_loss_cls * pos_imp_weights pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum() pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio label_weights[pos_label_inds] = pos_imp_weights bbox_targets = labels, label_weights, bbox_targets, bbox_weights return bbox_targets def carl_loss(cls_score, labels, bbox_pred, bbox_targets, loss_bbox, k=1, bias=0.2, avg_factor=None, sigmoid=False, num_class=80): """Classification-Aware Regression Loss (CARL). Args: cls_score (Tensor): Predicted classification scores. labels (Tensor): Targets of classification. bbox_pred (Tensor): Predicted bbox deltas. bbox_targets (Tensor): Target of bbox regression. loss_bbox (func): Regression loss func of the head. bbox_coder (obj): BBox coder of the head. k (float): Power of the non-linear mapping. bias (float): Shift of the non-linear mapping. avg_factor (int): Average factor used in regression loss. sigmoid (bool): Activation of the classification score. num_class (int): Number of classes, default: 80. Return: dict: CARL loss dict. """ pos_label_inds = ((labels >= 0) & (labels < num_class)).nonzero().reshape(-1) if pos_label_inds.numel() == 0: return dict(loss_carl=cls_score.sum()[None] * 0.) pos_labels = labels[pos_label_inds] # multiply pos_cls_score with the corresponding bbox weight # and remain gradient if sigmoid: pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels] else: pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels] carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k) # normalize carl_loss_weight to make its sum equal to num positive num_pos = float(pos_cls_score.size(0)) weight_ratio = num_pos / carl_loss_weights.sum() carl_loss_weights *= weight_ratio if avg_factor is None: avg_factor = bbox_targets.size(0) # if is class agnostic, bbox pred is in shape (N, 4) # otherwise, bbox pred is in shape (N, #classes, 4) if bbox_pred.size(-1) > 4: bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels] else: pos_bbox_preds = bbox_pred[pos_label_inds] ori_loss_reg = loss_bbox( pos_bbox_preds, bbox_targets[pos_label_inds], reduction_override='none') / avg_factor loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum() return dict(loss_carl=loss_carl[None])
7,076
38.099448
79
py
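A sketch of the double-argsort rank trick used in isr_p above: sorting the sort indices yields, for every sample, its rank under descending IoU (0 = best), which is then mapped linearly to an importance weight. Toy IoUs for a single class, single gt.

import torch

ious = torch.tensor([0.55, 0.90, 0.70, 0.30])
_, rank_idx = ious.sort(descending=True)
_, rank = rank_idx.sort()
print(rank)  # tensor([2, 0, 1, 3]): per-sample rank, 0 = highest IoU

max_num = ious.numel()
imp = (max_num - rank.float()) / max_num
print(imp)   # tensor([0.5000, 1.0000, 0.7500, 0.2500])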
GFocalV2
GFocalV2-master/mmdet/models/losses/balanced_l1_loss.py
import numpy as np import torch import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @weighted_loss def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'): """Calculate balanced L1 loss. Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_ Args: pred (torch.Tensor): The prediction with shape (N, 4). target (torch.Tensor): The learning target of the prediction with shape (N, 4). beta (float): The loss is a piecewise function of prediction and target and ``beta`` serves as a threshold for the difference between the prediction and target. Defaults to 1.0. alpha (float): The denominator ``alpha`` in the balanced L1 loss. Defaults to 0.5. gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5. reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) b = np.e**(gamma / alpha) - 1 loss = torch.where( diff < beta, alpha / b * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b - alpha * beta) return loss @LOSSES.register_module() class BalancedL1Loss(nn.Module): """Balanced L1 Loss. arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) Args: alpha (float): The denominator ``alpha`` in the balanced L1 loss. Defaults to 0.5. gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5. beta (float, optional): The loss is a piecewise function of prediction and target. ``beta`` serves as a threshold for the difference between the prediction and target. Defaults to 1.0. reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0): super(BalancedL1Loss, self).__init__() self.alpha = alpha self.gamma = gamma self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward function of loss. Args: pred (torch.Tensor): The prediction with shape (N, 4). target (torch.Tensor): The learning target of the prediction with shape (N, 4). weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, ). avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * balanced_l1_loss( pred, target, weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_bbox
4,116
33.596639
79
py
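A numeric check that the balanced L1 loss above is continuous at diff == beta: substituting ln(b + 1) = gamma / alpha (which follows from b = e^(gamma/alpha) - 1) makes the two branches agree exactly at the junction, and the evaluation below confirms it up to float error.

import numpy as np

alpha, gamma, beta = 0.5, 1.5, 1.0
b = np.e ** (gamma / alpha) - 1

diff = beta
left = alpha / b * (b * diff + 1) * np.log(b * diff / beta + 1) - alpha * diff
right = gamma * diff + gamma / b - alpha * beta
print(left, right)  # both ~1.0786: the branches meet at diff == beta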
GFocalV2
GFocalV2-master/mmdet/models/losses/iou_loss.py
import math import torch import torch.nn as nn from mmdet.core import bbox_overlaps from ..builder import LOSSES from .utils import weighted_loss @weighted_loss def iou_loss(pred, target, eps=1e-6): """IoU loss. Computing the IoU loss between a set of predicted bboxes and target bboxes. The loss is calculated as negative log of IoU. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: torch.Tensor: Loss tensor. """ ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) loss = -ious.log() return loss @weighted_loss def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3): """BIoULoss. This is an implementation of paper `Improving Object Localization with Fitness NMS and Bounded IoU Loss. <https://arxiv.org/abs/1711.00164>`_. Args: pred (torch.Tensor): Predicted bboxes. target (torch.Tensor): Target bboxes. beta (float): beta parameter in smoothl1. eps (float): eps to avoid NaN. """ pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5 pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5 pred_w = pred[:, 2] - pred[:, 0] pred_h = pred[:, 3] - pred[:, 1] with torch.no_grad(): target_ctrx = (target[:, 0] + target[:, 2]) * 0.5 target_ctry = (target[:, 1] + target[:, 3]) * 0.5 target_w = target[:, 2] - target[:, 0] target_h = target[:, 3] - target[:, 1] dx = target_ctrx - pred_ctrx dy = target_ctry - pred_ctry loss_dx = 1 - torch.max( (target_w - 2 * dx.abs()) / (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx)) loss_dy = 1 - torch.max( (target_h - 2 * dy.abs()) / (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy)) loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / (target_w + eps)) loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / (target_h + eps)) loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1).view(loss_dx.size(0), -1) loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta, loss_comb - 0.5 * beta) return loss @weighted_loss def giou_loss(pred, target, eps=1e-7): r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression <https://arxiv.org/abs/1902.09630>`_. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. """ gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps) loss = 1 - gious return loss @weighted_loss def diou_loss(pred, target, eps=1e-7): r"""`Implementation of Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_. Code is modified from https://github.com/Zzh-tju/DIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. 
""" # overlap lt = torch.max(pred[:, :2], target[:, :2]) rb = torch.min(pred[:, 2:], target[:, 2:]) wh = (rb - lt).clamp(min=0) overlap = wh[:, 0] * wh[:, 1] # union ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) union = ap + ag - overlap + eps # IoU ious = overlap / union # enclose area enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) cw = enclose_wh[:, 0] ch = enclose_wh[:, 1] c2 = cw**2 + ch**2 + eps b1_x1, b1_y1 = pred[:, 0], pred[:, 1] b1_x2, b1_y2 = pred[:, 2], pred[:, 3] b2_x1, b2_y1 = target[:, 0], target[:, 1] b2_x2, b2_y2 = target[:, 2], target[:, 3] left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 rho2 = left + right # DIoU dious = ious - rho2 / c2 loss = 1 - dious return loss @weighted_loss def ciou_loss(pred, target, eps=1e-7): r"""`Implementation of paper `Enhancing Geometric Factors into Model Learning and Inference for Object Detection and Instance Segmentation <https://arxiv.org/abs/2005.03572>`_. Code is modified from https://github.com/Zzh-tju/CIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. """ # overlap lt = torch.max(pred[:, :2], target[:, :2]) rb = torch.min(pred[:, 2:], target[:, 2:]) wh = (rb - lt).clamp(min=0) overlap = wh[:, 0] * wh[:, 1] # union ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) union = ap + ag - overlap + eps # IoU ious = overlap / union # enclose area enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) cw = enclose_wh[:, 0] ch = enclose_wh[:, 1] c2 = cw**2 + ch**2 + eps b1_x1, b1_y1 = pred[:, 0], pred[:, 1] b1_x2, b1_y2 = pred[:, 2], pred[:, 3] b2_x1, b2_y1 = target[:, 0], target[:, 1] b2_x2, b2_y2 = target[:, 2], target[:, 3] w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 rho2 = left + right factor = 4 / math.pi**2 v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) # CIoU cious = ious - (rho2 / c2 + v**2 / (1 - ious + v)) loss = 1 - cious return loss @LOSSES.register_module() class IoULoss(nn.Module): """IoULoss. Computing the IoU loss between a set of predicted bboxes and target bboxes. Args: eps (float): Eps to avoid log(0). reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Weight of loss. """ def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(IoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. 
Defaults to None. Options are "none", "mean" and "sum". """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if (weight is not None) and (not torch.any(weight > 0)) and ( reduction != 'none'): return (pred * weight).sum() # 0 if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # iou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * iou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class BoundedIoULoss(nn.Module): def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0): super(BoundedIoULoss, self).__init__() self.beta = beta self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss = self.loss_weight * bounded_iou_loss( pred, target, weight, beta=self.beta, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class GIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(GIoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # giou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * giou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class DIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(DIoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # giou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * diou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class CIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(CIoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is 
not None and not torch.any(weight > 0): return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # giou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * ciou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss
13,454
31.188995
79
py
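A minimal usage sketch for the IoU-family losses registered above (GIoULoss, DIoULoss, CIoULoss). It assumes this repo's `mmdet` package is importable and that these classes are exported from `mmdet.models.losses`; the box coordinates are made up for illustration.

import torch

from mmdet.models.losses import CIoULoss, DIoULoss, GIoULoss

# Hypothetical (x1, y1, x2, y2) boxes, not taken from any dataset.
pred = torch.tensor([[10., 10., 50., 50.], [20., 20., 60., 80.]])
target = torch.tensor([[12., 12., 48., 52.], [18., 22., 62., 78.]])

for loss_cls in (GIoULoss, DIoULoss, CIoULoss):
    criterion = loss_cls(loss_weight=1.0)
    # A per-box weight of shape (n, 4) would be reduced to (n,) inside
    # forward(), as the TODO comments above note.
    print(loss_cls.__name__, criterion(pred, target).item())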
GFocalV2
GFocalV2-master/mmdet/models/losses/smooth_l1_loss.py
import torch import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): """Smooth L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. Returns: torch.Tensor: Calculated loss """ assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta) return loss @weighted_loss def l1_loss(pred, target): """L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. Returns: torch.Tensor: Calculated loss """ assert pred.size() == target.size() and target.numel() > 0 loss = torch.abs(pred - target) return loss @LOSSES.register_module() class SmoothL1Loss(nn.Module): """Smooth L1 loss. Args: beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". Defaults to "mean". loss_weight (float, optional): The weight of loss. """ def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): super(SmoothL1Loss, self).__init__() self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * smooth_l1_loss( pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_bbox @LOSSES.register_module() class L1Loss(nn.Module): """L1 loss. Args: reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(L1Loss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * l1_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_bbox
4,423
31.291971
78
py
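A quick sanity check of the piecewise definition above, with the expected values worked out by hand (assumes this repo's `mmdet` is importable).

import torch

from mmdet.models.losses import SmoothL1Loss

pred = torch.tensor([0.0, 0.5, 2.0])
target = torch.zeros(3)
loss = SmoothL1Loss(beta=1.0, reduction='none')(pred, target)
# |diff| < beta -> 0.5 * diff**2 / beta, else |diff| - 0.5 * beta:
# 0.0 -> 0.0, 0.5 -> 0.125, 2.0 -> 1.5
print(loss)  # tensor([0.0000, 0.1250, 1.5000])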
GFocalV2
GFocalV2-master/mmdet/models/losses/gfocal_loss.py
import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weighted_loss @weighted_loss def quality_focal_loss(pred, target, beta=2.0, use_sigmoid=True): r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,). """ assert len(target) == 2, """target for QFL must be a tuple of two elements, including category label and quality label, respectively""" # label denotes the category id, score denotes the quality score label, score = target if use_sigmoid: func = F.binary_cross_entropy_with_logits else: func = F.binary_cross_entropy # negatives are supervised by 0 quality score pred_sigmoid = pred.sigmoid() if use_sigmoid else pred scale_factor = pred_sigmoid zerolabel = scale_factor.new_zeros(pred.shape) loss = func(pred, zerolabel, reduction='none') * scale_factor.pow(beta) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = pred.size(1) pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) pos_label = label[pos].long() # positives are supervised by bbox quality (IoU) score scale_factor = score[pos] - pred_sigmoid[pos, pos_label] loss[pos, pos_label] = func(pred[pos, pos_label], score[pos], reduction='none') * scale_factor.abs().pow(beta) loss = loss.sum(dim=1, keepdim=False) return loss @weighted_loss def distribution_focal_loss(pred, label): r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. label (torch.Tensor): Target distance label for bounding boxes with shape (N,). Returns: torch.Tensor: Loss tensor with shape (N,). """ dis_left = label.long() dis_right = dis_left + 1 weight_left = dis_right.float() - label weight_right = label - dis_left.float() loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \ + F.cross_entropy(pred, dis_right, reduction='none') * weight_right return loss @LOSSES.register_module() class QualityFocalLoss(nn.Module): r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: use_sigmoid (bool): Whether sigmoid operation is conducted in QFL. Defaults to True. beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Loss weight of current loss. """ def __init__(self, use_sigmoid=True, beta=2.0, reduction='mean', loss_weight=1.0): super(QualityFocalLoss, self).__init__() # assert use_sigmoid is True, 'Only sigmoid in QFL supported now.' 
self.use_sigmoid = use_sigmoid self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_cls = self.loss_weight * quality_focal_loss( pred, target, weight, beta=self.beta, use_sigmoid=self.use_sigmoid, reduction=reduction, avg_factor=avg_factor) return loss_cls @LOSSES.register_module() class DistributionFocalLoss(nn.Module): r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: reduction (str): Options are `'none'`, `'mean'` and `'sum'`. loss_weight (float): Loss weight of current loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(DistributionFocalLoss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. target (torch.Tensor): Target distance label for bounding boxes with shape (N,). weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_cls = self.loss_weight * distribution_focal_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_cls
7,336
38.446237
79
py
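A small usage sketch of the two GFL losses above: QFL takes a (category label, quality score) tuple as its target, while DFL takes a continuous distance label over the integral set {0, ..., n}. All shapes and values below are illustrative.

import torch

from mmdet.models.losses import DistributionFocalLoss, QualityFocalLoss

num_classes = 80
cls_logits = torch.randn(4, num_classes)
# FG cat_id in [0, num_classes - 1]; BG cat_id == num_classes.
labels = torch.tensor([2, 7, num_classes, num_classes])
scores = torch.tensor([0.8, 0.6, 0.0, 0.0])  # IoU quality of the positives
qfl = QualityFocalLoss(use_sigmoid=True, beta=2.0)
print(qfl(cls_logits, (labels, scores)))

n = 16  # max value of the integral set {0, ..., n}
reg_logits = torch.randn(4, n + 1)  # general distribution before softmax
dist_labels = torch.tensor([3.2, 7.9, 0.4, 15.0])
dfl = DistributionFocalLoss()
print(dfl(reg_logits, dist_labels))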
GFocalV2
GFocalV2-master/mmdet/models/losses/varifocal_loss.py
import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weight_reduce_loss def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ # pred and target should be of the same size assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + \ alpha * (pred_sigmoid - target).abs().pow(gamma) * \ (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + \ alpha * (pred_sigmoid - target).abs().pow(gamma) * \ (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits( pred, target, reduction='none') * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss @LOSSES.register_module() class VarifocalLoss(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. Defaults to 1.0. """ super(VarifocalLoss, self).__init__() assert use_sigmoid is True, \ 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". 
Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.use_sigmoid: loss_cls = self.loss_weight * varifocal_loss( pred, target, weight, alpha=self.alpha, gamma=self.gamma, iou_weighted=self.iou_weighted, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls
5,265
38.893939
79
py
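A hedged usage sketch for the loss above, assuming VarifocalLoss is exported from `mmdet.models.losses`. The target is an IoU-aware classification score map of the same shape as the prediction; the values below are fabricated.

import torch

from mmdet.models.losses import VarifocalLoss

cls_logits = torch.randn(4, 80)
# Positives carry their IoU with the GT box; negatives stay 0.
target = torch.zeros_like(cls_logits)
target[0, 3] = 0.9
target[1, 11] = 0.7
vfl = VarifocalLoss(alpha=0.75, gamma=2.0, iou_weighted=True)
print(vfl(cls_logits, target))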
GFocalV2
GFocalV2-master/mmdet/models/losses/utils.py
import functools

import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, elementwise_mean: 1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight

    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != 'none':
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # get element-wise loss
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss

    return wrapper
3,003
29.343434
79
py
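The `weighted_loss` docstring above already walks through the decorator itself; this complementary sketch shows the `avg_factor` path of `weight_reduce_loss`, where a "mean" reduction divides by `avg_factor` instead of the element count.

import torch

from mmdet.models.losses.utils import weight_reduce_loss

loss = torch.tensor([1.0, 2.0, 3.0, 4.0])
weight = torch.tensor([1.0, 1.0, 0.0, 0.0])
# sum(weighted loss) / avg_factor = (1 + 2) / 2 = 1.5
print(weight_reduce_loss(loss, weight, reduction='mean', avg_factor=2))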
GFocalV2
GFocalV2-master/mmdet/models/losses/ae_loss.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES


def ae_loss_per_image(tl_preds, br_preds, match):
    """Associative Embedding Loss in one image.

    Associative Embedding Loss includes two parts: pull loss and push loss.
    Pull loss makes embedding vectors from the same object closer to each
    other. Push loss distinguishes embedding vectors from different objects
    and makes the gap between them large enough.

    During computing, there are usually 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
          by the two corners of the only object.
        - more than one object in image: pull loss is computed by corner
          pairs from each object, push loss is computed by each object with
          all other objects. We use a confusion matrix with 0 on the diagonal
          to compute the push loss.

    Args:
        tl_preds (tensor): Embedding feature map of left-top corner.
        br_preds (tensor): Embedding feature map of bottom-right corner.
        match (list): Downsampled coordinates pair of each ground truth box.
    """

    tl_list, br_list, me_list = [], [], []
    if len(match) == 0:  # no object in image
        pull_loss = tl_preds.sum() * 0.
        push_loss = tl_preds.sum() * 0.
    else:
        for m in match:
            [tl_y, tl_x], [br_y, br_x] = m
            tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
            br_e = br_preds[:, br_y, br_x].view(-1, 1)
            tl_list.append(tl_e)
            br_list.append(br_e)
            me_list.append((tl_e + br_e) / 2.0)

        tl_list = torch.cat(tl_list)
        br_list = torch.cat(br_list)
        me_list = torch.cat(me_list)

        assert tl_list.size() == br_list.size()

        # N is object number in image, M is dimension of embedding vector
        N, M = tl_list.size()

        pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
        pull_loss = pull_loss.sum() / N

        margin = 1  # exp setting of CornerNet, details in section 3.3 of paper

        # confusion matrix of push loss
        conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
        conf_weight = 1 - torch.eye(N).type_as(me_list)
        conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())

        if N > 1:  # more than one object in current image
            push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
        else:
            push_loss = tl_preds.sum() * 0.

    return pull_loss, push_loss


@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
    """Associative Embedding Loss.

    More details can be found in
    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
    `CornerNet <https://arxiv.org/abs/1808.01244>`_ .
    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_  # noqa: E501

    Args:
        pull_weight (float): Loss weight for corners from the same object.
        push_weight (float): Loss weight for corners from different objects.
    """

    def __init__(self, pull_weight=0.25, push_weight=0.25):
        super(AssociativeEmbeddingLoss, self).__init__()
        self.pull_weight = pull_weight
        self.push_weight = push_weight

    def forward(self, pred, target, match):
        """Forward function."""
        batch = pred.size(0)
        pull_all, push_all = 0.0, 0.0
        for i in range(batch):
            pull, push = ae_loss_per_image(pred[i], target[i], match[i])

            pull_all += self.pull_weight * pull
            push_all += self.push_weight * push

        return pull_all, push_all
3,757
36.207921
143
py
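A toy forward pass for the AE loss above, assuming it is exported from `mmdet.models.losses`. The embedding maps and the single corner match are fabricated; with one object the push loss is zero by construction.

import torch

from mmdet.models.losses import AssociativeEmbeddingLoss

tl_emb = torch.rand(1, 1, 8, 8)  # (batch, emb_dim, H, W) top-left map
br_emb = torch.rand(1, 1, 8, 8)  # bottom-right map
# One GT box per image, corners given as ([tl_y, tl_x], [br_y, br_x]).
match = [[([1, 1], [5, 6])]]
ae_loss = AssociativeEmbeddingLoss(pull_weight=0.25, push_weight=0.25)
pull, push = ae_loss(tl_emb, br_emb, match)
print(pull, push)  # push is 0 since there is only one object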
GFocalV2
GFocalV2-master/mmdet/models/losses/accuracy.py
import torch.nn as nn def accuracy(pred, target, topk=1, thresh=None): """Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The model prediction, shape (N, num_class) target (torch.Tensor): The target of each prediction, shape (N, ) topk (int | tuple[int], optional): If the predictions in ``topk`` matches the target, the predictions will be regarded as correct ones. Defaults to 1. thresh (float, optional): If not None, predictions with scores under this threshold are considered incorrect. Default to None. Returns: float | tuple[float]: If the input ``topk`` is a single integer, the function will return a single float as accuracy. If ``topk`` is a tuple containing multiple integers, the function will return a tuple containing accuracies of each ``topk`` number. """ assert isinstance(topk, (int, tuple)) if isinstance(topk, int): topk = (topk, ) return_single = True else: return_single = False maxk = max(topk) if pred.size(0) == 0: accu = [pred.new_tensor(0.) for i in range(len(topk))] return accu[0] if return_single else accu assert pred.ndim == 2 and target.ndim == 1 assert pred.size(0) == target.size(0) assert maxk <= pred.size(1), \ f'maxk {maxk} exceeds pred dimension {pred.size(1)}' pred_value, pred_label = pred.topk(maxk, dim=1) pred_label = pred_label.t() # transpose to shape (maxk, N) correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) if thresh is not None: # Only prediction values larger than thresh are counted as correct correct = correct & (pred_value > thresh).t() res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / pred.size(0))) return res[0] if return_single else res class Accuracy(nn.Module): def __init__(self, topk=(1, ), thresh=None): """Module to calculate the accuracy. Args: topk (tuple, optional): The criterion used to calculate the accuracy. Defaults to (1,). thresh (float, optional): If not None, predictions with scores under this threshold are considered incorrect. Default to None. """ super().__init__() self.topk = topk self.thresh = thresh def forward(self, pred, target): """Forward function to calculate accuracy. Args: pred (torch.Tensor): Prediction of models. target (torch.Tensor): Target for each prediction. Returns: tuple[float]: The accuracies under different topk criterions. """ return accuracy(pred, target, self.topk, self.thresh)
2,905
36.74026
79
py
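A quick sketch of the functional interface above, with predictions chosen so top-1 accuracy is trivially 100% (assumes this repo's `mmdet` is importable).

import torch

from mmdet.models.losses import accuracy

pred = torch.tensor([[0.1, 0.7, 0.2],
                     [0.8, 0.1, 0.1],
                     [0.3, 0.3, 0.4]])
target = torch.tensor([1, 0, 2])
print(accuracy(pred, target))               # top-1: tensor([100.])
print(accuracy(pred, target, topk=(1, 2)))  # tuple query -> list of results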
GFocalV2
GFocalV2-master/mmdet/models/losses/focal_loss.py
import torch.nn as nn import torch.nn.functional as F from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss from ..builder import LOSSES from .utils import weight_reduce_loss # This method is only for debugging def py_sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ pred_sigmoid = pred.sigmoid() target = target.type_as(pred) pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma) loss = F.binary_cross_entropy_with_logits( pred, target, reduction='none') * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): r"""A warpper of cuda version `Focal Loss <https://arxiv.org/abs/1708.02002>`_. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ # Function.apply does not accept keyword arguments, so the decorator # "weighted_loss" is not applicable loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None, 'none') if weight is not None: if weight.shape != loss.shape: if weight.size(0) == loss.size(0): # For most cases, weight is of shape (num_priors, ), # which means it does not have the second axis num_class weight = weight.view(-1, 1) else: # Sometimes, weight per anchor per class is also needed. e.g. # in FSAF. But it may be flattened of shape # (num_priors x num_class, ), while loss is still of shape # (num_priors, num_class). assert weight.numel() == loss.numel() weight = weight.view(loss.size(0), -1) assert weight.ndim == loss.ndim loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss @LOSSES.register_module() class FocalLoss(nn.Module): def __init__(self, use_sigmoid=True, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0): """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ Args: use_sigmoid (bool, optional): Whether to the prediction is used for sigmoid or softmax. Defaults to True. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". 
loss_weight (float, optional): Weight of loss. Defaults to 1.0. """ super(FocalLoss, self).__init__() assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' self.use_sigmoid = use_sigmoid self.gamma = gamma self.alpha = alpha self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.use_sigmoid: loss_cls = self.loss_weight * sigmoid_focal_loss( pred, target, weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls
6,417
39.620253
79
py
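The registered FocalLoss wraps the fused mmcv CUDA op, so for a dependency-light illustration this sketch calls the pure-PyTorch debug variant defined in the same file, with a fabricated one-hot target of the same shape as the logits.

import torch

from mmdet.models.losses.focal_loss import py_sigmoid_focal_loss

logits = torch.randn(4, 80)
target = torch.zeros_like(logits)  # one-hot labels, same shape as pred
target[0, 5] = 1.0
print(py_sigmoid_focal_loss(logits, target, gamma=2.0, alpha=0.25))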
GFocalV2
GFocalV2-master/mmdet/models/losses/cross_entropy_loss.py
import torch import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weight_reduce_loss def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None): """Calculate the CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. label (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str, optional): The method used to reduce the loss. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. Returns: torch.Tensor: The calculated loss """ # element-wise losses loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none') # apply weights and do the reduction if weight is not None: weight = weight.float() loss = weight_reduce_loss( loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero( (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 if label_weights is None: bin_label_weights = None else: bin_label_weights = label_weights.view(-1, 1).expand( label_weights.size(0), label_channels) return bin_labels, bin_label_weights def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None): """Calculate the binary CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, 1). label (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. Returns: torch.Tensor: The calculated loss """ if pred.dim() != label.dim(): label, weight = _expand_onehot_labels(label, weight, pred.size(-1)) # weighted element-wise losses if weight is not None: weight = weight.float() loss = F.binary_cross_entropy_with_logits( pred, label.float(), pos_weight=class_weight, reduction='none') # do the reduction for the weighted loss loss = weight_reduce_loss( loss, weight, reduction=reduction, avg_factor=avg_factor) return loss def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None, class_weight=None): """Calculate the CrossEntropy loss for masks. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. target (torch.Tensor): The learning label of the prediction. label (torch.Tensor): ``label`` indicates the class label of the mask' corresponding object. This will be used to select the mask in the of the class which the object belongs to when the mask prediction if not class-agnostic. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. 
    Returns:
        torch.Tensor: The calculated loss
    """
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, weight=class_weight, reduction='mean')[None]


@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0):
        """CrossEntropyLoss.

        Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                or softmax. Defaults to False.
            use_mask (bool, optional): Whether to use mask cross entropy loss.
                Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Defaults to 'mean'. Options are "none", "mean" and "sum".
            class_weight (list[float], optional): Weight of each class.
                Defaults to None.
            loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
        """
        super(CrossEntropyLoss, self).__init__()
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight

        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(self.class_weight)
        else:
            class_weight = None
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_cls
7,352
35.58209
79
py
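A short usage sketch of the registered class above with fabricated logits and integer labels; the sigmoid variant expands the labels to one-hot targets internally via _expand_onehot_labels before BCE-with-logits.

import torch

from mmdet.models.losses import CrossEntropyLoss

cls_logits = torch.randn(4, 10)
labels = torch.tensor([1, 4, 9, 0])
print(CrossEntropyLoss(use_sigmoid=False)(cls_logits, labels))  # softmax CE
print(CrossEntropyLoss(use_sigmoid=True)(cls_logits, labels))   # binary CE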
GFocalV2
GFocalV2-master/mmdet/models/losses/gaussian_focal_loss.py
import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @weighted_loss def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0): """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian distribution. Args: pred (torch.Tensor): The prediction. gaussian_target (torch.Tensor): The learning target of the prediction in gaussian distribution. alpha (float, optional): A balanced form for Focal Loss. Defaults to 2.0. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 4.0. """ eps = 1e-12 pos_weights = gaussian_target.eq(1) neg_weights = (1 - gaussian_target).pow(gamma) pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights return pos_loss + neg_loss @LOSSES.register_module() class GaussianFocalLoss(nn.Module): """GaussianFocalLoss is a variant of focal loss. More details can be found in the `paper <https://arxiv.org/abs/1808.01244>`_ Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501 Please notice that the target in GaussianFocalLoss is a gaussian heatmap, not 0/1 binary target. Args: alpha (float): Power of prediction. gamma (float): Power of target for negtive samples. reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Loss weight of current loss. """ def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0): super(GaussianFocalLoss, self).__init__() self.alpha = alpha self.gamma = gamma self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction in gaussian distribution. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_reg = self.loss_weight * gaussian_focal_loss( pred, target, weight, alpha=self.alpha, gamma=self.gamma, reduction=reduction, avg_factor=avg_factor) return loss_reg
3,211
34.688889
108
py
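Note that this loss consumes a post-sigmoid heatmap (it calls pred.log() directly) and a gaussian target peaking at 1 on object centers; the tensors below are fabricated, with only the center pixel of the gaussian filled in.

import torch

from mmdet.models.losses import GaussianFocalLoss

pred = torch.rand(1, 2, 16, 16).clamp(1e-4, 1 - 1e-4)  # probabilities
target = torch.zeros(1, 2, 16, 16)
target[0, 0, 8, 8] = 1.0  # gaussian peak (center pixel only, for brevity)
print(GaussianFocalLoss(alpha=2.0, gamma=4.0)(pred, target))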
GFocalV2
GFocalV2-master/mmdet/models/backbones/hrnet.py
import torch.nn as nn from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, kaiming_init) from mmcv.runner import load_checkpoint from torch.nn.modules.batchnorm import _BatchNorm from mmdet.utils import get_root_logger from ..builder import BACKBONES from .resnet import BasicBlock, Bottleneck class HRModule(nn.Module): """High-Resolution Module for HRNet. In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange is in this module. """ def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN')): super(HRModule, self).__init__() self._check_branches(num_branches, num_blocks, in_channels, num_channels) self.in_channels = in_channels self.num_branches = num_branches self.multiscale_output = multiscale_output self.norm_cfg = norm_cfg self.conv_cfg = conv_cfg self.with_cp = with_cp self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels) self.fuse_layers = self._make_fuse_layers() self.relu = nn.ReLU(inplace=False) def _check_branches(self, num_branches, num_blocks, in_channels, num_channels): if num_branches != len(num_blocks): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_BLOCKS({len(num_blocks)})' raise ValueError(error_msg) if num_branches != len(num_channels): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_CHANNELS({len(num_channels)})' raise ValueError(error_msg) if num_branches != len(in_channels): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_INCHANNELS({len(in_channels)})' raise ValueError(error_msg) def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or \ self.in_channels[branch_index] != \ num_channels[branch_index] * block.expansion: downsample = nn.Sequential( build_conv_layer( self.conv_cfg, self.in_channels[branch_index], num_channels[branch_index] * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, num_channels[branch_index] * block.expansion)[1]) layers = [] layers.append( block( self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) self.in_channels[branch_index] = \ num_channels[branch_index] * block.expansion for i in range(1, num_blocks[branch_index]): layers.append( block( self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) return nn.Sequential(*layers) def _make_branches(self, num_branches, block, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append( self._make_one_branch(i, block, num_blocks, num_channels)) return nn.ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return None num_branches = self.num_branches in_channels = self.in_channels fuse_layers = [] num_out_branches = num_branches if self.multiscale_output else 1 for i in range(num_out_branches): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], nn.Upsample( scale_factor=2**(j - i), mode='nearest'))) elif j == i: fuse_layer.append(None) else: conv_downsamples = [] for k in range(i - j): if k == i - j - 1: conv_downsamples.append( nn.Sequential( build_conv_layer( 
self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1])) else: conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False))) fuse_layer.append(nn.Sequential(*conv_downsamples)) fuse_layers.append(nn.ModuleList(fuse_layer)) return nn.ModuleList(fuse_layers) def forward(self, x): """Forward function.""" if self.num_branches == 1: return [self.branches[0](x[0])] for i in range(self.num_branches): x[i] = self.branches[i](x[i]) x_fuse = [] for i in range(len(self.fuse_layers)): y = 0 for j in range(self.num_branches): if i == j: y += x[j] else: y += self.fuse_layers[i][j](x[j]) x_fuse.append(self.relu(y)) return x_fuse @BACKBONES.register_module() class HRNet(nn.Module): """HRNet backbone. High-Resolution Representations for Labeling Pixels and Regions arXiv: https://arxiv.org/abs/1904.04514 Args: extra (dict): detailed configuration for each stage of HRNet. in_channels (int): Number of input image channels. Default: 3. conv_cfg (dict): dictionary to construct and config conv layer. norm_cfg (dict): dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): whether to use zero init for last norm layer in resblocks to let them behave as identity. Example: >>> from mmdet.models import HRNet >>> import torch >>> extra = dict( >>> stage1=dict( >>> num_modules=1, >>> num_branches=1, >>> block='BOTTLENECK', >>> num_blocks=(4, ), >>> num_channels=(64, )), >>> stage2=dict( >>> num_modules=1, >>> num_branches=2, >>> block='BASIC', >>> num_blocks=(4, 4), >>> num_channels=(32, 64)), >>> stage3=dict( >>> num_modules=4, >>> num_branches=3, >>> block='BASIC', >>> num_blocks=(4, 4, 4), >>> num_channels=(32, 64, 128)), >>> stage4=dict( >>> num_modules=3, >>> num_branches=4, >>> block='BASIC', >>> num_blocks=(4, 4, 4, 4), >>> num_channels=(32, 64, 128, 256))) >>> self = HRNet(extra, in_channels=1) >>> self.eval() >>> inputs = torch.rand(1, 1, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 32, 8, 8) (1, 64, 4, 4) (1, 128, 2, 2) (1, 256, 1, 1) """ blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN'), norm_eval=True, with_cp=False, zero_init_residual=False): super(HRNet, self).__init__() self.extra = extra self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.zero_init_residual = zero_init_residual # stem net self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) self.conv1 = build_conv_layer( self.conv_cfg, in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( self.conv_cfg, 64, 64, kernel_size=3, stride=2, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) # stage 1 self.stage1_cfg = self.extra['stage1'] num_channels = self.stage1_cfg['num_channels'][0] block_type = self.stage1_cfg['block'] num_blocks = self.stage1_cfg['num_blocks'][0] block = self.blocks_dict[block_type] stage1_out_channels = num_channels * block.expansion self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) # stage 2 self.stage2_cfg = self.extra['stage2'] num_channels = self.stage2_cfg['num_channels'] block_type = self.stage2_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels) self.stage2, pre_stage_channels = self._make_stage( self.stage2_cfg, num_channels) # stage 3 self.stage3_cfg = self.extra['stage3'] num_channels = self.stage3_cfg['num_channels'] block_type = self.stage3_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage( self.stage3_cfg, num_channels) # stage 4 self.stage4_cfg = self.extra['stage4'] num_channels = self.stage4_cfg['num_channels'] block_type = self.stage4_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage( self.stage4_cfg, num_channels) @property def norm1(self): """nn.Module: the normalization layer named "norm1" """ return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: the normalization layer named "norm2" """ return getattr(self, self.norm2_name) def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append( nn.Sequential( build_conv_layer( self.conv_cfg, num_channels_pre_layer[i], num_channels_cur_layer[i], kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, num_channels_cur_layer[i])[1], nn.ReLU(inplace=True))) else: transition_layers.append(None) else: conv_downsamples = [] for j in range(i + 1 - num_branches_pre): in_channels = num_channels_pre_layer[-1] out_channels = num_channels_cur_layer[i] \ if j == 
i - num_branches_pre else in_channels conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, out_channels)[1], nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv_downsamples)) return nn.ModuleList(transition_layers) def _make_layer(self, block, inplanes, planes, blocks, stride=1): downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( build_conv_layer( self.conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) layers = [] layers.append( block( inplanes, planes, stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( inplanes, planes, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) return nn.Sequential(*layers) def _make_stage(self, layer_config, in_channels, multiscale_output=True): num_modules = layer_config['num_modules'] num_branches = layer_config['num_branches'] num_blocks = layer_config['num_blocks'] num_channels = layer_config['num_channels'] block = self.blocks_dict[layer_config['block']] hr_modules = [] for i in range(num_modules): # multi_scale_output is only used for the last module if not multiscale_output and i == num_modules - 1: reset_multiscale_output = False else: reset_multiscale_output = True hr_modules.append( HRModule( num_branches, block, num_blocks, in_channels, num_channels, reset_multiscale_output, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) return nn.Sequential(*hr_modules), in_channels def init_weights(self, pretrained=None): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): constant_init(m.norm3, 0) elif isinstance(m, BasicBlock): constant_init(m.norm2, 0) else: raise TypeError('pretrained must be a str or None') def forward(self, x): """Forward function.""" x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.conv2(x) x = self.norm2(x) x = self.relu(x) x = self.layer1(x) x_list = [] for i in range(self.stage2_cfg['num_branches']): if self.transition1[i] is not None: x_list.append(self.transition1[i](x)) else: x_list.append(x) y_list = self.stage2(x_list) x_list = [] for i in range(self.stage3_cfg['num_branches']): if self.transition2[i] is not None: x_list.append(self.transition2[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage3(x_list) x_list = [] for i in range(self.stage4_cfg['num_branches']): if self.transition3[i] is not None: x_list.append(self.transition3[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage4(x_list) return y_list def train(self, mode=True): """Convert the model into training mode whill keeping the normalization layer freezed.""" super(HRNet, self).train(mode) if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval()
20,359
36.843866
79
py
GFocalV2
GFocalV2-master/mmdet/models/backbones/regnet.py
import numpy as np import torch.nn as nn from mmcv.cnn import build_conv_layer, build_norm_layer from ..builder import BACKBONES from .resnet import ResNet from .resnext import Bottleneck @BACKBONES.register_module() class RegNet(ResNet): """RegNet backbone. More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ . Args: arch (dict): The parameter of RegNets. - w0 (int): initial width - wa (float): slope of width - wm (float): quantization parameter to quantize the width - depth (int): depth of the backbone - group_w (int): width of group - bot_mul (float): bottleneck ratio, i.e. expansion of bottlneck. strides (Sequence[int]): Strides of the first block of each stage. base_channels (int): Base channels after stem layer. in_channels (int): Number of input image channels. Default: 3. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. frozen_stages (int): Stages to be frozen (all param fixed). -1 means not freezing any parameters. norm_cfg (dict): dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): whether to use zero init for last norm layer in resblocks to let them behave as identity. Example: >>> from mmdet.models import RegNet >>> import torch >>> self = RegNet( arch=dict( w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0)) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 96, 8, 8) (1, 192, 4, 4) (1, 432, 2, 2) (1, 1008, 1, 1) """ arch_settings = { 'regnetx_400mf': dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 'regnetx_800mf': dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), 'regnetx_1.6gf': dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), 'regnetx_3.2gf': dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), 'regnetx_4.0gf': dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), 'regnetx_6.4gf': dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), 'regnetx_8.0gf': dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), 'regnetx_12gf': dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), } def __init__(self, arch, in_channels=3, stem_channels=32, base_channels=32, strides=(2, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True): super(ResNet, self).__init__() # Generate RegNet parameters first if isinstance(arch, str): assert arch in self.arch_settings, \ f'"arch": "{arch}" is not one of the' \ ' arch_settings' arch = self.arch_settings[arch] elif not isinstance(arch, dict): raise ValueError('Expect "arch" to be either a string ' f'or a dict, got {type(arch)}') widths, num_stages = self.generate_regnet( arch['w0'], arch['wa'], arch['wm'], arch['depth'], ) # Convert to per stage format stage_widths, stage_blocks = self.get_stages_from_blocks(widths) # Generate group widths and bot muls group_widths = [arch['group_w'] for _ in range(num_stages)] self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] # Adjust the compatibility of stage_widths and group_widths stage_widths, group_widths = self.adjust_width_group( stage_widths, self.bottleneck_ratio, group_widths) # Group params by stage self.stage_widths = stage_widths self.group_widths = group_widths self.depth = sum(stage_blocks) self.stem_channels = stem_channels self.base_channels = base_channels self.num_stages = num_stages assert num_stages >= 1 and num_stages <= 4 self.strides = strides self.dilations = dilations assert len(strides) == len(dilations) == num_stages self.out_indices = out_indices assert max(out_indices) < num_stages self.style = style self.deep_stem = deep_stem self.avg_down = avg_down self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.with_cp = with_cp self.norm_eval = norm_eval self.dcn = dcn self.stage_with_dcn = stage_with_dcn if dcn is not None: assert len(stage_with_dcn) == num_stages self.plugins = plugins self.zero_init_residual = zero_init_residual self.block = Bottleneck expansion_bak = self.block.expansion self.block.expansion = 1 self.stage_blocks = stage_blocks[:num_stages] self._make_stem_layer(in_channels, stem_channels) self.inplanes = stem_channels self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = self.strides[i] dilation = self.dilations[i] group_width = self.group_widths[i] width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) stage_groups = width // group_width dcn = self.dcn if self.stage_with_dcn[i] else None if self.plugins is not None: stage_plugins = self.make_stage_plugins(self.plugins, i) else: stage_plugins = None res_layer = self.make_res_layer( 
block=self.block, inplanes=self.inplanes, planes=self.stage_widths[i], num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, groups=stage_groups, base_width=group_width, base_channels=self.stage_widths[i]) self.inplanes = self.stage_widths[i] layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() self.feat_dim = stage_widths[-1] self.block.expansion = expansion_bak def _make_stem_layer(self, in_channels, base_channels): self.conv1 = build_conv_layer( self.conv_cfg, in_channels, base_channels, kernel_size=3, stride=2, padding=1, bias=False) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, base_channels, postfix=1) self.add_module(self.norm1_name, norm1) self.relu = nn.ReLU(inplace=True) def generate_regnet(self, initial_width, width_slope, width_parameter, depth, divisor=8): """Generates per block width from RegNet parameters. Args: initial_width ([int]): Initial width of the backbone width_slope ([float]): Slope of the quantized linear function width_parameter ([int]): Parameter used to quantize the width. depth ([int]): Depth of the backbone. divisor (int, optional): The divisor of channels. Defaults to 8. Returns: list, int: return a list of widths of each stage and the number \ of stages """ assert width_slope >= 0 assert initial_width > 0 assert width_parameter > 1 assert initial_width % divisor == 0 widths_cont = np.arange(depth) * width_slope + initial_width ks = np.round( np.log(widths_cont / initial_width) / np.log(width_parameter)) widths = initial_width * np.power(width_parameter, ks) widths = np.round(np.divide(widths, divisor)) * divisor num_stages = len(np.unique(widths)) widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() return widths, num_stages @staticmethod def quantize_float(number, divisor): """Converts a float to closest non-zero int divisible by divior. Args: number (int): Original number to be quantized. divisor (int): Divisor used to quantize the number. Returns: int: quantized number that is divisible by devisor. """ return int(round(number / divisor) * divisor) def adjust_width_group(self, widths, bottleneck_ratio, groups): """Adjusts the compatibility of widths and groups. Args: widths (list[int]): Width of each stage. bottleneck_ratio (float): Bottleneck ratio. groups (int): number of groups in each stage Returns: tuple(list): The adjusted widths and groups of each stage. """ bottleneck_width = [ int(w * b) for w, b in zip(widths, bottleneck_ratio) ] groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] bottleneck_width = [ self.quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_width, groups) ] widths = [ int(w_bot / b) for w_bot, b in zip(bottleneck_width, bottleneck_ratio) ] return widths, groups def get_stages_from_blocks(self, widths): """Gets widths/stage_blocks of network at each stage. Args: widths (list[int]): Width in each stage. 
Returns: tuple(list): width and depth of each stage """ width_diff = [ width != width_prev for width, width_prev in zip(widths + [0], [0] + widths) ] stage_widths = [ width for width, diff in zip(widths, width_diff[:-1]) if diff ] stage_blocks = np.diff([ depth for depth, diff in zip(range(len(width_diff)), width_diff) if diff ]).tolist() return stage_widths, stage_blocks def forward(self, x): """Forward function.""" x = self.conv1(x) x = self.norm1(x) x = self.relu(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i in self.out_indices: outs.append(x) return tuple(outs)
12,269
36.638037
79
py
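The generate_regnet method above implements the quantized linear width rule from the RegNet paper. A minimal standalone sketch of the same arithmetic, assuming only numpy; the helper name regnet_widths is illustrative and not part of the repo:

import numpy as np


def regnet_widths(w0, wa, wm, depth, divisor=8):
    # continuous widths u_j = w0 + wa * j, one per block
    widths_cont = np.arange(depth) * wa + w0
    # snap each width to w0 * wm**k for the nearest integer k,
    # then round to a multiple of the divisor
    ks = np.round(np.log(widths_cont / w0) / np.log(wm))
    widths = np.round(w0 * np.power(wm, ks) / divisor) * divisor
    return widths.astype(int).tolist(), len(np.unique(widths))


# the 'regnetx_400mf' entry from arch_settings above
widths, num_stages = regnet_widths(w0=24, wa=24.48, wm=2.54, depth=22)
print(widths, num_stages)

Feeding in the other arch_settings entries should reproduce the per-block widths that get_stages_from_blocks then groups into stages.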
GFocalV2
GFocalV2-master/mmdet/models/backbones/detectors_resnext.py
import math from mmcv.cnn import build_conv_layer, build_norm_layer from ..builder import BACKBONES from .detectors_resnet import Bottleneck as _Bottleneck from .detectors_resnet import DetectoRS_ResNet class Bottleneck(_Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs): """Bottleneck block for ResNeXt. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. """ super(Bottleneck, self).__init__(inplanes, planes, **kwargs) if groups == 1: width = self.planes else: width = math.floor(self.planes * (base_width / base_channels)) * groups self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width, postfix=1) self.norm2_name, norm2 = build_norm_layer( self.norm_cfg, width, postfix=2) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False self.with_modulated_dcn = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if self.with_sac: self.conv2 = build_conv_layer( self.sac, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) elif not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' self.conv2 = build_conv_layer( self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( self.conv_cfg, width, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) @BACKBONES.register_module() class DetectoRS_ResNeXt(DetectoRS_ResNet): """ResNeXt backbone for DetectoRS. Args: groups (int): The number of groups in ResNeXt. base_width (int): The base width of ResNeXt. """ arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, groups=1, base_width=4, **kwargs): self.groups = groups self.base_width = base_width super(DetectoRS_ResNeXt, self).__init__(**kwargs) def make_res_layer(self, **kwargs): return super().make_res_layer( groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
3,872
30.487805
77
py
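The width arithmetic in Bottleneck.__init__ above decides how wide the grouped 3x3 conv becomes. A small sketch of just that rule; the 32x4d numbers below are illustrative conventions, not repo settings:

import math


def resnext_width(planes, groups, base_width=4, base_channels=64):
    # mirrors the computation in Bottleneck.__init__ above
    if groups == 1:
        return planes
    return math.floor(planes * (base_width / base_channels)) * groups


print(resnext_width(64, groups=32))   # 128 for a 32x4d block at planes=64
print(resnext_width(256, groups=32))  # 512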
GFocalV2
GFocalV2-master/mmdet/models/backbones/resnet.py
import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer, constant_init, kaiming_init) from mmcv.runner import load_checkpoint from torch.nn.modules.batchnorm import _BatchNorm from mmdet.utils import get_root_logger from ..builder import BACKBONES from ..utils import ResLayer class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None): super(BasicBlock, self).__init__() assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( conv_cfg, planes, planes, 3, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp @property def norm1(self): """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None): """Bottleneck block for ResNet. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
""" super(Bottleneck, self).__init__() assert style in ['pytorch', 'caffe'] assert dcn is None or isinstance(dcn, dict) assert plugins is None or isinstance(plugins, list) if plugins is not None: allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] assert all(p['position'] in allowed_position for p in plugins) self.inplanes = inplanes self.planes = planes self.stride = stride self.dilation = dilation self.style = style self.with_cp = with_cp self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.dcn = dcn self.with_dcn = dcn is not None self.plugins = plugins self.with_plugins = plugins is not None if self.with_plugins: # collect plugins for conv1/conv2/conv3 self.after_conv1_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv1' ] self.after_conv2_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv2' ] self.after_conv3_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv3' ] if self.style == 'pytorch': self.conv1_stride = 1 self.conv2_stride = stride else: self.conv1_stride = stride self.conv2_stride = 1 self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) self.norm3_name, norm3 = build_norm_layer( norm_cfg, planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False if self.with_dcn: fallback_on_stride = dcn.pop('fallback_on_stride', False) if not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( conv_cfg, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' self.conv2 = build_conv_layer( dcn, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( conv_cfg, planes, planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.relu = nn.ReLU(inplace=True) self.downsample = downsample if self.with_plugins: self.after_conv1_plugin_names = self.make_block_plugins( planes, self.after_conv1_plugins) self.after_conv2_plugin_names = self.make_block_plugins( planes, self.after_conv2_plugins) self.after_conv3_plugin_names = self.make_block_plugins( planes * self.expansion, self.after_conv3_plugins) def make_block_plugins(self, in_channels, plugins): """make plugins for block. Args: in_channels (int): Input channels of plugin. plugins (list[dict]): List of plugins cfg to build. Returns: list[str]: List of the names of plugin. 
""" assert isinstance(plugins, list) plugin_names = [] for plugin in plugins: plugin = plugin.copy() name, layer = build_plugin_layer( plugin, in_channels=in_channels, postfix=plugin.pop('postfix', '')) assert not hasattr(self, name), f'duplicate plugin {name}' self.add_module(name, layer) plugin_names.append(name) return plugin_names def forward_plugin(self, x, plugin_names): out = x for name in plugin_names: out = getattr(self, name)(x) return out @property def norm1(self): """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) @property def norm3(self): """nn.Module: normalization layer after the third convolution layer""" return getattr(self, self.norm3_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out @BACKBONES.register_module() class ResNet(nn.Module): """ResNet backbone. Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. stem_channels (int | None): Number of stem channels. If not specified, it will be the same as `base_channels`. Default: None. base_channels (int): Number of base channels of res layer. Default: 64. in_channels (int): Number of input image channels. Default: 3. num_stages (int): Resnet stages. Default: 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. norm_cfg (dict): Dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. plugins (list[dict]): List of plugins for stages, each dict contains: - cfg (dict, required): Cfg dict to build plugin. - position (str, required): Position inside block to insert plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - stages (tuple[bool], optional): Stages to apply plugin, length should be same as 'num_stages'. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): Whether to use zero init for last norm layer in resblocks to let them behave as identity. 
Example: >>> from mmdet.models import ResNet >>> import torch >>> self = ResNet(depth=18) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) (1, 64, 8, 8) (1, 128, 4, 4) (1, 256, 2, 2) (1, 512, 1, 1) """ arch_settings = { 18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, depth, in_channels=3, stem_channels=None, base_channels=64, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True): super(ResNet, self).__init__() if depth not in self.arch_settings: raise KeyError(f'invalid depth {depth} for resnet') self.depth = depth if stem_channels is None: stem_channels = base_channels self.stem_channels = stem_channels self.base_channels = base_channels self.num_stages = num_stages assert num_stages >= 1 and num_stages <= 4 self.strides = strides self.dilations = dilations assert len(strides) == len(dilations) == num_stages self.out_indices = out_indices assert max(out_indices) < num_stages self.style = style self.deep_stem = deep_stem self.avg_down = avg_down self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.with_cp = with_cp self.norm_eval = norm_eval self.dcn = dcn self.stage_with_dcn = stage_with_dcn if dcn is not None: assert len(stage_with_dcn) == num_stages self.plugins = plugins self.zero_init_residual = zero_init_residual self.block, stage_blocks = self.arch_settings[depth] self.stage_blocks = stage_blocks[:num_stages] self.inplanes = stem_channels self._make_stem_layer(in_channels, stem_channels) self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = strides[i] dilation = dilations[i] dcn = self.dcn if self.stage_with_dcn[i] else None if plugins is not None: stage_plugins = self.make_stage_plugins(plugins, i) else: stage_plugins = None planes = base_channels * 2**i res_layer = self.make_res_layer( block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=stage_plugins) self.inplanes = planes * self.block.expansion layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() self.feat_dim = self.block.expansion * base_channels * 2**( len(self.stage_blocks) - 1) def make_stage_plugins(self, plugins, stage_idx): """Make plugins for ResNet ``stage_idx`` th stage. Currently we support to insert ``context_block``, ``empirical_attention_block``, ``nonlocal_block`` into the backbone like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of Bottleneck. An example of plugins format could be: Examples: >>> plugins=[ ... dict(cfg=dict(type='xxx', arg1='xxx'), ... stages=(False, True, True, True), ... position='after_conv2'), ... dict(cfg=dict(type='yyy'), ... stages=(True, True, True, True), ... position='after_conv3'), ... dict(cfg=dict(type='zzz', postfix='1'), ... stages=(True, True, True, True), ... position='after_conv3'), ... 
dict(cfg=dict(type='zzz', postfix='2'), ... stages=(True, True, True, True), ... position='after_conv3') ... ] >>> self = ResNet(depth=18) >>> stage_plugins = self.make_stage_plugins(plugins, 0) >>> assert len(stage_plugins) == 3 Suppose ``stage_idx=0``, the structure of blocks in the stage would be: .. code-block:: none conv1-> conv2->conv3->yyy->zzz1->zzz2 Suppose 'stage_idx=1', the structure of blocks in the stage would be: .. code-block:: none conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 If stages is missing, the plugin would be applied to all stages. Args: plugins (list[dict]): List of plugins cfg to build. The postfix is required if multiple same type plugins are inserted. stage_idx (int): Index of stage to build Returns: list[dict]: Plugins for current stage """ stage_plugins = [] for plugin in plugins: plugin = plugin.copy() stages = plugin.pop('stages', None) assert stages is None or len(stages) == self.num_stages # whether to insert plugin into current stage if stages is None or stages[stage_idx]: stage_plugins.append(plugin) return stage_plugins def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer``.""" return ResLayer(**kwargs) @property def norm1(self): """nn.Module: the normalization layer named "norm1" """ return getattr(self, self.norm1_name) def _make_stem_layer(self, in_channels, stem_channels): if self.deep_stem: self.stem = nn.Sequential( build_conv_layer( self.conv_cfg, in_channels, stem_channels // 2, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels // 2)[1], nn.ReLU(inplace=True), build_conv_layer( self.conv_cfg, stem_channels // 2, stem_channels // 2, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels // 2)[1], nn.ReLU(inplace=True), build_conv_layer( self.conv_cfg, stem_channels // 2, stem_channels, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels)[1], nn.ReLU(inplace=True)) else: self.conv1 = build_conv_layer( self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, stem_channels, postfix=1) self.add_module(self.norm1_name, norm1) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def _freeze_stages(self): if self.frozen_stages >= 0: if self.deep_stem: self.stem.eval() for param in self.stem.parameters(): param.requires_grad = False else: self.norm1.eval() for m in [self.conv1, self.norm1]: for param in m.parameters(): param.requires_grad = False for i in range(1, self.frozen_stages + 1): m = getattr(self, f'layer{i}') m.eval() for param in m.parameters(): param.requires_grad = False def init_weights(self, pretrained=None): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. 
""" if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) if self.dcn is not None: for m in self.modules(): if isinstance(m, Bottleneck) and hasattr( m.conv2, 'conv_offset'): constant_init(m.conv2.conv_offset, 0) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): constant_init(m.norm3, 0) elif isinstance(m, BasicBlock): constant_init(m.norm2, 0) else: raise TypeError('pretrained must be a str or None') def forward(self, x): """Forward function.""" if self.deep_stem: x = self.stem(x) else: x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def train(self, mode=True): """Convert the model into training mode while keep normalization layer freezed.""" super(ResNet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval() @BACKBONES.register_module() class ResNetV1d(ResNet): r"""ResNetV1d variant described in `Bag of Tricks <https://arxiv.org/pdf/1812.01187.pdf>`_. Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in the input stem with three 3x3 convs. And in the downsampling block, a 2x2 avg_pool with stride 2 is added before conv, whose stride is changed to 1. """ def __init__(self, **kwargs): super(ResNetV1d, self).__init__( deep_stem=True, avg_down=True, **kwargs)
23,378
34.156391
79
py
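make_stage_plugins above is pure dict bookkeeping, so it can be checked in isolation. A standalone sketch of the same filtering, assuming num_stages=4; the plugin type names are the placeholders from the docstring:

def stage_plugins(plugins, stage_idx, num_stages=4):
    # mirrors ResNet.make_stage_plugins: drop entries disabled at this
    # stage and strip the 'stages' key before the cfg reaches the blocks
    out = []
    for plugin in plugins:
        plugin = plugin.copy()
        stages = plugin.pop('stages', None)
        assert stages is None or len(stages) == num_stages
        if stages is None or stages[stage_idx]:
            out.append(plugin)
    return out


plugins = [
    dict(cfg=dict(type='xxx'), stages=(False, True, True, True),
         position='after_conv2'),
    dict(cfg=dict(type='yyy'), position='after_conv3'),  # all stages
]
print(len(stage_plugins(plugins, 0)))  # 1
print(len(stage_plugins(plugins, 1)))  # 2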
GFocalV2
GFocalV2-master/mmdet/models/backbones/detectors_resnet.py
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, constant_init

from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet


class Bottleneck(_Bottleneck):
    r"""Bottleneck for the ResNet backbone in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_.

    This bottleneck allows the users to specify whether to use
    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).

    Args:
        inplanes (int): The number of input channels.
        planes (int): The number of output channels before expansion.
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        sac (dict, optional): Dictionary to construct SAC. Default: None.
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 rfp_inplanes=None,
                 sac=None,
                 **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        assert sac is None or isinstance(sac, dict)
        self.sac = sac
        self.with_sac = sac is not None
        if self.with_sac:
            self.conv2 = build_conv_layer(
                self.sac,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                bias=False)

        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            self.rfp_conv = build_conv_layer(
                None,
                self.rfp_inplanes,
                planes * self.expansion,
                1,
                stride=1,
                bias=True)
        self.init_weights()

    def init_weights(self):
        """Initialize the weights."""
        if self.rfp_inplanes:
            constant_init(self.rfp_conv, 0)

    def rfp_forward(self, x, rfp_feat):
        """The forward function that also takes the RFP features as input."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        if self.rfp_inplanes:
            rfp_feat = self.rfp_conv(rfp_feat)
            out = out + rfp_feat

        out = self.relu(out)

        return out


class ResLayer(nn.Sequential):
    """ResLayer to build ResNet style backbone for RFP in DetectoRS.

    The difference between this module and base class is that we pass
    ``rfp_inplanes`` to the first block.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
""" def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), downsample_first=True, rfp_inplanes=None, **kwargs): self.block = block assert downsample_first, f'downsampel_first={downsample_first} is ' \ 'not supported in DetectoRS' downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride if avg_down and stride != 1: conv_stride = 1 downsample.append( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)) downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, rfp_inplanes=rfp_inplanes, **kwargs)) inplanes = planes * block.expansion for _ in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) super(ResLayer, self).__init__(*layers) @BACKBONES.register_module() class DetectoRS_ResNet(ResNet): """ResNet backbone for DetectoRS. Args: sac (dict, optional): Dictionary to construct SAC (Switchable Atrous Convolution). Default: None. stage_with_sac (list): Which stage to use sac. Default: (False, False, False, False). rfp_inplanes (int, optional): The number of channels from RFP. Default: None. If specified, an additional conv layer will be added for ``rfp_feat``. Otherwise, the structure is the same as base class. output_img (bool): If ``True``, the input image will be inserted into the starting position of output. Default: False. pretrained (str, optional): The pretrained model to load. 
""" arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, sac=None, stage_with_sac=(False, False, False, False), rfp_inplanes=None, output_img=False, pretrained=None, **kwargs): self.sac = sac self.stage_with_sac = stage_with_sac self.rfp_inplanes = rfp_inplanes self.output_img = output_img self.pretrained = pretrained super(DetectoRS_ResNet, self).__init__(**kwargs) self.inplanes = self.stem_channels self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = self.strides[i] dilation = self.dilations[i] dcn = self.dcn if self.stage_with_dcn[i] else None sac = self.sac if self.stage_with_sac[i] else None if self.plugins is not None: stage_plugins = self.make_stage_plugins(self.plugins, i) else: stage_plugins = None planes = self.base_channels * 2**i res_layer = self.make_res_layer( block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, sac=sac, rfp_inplanes=rfp_inplanes if i > 0 else None, plugins=stage_plugins) self.inplanes = planes * self.block.expansion layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS.""" return ResLayer(**kwargs) def forward(self, x): """Forward function.""" outs = list(super(DetectoRS_ResNet, self).forward(x)) if self.output_img: outs.insert(0, x) return tuple(outs) def rfp_forward(self, x, rfp_feats): """Forward function for RFP.""" if self.deep_stem: x = self.stem(x) else: x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) rfp_feat = rfp_feats[i] if i > 0 else None for layer in res_layer: x = layer.rfp_forward(x, rfp_feat) if i in self.out_indices: outs.append(x) return tuple(outs)
10,517
33.372549
78
py
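The only change rfp_forward makes to a plain bottleneck is the residual add of the projected RFP feature before the final ReLU. A minimal torch sketch of that fusion with made-up channel sizes; because rfp_conv is zero-initialised (see init_weights above), the block starts out behaving exactly like a vanilla ResNet block:

import torch
import torch.nn as nn

rfp_inplanes, out_channels = 64, 256
rfp_conv = nn.Conv2d(rfp_inplanes, out_channels, 1, bias=True)
nn.init.constant_(rfp_conv.weight, 0)  # what constant_init(rfp_conv, 0) does
nn.init.constant_(rfp_conv.bias, 0)

out = torch.randn(1, out_channels, 8, 8)       # bottleneck output (pre-ReLU)
rfp_feat = torch.randn(1, rfp_inplanes, 8, 8)  # feature fed back by RFP
fused = torch.relu(out + rfp_conv(rfp_feat))
print(torch.equal(fused, torch.relu(out)))     # True at initialisation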
GFocalV2
GFocalV2-master/mmdet/models/backbones/ssd_vgg.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import VGG, constant_init, kaiming_init, normal_init, xavier_init from mmcv.runner import load_checkpoint from mmdet.utils import get_root_logger from ..builder import BACKBONES @BACKBONES.register_module() class SSDVGG(VGG): """VGG Backbone network for single-shot-detection. Args: input_size (int): width and height of input, from {300, 512}. depth (int): Depth of vgg, from {11, 13, 16, 19}. out_indices (Sequence[int]): Output from which stages. Example: >>> self = SSDVGG(input_size=300, depth=11) >>> self.eval() >>> inputs = torch.rand(1, 3, 300, 300) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) (1, 1024, 19, 19) (1, 512, 10, 10) (1, 256, 5, 5) (1, 256, 3, 3) (1, 256, 1, 1) """ extra_setting = { 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128), } def __init__(self, input_size, depth, with_last_pool=False, ceil_mode=True, out_indices=(3, 4), out_feature_indices=(22, 34), l2_norm_scale=20.): # TODO: in_channels for mmcv.VGG super(SSDVGG, self).__init__( depth, with_last_pool=with_last_pool, ceil_mode=ceil_mode, out_indices=out_indices) assert input_size in (300, 512) self.input_size = input_size self.features.add_module( str(len(self.features)), nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) self.features.add_module( str(len(self.features)), nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)) self.features.add_module( str(len(self.features)), nn.ReLU(inplace=True)) self.features.add_module( str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1)) self.features.add_module( str(len(self.features)), nn.ReLU(inplace=True)) self.out_feature_indices = out_feature_indices self.inplanes = 1024 self.extra = self._make_extra_layers(self.extra_setting[input_size]) self.l2_norm = L2Norm( self.features[out_feature_indices[0] - 1].out_channels, l2_norm_scale) def init_weights(self, pretrained=None): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. 
""" if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.features.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, nn.BatchNorm2d): constant_init(m, 1) elif isinstance(m, nn.Linear): normal_init(m, std=0.01) else: raise TypeError('pretrained must be a str or None') for m in self.extra.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') constant_init(self.l2_norm, self.l2_norm.scale) def forward(self, x): """Forward function.""" outs = [] for i, layer in enumerate(self.features): x = layer(x) if i in self.out_feature_indices: outs.append(x) for i, layer in enumerate(self.extra): x = F.relu(layer(x), inplace=True) if i % 2 == 1: outs.append(x) outs[0] = self.l2_norm(outs[0]) if len(outs) == 1: return outs[0] else: return tuple(outs) def _make_extra_layers(self, outplanes): layers = [] kernel_sizes = (1, 3) num_layers = 0 outplane = None for i in range(len(outplanes)): if self.inplanes == 'S': self.inplanes = outplane continue k = kernel_sizes[num_layers % 2] if outplanes[i] == 'S': outplane = outplanes[i + 1] conv = nn.Conv2d( self.inplanes, outplane, k, stride=2, padding=1) else: outplane = outplanes[i] conv = nn.Conv2d( self.inplanes, outplane, k, stride=1, padding=0) layers.append(conv) self.inplanes = outplanes[i] num_layers += 1 if self.input_size == 512: layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1)) return nn.Sequential(*layers) class L2Norm(nn.Module): def __init__(self, n_dims, scale=20., eps=1e-10): """L2 normalization layer. Args: n_dims (int): Number of dimensions to be normalized scale (float, optional): Defaults to 20.. eps (float, optional): Used to avoid division by zero. Defaults to 1e-10. """ super(L2Norm, self).__init__() self.n_dims = n_dims self.weight = nn.Parameter(torch.Tensor(self.n_dims)) self.eps = eps self.scale = scale def forward(self, x): """Forward function.""" # normalization layer convert to FP32 in FP16 training x_float = x.float() norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps return (self.weight[None, :, None, None].float().expand_as(x_float) * x_float / norm).type_as(x)
5,882
33.605882
79
py
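What the L2Norm layer above computes can be verified in a few lines: per-position L2 normalisation over channels followed by a learnable per-channel rescale (20 is SSD's usual init for the conv4_3 feature). An inline torch sketch with illustrative shapes:

import torch

x = torch.randn(2, 512, 38, 38)
weight = torch.full((512,), 20.)  # plays the role of L2Norm.weight
norm = x.pow(2).sum(1, keepdim=True).sqrt() + 1e-10
y = weight[None, :, None, None] * x / norm
print(y.pow(2).sum(1).sqrt()[0, 0, 0].item())  # ~20.0 at every position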
GFocalV2
GFocalV2-master/mmdet/models/backbones/resnext.py
import math from mmcv.cnn import build_conv_layer, build_norm_layer from ..builder import BACKBONES from ..utils import ResLayer from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet class Bottleneck(_Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs): """Bottleneck block for ResNeXt. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. """ super(Bottleneck, self).__init__(inplanes, planes, **kwargs) if groups == 1: width = self.planes else: width = math.floor(self.planes * (base_width / base_channels)) * groups self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width, postfix=1) self.norm2_name, norm2 = build_norm_layer( self.norm_cfg, width, postfix=2) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False self.with_modulated_dcn = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' self.conv2 = build_conv_layer( self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( self.conv_cfg, width, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) @BACKBONES.register_module() class ResNeXt(ResNet): """ResNeXt backbone. Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. in_channels (int): Number of input image channels. Default: 3. num_stages (int): Resnet stages. Default: 4. groups (int): Group of resnext. base_width (int): Base width of resnext. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. frozen_stages (int): Stages to be frozen (all param fixed). -1 means not freezing any parameters. norm_cfg (dict): dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): whether to use zero init for last norm layer in resblocks to let them behave as identity. """ arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, groups=1, base_width=4, **kwargs): self.groups = groups self.base_width = base_width super(ResNeXt, self).__init__(**kwargs) def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer``""" return ResLayer( groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
4,731
34.578947
79
py
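resnext.py carries no usage snippet of its own; following the docstring convention of the ResNet example above, a hedged one (the printed shapes are what the Bottleneck expansion of 4 implies, not verified here):

from mmdet.models import ResNeXt
import torch

self = ResNeXt(depth=50, groups=32, base_width=4)
self.eval()
inputs = torch.rand(1, 3, 32, 32)
level_outputs = self.forward(inputs)
for level_out in level_outputs:
    print(tuple(level_out.shape))
# expected: (1, 256, 8, 8), (1, 512, 4, 4), (1, 1024, 2, 2), (1, 2048, 1, 1)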
GFocalV2
GFocalV2-master/mmdet/models/backbones/hourglass.py
import torch.nn as nn from mmcv.cnn import ConvModule from ..builder import BACKBONES from ..utils import ResLayer from .resnet import BasicBlock class HourglassModule(nn.Module): """Hourglass Module for HourglassNet backbone. Generate module recursively and use BasicBlock as the base unit. Args: depth (int): Depth of current HourglassModule. stage_channels (list[int]): Feature channels of sub-modules in current and follow-up HourglassModule. stage_blocks (list[int]): Number of sub-modules stacked in current and follow-up HourglassModule. norm_cfg (dict): Dictionary to construct and config norm layer. """ def __init__(self, depth, stage_channels, stage_blocks, norm_cfg=dict(type='BN', requires_grad=True)): super(HourglassModule, self).__init__() self.depth = depth cur_block = stage_blocks[0] next_block = stage_blocks[1] cur_channel = stage_channels[0] next_channel = stage_channels[1] self.up1 = ResLayer( BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg) self.low1 = ResLayer( BasicBlock, cur_channel, next_channel, cur_block, stride=2, norm_cfg=norm_cfg) if self.depth > 1: self.low2 = HourglassModule(depth - 1, stage_channels[1:], stage_blocks[1:]) else: self.low2 = ResLayer( BasicBlock, next_channel, next_channel, next_block, norm_cfg=norm_cfg) self.low3 = ResLayer( BasicBlock, next_channel, cur_channel, cur_block, norm_cfg=norm_cfg, downsample_first=False) self.up2 = nn.Upsample(scale_factor=2) def forward(self, x): """Forward function.""" up1 = self.up1(x) low1 = self.low1(x) low2 = self.low2(low1) low3 = self.low3(low2) up2 = self.up2(low3) return up1 + up2 @BACKBONES.register_module() class HourglassNet(nn.Module): """HourglassNet backbone. Stacked Hourglass Networks for Human Pose Estimation. More details can be found in the `paper <https://arxiv.org/abs/1603.06937>`_ . Args: downsample_times (int): Downsample times in a HourglassModule. num_stacks (int): Number of HourglassModule modules stacked, 1 for Hourglass-52, 2 for Hourglass-104. stage_channels (list[int]): Feature channel of each sub-module in a HourglassModule. stage_blocks (list[int]): Number of sub-modules stacked in a HourglassModule. feat_channel (int): Feature channel of conv after a HourglassModule. norm_cfg (dict): Dictionary to construct and config norm layer. Example: >>> from mmdet.models import HourglassNet >>> import torch >>> self = HourglassNet() >>> self.eval() >>> inputs = torch.rand(1, 3, 511, 511) >>> level_outputs = self.forward(inputs) >>> for level_output in level_outputs: ... 
print(tuple(level_output.shape))
        (1, 256, 128, 128)
        (1, 256, 128, 128)
    """

    def __init__(self,
                 downsample_times=5,
                 num_stacks=2,
                 stage_channels=(256, 256, 384, 384, 384, 512),
                 stage_blocks=(2, 2, 2, 2, 2, 4),
                 feat_channel=256,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(HourglassNet, self).__init__()

        self.num_stacks = num_stacks
        assert self.num_stacks >= 1
        assert len(stage_channels) == len(stage_blocks)
        assert len(stage_channels) > downsample_times

        cur_channel = stage_channels[0]

        self.stem = nn.Sequential(
            ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg),
            ResLayer(BasicBlock, 128, 256, 1, stride=2, norm_cfg=norm_cfg))

        self.hourglass_modules = nn.ModuleList([
            HourglassModule(downsample_times, stage_channels, stage_blocks)
            for _ in range(num_stacks)
        ])

        self.inters = ResLayer(
            BasicBlock,
            cur_channel,
            cur_channel,
            num_stacks - 1,
            norm_cfg=norm_cfg)

        self.conv1x1s = nn.ModuleList([
            ConvModule(
                cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range(num_stacks - 1)
        ])

        self.out_convs = nn.ModuleList([
            ConvModule(
                cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
            for _ in range(num_stacks)
        ])

        self.remap_convs = nn.ModuleList([
            ConvModule(
                feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range(num_stacks - 1)
        ])

        self.relu = nn.ReLU(inplace=True)

    def init_weights(self, pretrained=None):
        """Init module weights.

        We do nothing in this function because all modules we used
        (ConvModule, BasicBlock, etc.) have default initialization, and
        currently we don't provide a pretrained model of HourglassNet.

        Detector's __init__() will call backbone's init_weights() with
        pretrained as input, so we keep this function.
        """
        # Training Centripetal Model needs to reset parameters for Conv2d
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.reset_parameters()

    def forward(self, x):
        """Forward function."""
        inter_feat = self.stem(x)
        out_feats = []

        for ind in range(self.num_stacks):
            single_hourglass = self.hourglass_modules[ind]
            out_conv = self.out_convs[ind]

            hourglass_feat = single_hourglass(inter_feat)
            out_feat = out_conv(hourglass_feat)
            out_feats.append(out_feat)

            if ind < self.num_stacks - 1:
                inter_feat = self.conv1x1s[ind](
                    inter_feat) + self.remap_convs[ind](
                        out_feat)
                inter_feat = self.inters[ind](self.relu(inter_feat))

        return out_feats
6,452
31.427136
79
py
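A single HourglassModule is shape-preserving: depth stride-2 stages down, the same number of x2 upsamples back, with the skip branch (up1) added at each level. A small check, assuming the default Hourglass-104 stage settings and an input whose side is divisible by 2**depth:

from mmdet.models.backbones.hourglass import HourglassModule
import torch

module = HourglassModule(
    depth=5,
    stage_channels=[256, 256, 384, 384, 384, 512],
    stage_blocks=[2, 2, 2, 2, 2, 4])
module.eval()
x = torch.rand(1, 256, 64, 64)
print(tuple(module(x).shape))  # (1, 256, 64, 64)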
GFocalV2
GFocalV2-master/mmdet/models/backbones/res2net.py
import math import torch import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, kaiming_init) from mmcv.runner import load_checkpoint from torch.nn.modules.batchnorm import _BatchNorm from mmdet.utils import get_root_logger from ..builder import BACKBONES from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet class Bottle2neck(_Bottleneck): expansion = 4 def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs): """Bottle2neck block for Res2Net. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. """ super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' width = int(math.floor(self.planes * (base_width / base_channels))) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width * scales, postfix=1) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width * scales, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) if stage_type == 'stage' and self.conv2_stride != 1: self.pool = nn.AvgPool2d( kernel_size=3, stride=self.conv2_stride, padding=1) convs = [] bns = [] fallback_on_stride = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if not self.with_dcn or fallback_on_stride: for i in range(scales - 1): convs.append( build_conv_layer( self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append( build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' for i in range(scales - 1): convs.append( build_conv_layer( self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append( build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) self.conv3 = build_conv_layer( self.conv_cfg, width * scales, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.stage_type = stage_type self.scales = scales self.width = width delattr(self, 'conv2') delattr(self, self.norm2_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) spx = torch.split(out, self.width, 1) sp = self.convs[0](spx[0].contiguous()) sp = self.relu(self.bns[0](sp)) out = sp for i in range(1, self.scales - 1): if self.stage_type == 'stage': sp = spx[i] else: sp = sp + spx[i] sp = self.convs[i](sp.contiguous()) sp = self.relu(self.bns[i](sp)) out = torch.cat((out, sp), 1) if self.stage_type == 'normal' or self.conv2_stride == 1: out = torch.cat((out, spx[self.scales - 1]), 1) elif self.stage_type == 'stage': out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if 
self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out class Res2Layer(nn.Sequential): """Res2Layer to build Res2Net style backbone. Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottle2neck. Default: False conv_cfg (dict): dictionary to construct and config conv layer. Default: None norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='BN') scales (int): Scales used in Res2Net. Default: 4 base_width (int): Basic width of each scale. Default: 26 """ def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=True, conv_cfg=None, norm_cfg=dict(type='BN'), scales=4, base_width=26, **kwargs): self.block = block downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False), build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=1, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1], ) layers = [] layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, stage_type='stage', **kwargs)) inplanes = planes * block.expansion for i in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, **kwargs)) super(Res2Layer, self).__init__(*layers) @BACKBONES.register_module() class Res2Net(ResNet): """Res2Net backbone. Args: scales (int): Scales used in Res2Net. Default: 4 base_width (int): Basic width of each scale. Default: 26 depth (int): Depth of res2net, from {50, 101, 152}. in_channels (int): Number of input image channels. Default: 3. num_stages (int): Res2net stages. Default: 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottle2neck. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. norm_cfg (dict): Dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. plugins (list[dict]): List of plugins for stages, each dict contains: - cfg (dict, required): Cfg dict to build plugin. - position (str, required): Position inside block to insert plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - stages (tuple[bool], optional): Stages to apply plugin, length should be same as 'num_stages'. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. 
zero_init_residual (bool): Whether to use zero init for last norm layer in resblocks to let them behave as identity. Example: >>> from mmdet.models import Res2Net >>> import torch >>> self = Res2Net(depth=50, scales=4, base_width=26) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) (1, 256, 8, 8) (1, 512, 4, 4) (1, 1024, 2, 2) (1, 2048, 1, 1) """ arch_settings = { 50: (Bottle2neck, (3, 4, 6, 3)), 101: (Bottle2neck, (3, 4, 23, 3)), 152: (Bottle2neck, (3, 8, 36, 3)) } def __init__(self, scales=4, base_width=26, style='pytorch', deep_stem=True, avg_down=True, **kwargs): self.scales = scales self.base_width = base_width super(Res2Net, self).__init__( style='pytorch', deep_stem=True, avg_down=True, **kwargs) def make_res_layer(self, **kwargs): return Res2Layer( scales=self.scales, base_width=self.base_width, base_channels=self.base_channels, **kwargs) def init_weights(self, pretrained=None): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) if self.dcn is not None: for m in self.modules(): if isinstance(m, Bottle2neck): # dcn in Res2Net bottle2neck is in ModuleList for n in m.convs: if hasattr(n, 'conv_offset'): constant_init(n.conv_offset, 0) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottle2neck): constant_init(m.norm3, 0) else: raise TypeError('pretrained must be a str or None')
12,675
35.011364
79
py
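The core of Bottle2neck.forward above is the split-and-cascade over scale groups. A stripped-down torch sketch of the 'normal' stage type, with BN/ReLU and the surrounding 1x1 convs omitted; width=26 and scales=4 follow the class defaults:

import torch
import torch.nn as nn

scales, width = 4, 26
convs = nn.ModuleList([
    nn.Conv2d(width, width, 3, padding=1, bias=False)
    for _ in range(scales - 1)
])

x = torch.randn(1, width * scales, 8, 8)  # output of the first 1x1 conv
spx = torch.split(x, width, 1)
sp = convs[0](spx[0])
out = sp
for i in range(1, scales - 1):
    sp = convs[i](sp + spx[i])      # each branch sees the previous one
    out = torch.cat((out, sp), 1)
out = torch.cat((out, spx[-1]), 1)  # last split passes through unconvolved
print(tuple(out.shape))             # (1, 104, 8, 8)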
GFocalV2
GFocalV2-master/mmdet/models/backbones/darknet.py
# Copyright (c) 2019 Western Digital Corporation or its affiliates.

import logging

import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm

from ..builder import BACKBONES


class ResBlock(nn.Module):
    """The basic residual block used in Darknet. Each ResBlock consists of two
    ConvModules and the input is added to the final output. Each ConvModule is
    composed of Conv, BN, and LeakyReLU. In the YOLOv3 paper, the first conv
    layer has half as many filters as the second conv layer. The first conv
    layer has a filter size of 1x1 and the second one has a filter size of
    3x3.

    Args:
        in_channels (int): The input channels. Must be even.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
    """

    def __init__(self,
                 in_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
        super(ResBlock, self).__init__()
        assert in_channels % 2 == 0  # ensure the in_channels is even
        half_in_channels = in_channels // 2

        # shortcut
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
        self.conv2 = ConvModule(
            half_in_channels, in_channels, 3, padding=1, **cfg)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = out + residual

        return out


@BACKBONES.register_module()
class Darknet(nn.Module):
    """Darknet backbone.

    Args:
        depth (int): Depth of Darknet. Currently only supports 53.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Default: -1.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.

    Example:
        >>> from mmdet.models import Darknet
        >>> import torch
        >>> self = Darknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
(1, 256, 52, 52) (1, 512, 26, 26) (1, 1024, 13, 13) """ # Dict(depth: (layers, channels)) arch_settings = { 53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512), (512, 1024))) } def __init__(self, depth=53, out_indices=(3, 4, 5), frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), norm_eval=True): super(Darknet, self).__init__() if depth not in self.arch_settings: raise KeyError(f'invalid depth {depth} for darknet') self.depth = depth self.out_indices = out_indices self.frozen_stages = frozen_stages self.layers, self.channels = self.arch_settings[depth] cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg) self.cr_blocks = ['conv1'] for i, n_layers in enumerate(self.layers): layer_name = f'conv_res_block{i + 1}' in_c, out_c = self.channels[i] self.add_module( layer_name, self.make_conv_res_block(in_c, out_c, n_layers, **cfg)) self.cr_blocks.append(layer_name) self.norm_eval = norm_eval def forward(self, x): outs = [] for i, layer_name in enumerate(self.cr_blocks): cr_block = getattr(self, layer_name) x = cr_block(x) if i in self.out_indices: outs.append(x) return tuple(outs) def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = logging.getLogger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) else: raise TypeError('pretrained must be a str or None') def _freeze_stages(self): if self.frozen_stages >= 0: for i in range(self.frozen_stages): m = getattr(self, self.cr_blocks[i]) m.eval() for param in m.parameters(): param.requires_grad = False def train(self, mode=True): super(Darknet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): if isinstance(m, _BatchNorm): m.eval() @staticmethod def make_conv_res_block(in_channels, out_channels, res_repeat, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1)): """In Darknet backbone, ConvLayer is usually followed by ResBlock. This function will make that. The Conv layers always have 3x3 filters with stride=2. The number of the filters in Conv layer is the same as the out channels of the ResBlock. Args: in_channels (int): The number of input channels. out_channels (int): The number of output channels. res_repeat (int): The number of ResBlocks. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). """ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) model = nn.Sequential() model.add_module( 'conv', ConvModule( in_channels, out_channels, 3, stride=2, padding=1, **cfg)) for idx in range(res_repeat): model.add_module('res{}'.format(idx), ResBlock(out_channels, **cfg)) return model
7,574
36.875
79
py
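The channel bottlenecking in ResBlock above (1x1 halves the channels, 3x3 restores them, then a residual add) in a few lines of torch, with norm/activation left out for brevity; the sizes are illustrative:

import torch
import torch.nn as nn

c = 64
conv1 = nn.Conv2d(c, c // 2, 1, bias=False)             # halve the channels
conv2 = nn.Conv2d(c // 2, c, 3, padding=1, bias=False)  # restore them
x = torch.randn(1, c, 13, 13)
out = conv2(conv1(x)) + x                               # residual add
print(tuple(out.shape))                                 # (1, 64, 13, 13)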
GFocalV2
GFocalV2-master/mmdet/datasets/custom.py
import os.path as osp
import warnings

import mmcv
import numpy as np
from torch.utils.data import Dataset

from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .pipelines import Compose


@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for detection.

    The annotation format is shown as follows. The `ann` field is optional
    for testing.

    .. code-block:: none

        [
            {
                'filename': 'a.jpg',
                'width': 1280,
                'height': 720,
                'ann': {
                    'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
                    'labels': <np.ndarray> (n, ),
                    'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
                    'labels_ignore': <np.ndarray> (k, ) (optional field)
                }
            },
            ...
        ]

    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        classes (str | Sequence[str], optional): Specify classes to load.
            If None, ``cls.CLASSES`` will be used. Default: None.
        data_root (str, optional): Data root for ``ann_file``,
            ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
        test_mode (bool, optional): If set True, annotation will not be
            loaded.
        filter_empty_gt (bool, optional): If set True, images without
            bounding boxes of the dataset's classes will be filtered out.
            This option only works when `test_mode=False`, i.e., we never
            filter images during tests.
    """

    CLASSES = None

    def __init__(self,
                 ann_file,
                 pipeline,
                 classes=None,
                 data_root=None,
                 img_prefix='',
                 seg_prefix=None,
                 proposal_file=None,
                 test_mode=False,
                 filter_empty_gt=True):
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = self.get_classes(classes)

        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.ann_file):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if not (self.proposal_file is None
                    or osp.isabs(self.proposal_file)):
                self.proposal_file = osp.join(self.data_root,
                                              self.proposal_file)
        # load annotations (and proposals)
        self.data_infos = self.load_annotations(self.ann_file)
        if self.proposal_file is not None:
            self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None
        # filter images too small and containing no annotations
        if not test_mode:
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]
            # set group flag for the sampler
            self._set_group_flag()
        # processing pipeline
        self.pipeline = Compose(pipeline)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load annotation from annotation file."""
        return mmcv.load(ann_file)

    def load_proposals(self, proposal_file):
        """Load proposal from proposal file."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.data_infos[idx]['ann']

    def get_cat_ids(self, idx):
        """Get category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
""" return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist() def pre_pipeline(self, results): """Prepare results dict for pipeline.""" results['img_prefix'] = self.img_prefix results['seg_prefix'] = self.seg_prefix results['proposal_file'] = self.proposal_file results['bbox_fields'] = [] results['mask_fields'] = [] results['seg_fields'] = [] def _filter_imgs(self, min_size=32): """Filter images too small.""" if self.filter_empty_gt: warnings.warn( 'CustomDataset does not support filtering empty gt images.') valid_inds = [] for i, img_info in enumerate(self.data_infos): if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) return valid_inds def _set_group_flag(self): """Set flag according to image aspect ratio. Images with aspect ratio greater than 1 will be set as group 1, otherwise group 0. """ self.flag = np.zeros(len(self), dtype=np.uint8) for i in range(len(self)): img_info = self.data_infos[i] if img_info['width'] / img_info['height'] > 1: self.flag[i] = 1 def _rand_another(self, idx): """Get another random index from the same group as the given index.""" pool = np.where(self.flag == self.flag[idx])[0] return np.random.choice(pool) def __getitem__(self, idx): """Get training/test data after pipeline. Args: idx (int): Index of data. Returns: dict: Training/test data (with annotation if `test_mode` is set \ True). """ if self.test_mode: return self.prepare_test_img(idx) while True: data = self.prepare_train_img(idx) if data is None: idx = self._rand_another(idx) continue return data def prepare_train_img(self, idx): """Get training data and annotations after pipeline. Args: idx (int): Index of data. Returns: dict: Training data and annotation after pipeline with new keys \ introduced by pipeline. """ img_info = self.data_infos[idx] ann_info = self.get_ann_info(idx) results = dict(img_info=img_info, ann_info=ann_info) if self.proposals is not None: results['proposals'] = self.proposals[idx] self.pre_pipeline(results) return self.pipeline(results) def prepare_test_img(self, idx): """Get testing data after pipeline. Args: idx (int): Index of data. Returns: dict: Testing data after pipeline with new keys intorduced by \ piepline. """ img_info = self.data_infos[idx] results = dict(img_info=img_info) if self.proposals is not None: results['proposals'] = self.proposals[idx] self.pre_pipeline(results) return self.pipeline(results) @classmethod def get_classes(cls, classes=None): """Get class names of current dataset. Args: classes (Sequence[str] | str | None): If classes is None, use default CLASSES defined by builtin dataset. If classes is a string, take it as a file name. The file contains the name of classes where each line contains one class name. If classes is a tuple or list, override the CLASSES defined by the dataset. Returns: tuple[str] or list[str]: Names of categories of the dataset. """ if classes is None: return cls.CLASSES if isinstance(classes, str): # take it as a file path class_names = mmcv.list_from_file(classes) elif isinstance(classes, (tuple, list)): class_names = classes else: raise ValueError(f'Unsupported type {type(classes)} of classes.') return class_names def format_results(self, results, **kwargs): """Place holder to format result to dataset specific output.""" pass def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None): """Evaluate the dataset. Args: results (list): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. 
logger (logging.Logger | None | str): Logger used for printing related information during evaluation. Default: None. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thr (float | list[float]): IoU threshold. It must be a float when evaluating mAP, and can be a list when evaluating recall. Default: 0.5. scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP. Default: None. """ if not isinstance(metric, str): assert len(metric) == 1 metric = metric[0] allowed_metrics = ['mAP', 'recall'] if metric not in allowed_metrics: raise KeyError(f'metric {metric} is not supported') annotations = [self.get_ann_info(i) for i in range(len(self))] eval_results = {} if metric == 'mAP': assert isinstance(iou_thr, float) mean_ap, _ = eval_map( results, annotations, scale_ranges=scale_ranges, iou_thr=iou_thr, dataset=self.CLASSES, logger=logger) eval_results['mAP'] = mean_ap elif metric == 'recall': gt_bboxes = [ann['bboxes'] for ann in annotations] if isinstance(iou_thr, float): iou_thr = [iou_thr] recalls = eval_recalls( gt_bboxes, results, proposal_nums, iou_thr, logger=logger) for i, num in enumerate(proposal_nums): for j, iou in enumerate(iou_thr): eval_results[f'recall@{num}@{iou}'] = recalls[i, j] if recalls.shape[1] > 1: ar = recalls.mean(axis=1) for i, num in enumerate(proposal_nums): eval_results[f'AR@{num}'] = ar[i] return eval_results
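# --- Added illustrative sketch (not part of the original file): a minimal
# annotation list in the "middle format" described in the CustomDataset
# docstring above. The file name 'my_ann.pkl' and the field values are
# hypothetical examples only; CustomDataset.load_annotations reads such a
# file via mmcv.load.
import mmcv
import numpy as np

toy_ann = [
    dict(
        filename='a.jpg',
        width=1280,
        height=720,
        ann=dict(
            bboxes=np.array([[10., 20., 200., 300.]], dtype=np.float32),
            labels=np.array([0], dtype=np.int64))),
]
mmcv.dump(toy_ann, 'my_ann.pkl')  # consumable by CustomDataset(ann_file=...)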
11,329
34.29595
79
py
GFocalV2
GFocalV2-master/mmdet/datasets/dataset_wrappers.py
import bisect
import math
from collections import defaultdict

import numpy as np
from mmcv.utils import print_log
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset

from .builder import DATASETS
from .coco import CocoDataset


@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
        separate_eval (bool): Whether to evaluate the results
            separately if it is used as validation dataset.
            Defaults to True.
    """

    def __init__(self, datasets, separate_eval=True):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        self.separate_eval = separate_eval
        if not separate_eval:
            if any([isinstance(ds, CocoDataset) for ds in datasets]):
                raise NotImplementedError(
                    'Evaluating concatenated CocoDataset as a whole is not'
                    ' supported! Please set "separate_eval=True"')
            elif len(set([type(ds) for ds in datasets])) != 1:
                raise NotImplementedError(
                    'All the datasets should have same types')

        if hasattr(datasets[0], 'flag'):
            flags = []
            for i in range(0, len(datasets)):
                flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags)

    def get_cat_ids(self, idx):
        """Get category ids of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    'absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx].get_cat_ids(sample_idx)

    def evaluate(self, results, logger=None, **kwargs):
        """Evaluate the results.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: AP results of the total dataset or each separate
                dataset if `self.separate_eval=True`.
        """
        assert len(results) == self.cumulative_sizes[-1], \
            ('Dataset and results have different sizes: '
             f'{self.cumulative_sizes[-1]} v.s. {len(results)}')

        # Check whether all the datasets support evaluation
        for dataset in self.datasets:
            assert hasattr(dataset, 'evaluate'), \
                f'{type(dataset)} does not implement evaluate function'

        if self.separate_eval:
            dataset_idx = -1
            total_eval_results = dict()
            for size, dataset in zip(self.cumulative_sizes, self.datasets):
                start_idx = 0 if dataset_idx == -1 else \
                    self.cumulative_sizes[dataset_idx]
                end_idx = self.cumulative_sizes[dataset_idx + 1]

                results_per_dataset = results[start_idx:end_idx]
                print_log(
                    f'\nEvaluating {dataset.ann_file} with '
                    f'{len(results_per_dataset)} images now',
                    logger=logger)

                eval_results_per_dataset = dataset.evaluate(
                    results_per_dataset, logger=logger, **kwargs)
                dataset_idx += 1
                for k, v in eval_results_per_dataset.items():
                    total_eval_results.update({f'{dataset_idx}_{k}': v})

            return total_eval_results
        elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
            raise NotImplementedError(
                'Evaluating concatenated CocoDataset as a whole is not'
                ' supported! Please set "separate_eval=True"')
        elif len(set([type(ds) for ds in self.datasets])) != 1:
            raise NotImplementedError(
                'All the datasets should have same types')
        else:
            original_data_infos = self.datasets[0].data_infos
            self.datasets[0].data_infos = sum(
                [dataset.data_infos for dataset in self.datasets], [])
            eval_results = self.datasets[0].evaluate(
                results, logger=logger, **kwargs)
            self.datasets[0].data_infos = original_data_infos
            return eval_results


@DATASETS.register_module()
class RepeatDataset(object):
    """A wrapper of repeated dataset.

    The length of repeated dataset will be `times` larger than the original
    dataset. This is useful when the data loading time is long but the dataset
    is small. Using RepeatDataset can reduce the data loading time between
    epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        if hasattr(self.dataset, 'flag'):
            self.flag = np.tile(self.dataset.flag, times)

        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx % self._ori_len]

    def get_cat_ids(self, idx):
        """Get category ids of repeat dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        return self.dataset.get_cat_ids(idx % self._ori_len)

    def __len__(self):
        """Length after repetition."""
        return self.times * self._ori_len


# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57  # noqa
@DATASETS.register_module()
class ClassBalancedDataset(object):
    """A wrapper of repeated dataset with repeat factor.

    Suitable for training on class imbalanced datasets like LVIS. Following
    the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
    in each epoch, an image may appear multiple times based on its
    "repeat factor".
    The repeat factor for an image is a function of the frequency of the
    rarest category labeled in that image. The "frequency of category c" in
    [0, 1] is defined by the fraction of images in the training set (without
    repeats) in which category c appears.
    The dataset needs to instantiate :func:`self.get_cat_ids` to support
    ClassBalancedDataset.

    The repeat factor is computed as follows.

    1. For each category c, compute the fraction # of images
       that contain it: :math:`f(c)`
    2. For each category c, compute the category-level repeat factor:
       :math:`r(c) = max(1, sqrt(t/f(c)))`
    3. For each image I, compute the image-level repeat factor:
       :math:`r(I) = max_{c in I} r(c)`

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be repeated.
        oversample_thr (float): frequency threshold below which data is
            repeated. For categories with ``f_c >= oversample_thr``, there is
            no oversampling. For categories with ``f_c < oversample_thr``, the
            degree of oversampling follows the square-root inverse frequency
            heuristic above.
        filter_empty_gt (bool, optional): If set true, images without bounding
            boxes will not be oversampled. Otherwise, they will be categorized
            as the pure background class and involved into the oversampling.
            Default: True.
    """

    def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = dataset.CLASSES

        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
        repeat_indices = []
        for dataset_idx, repeat_factor in enumerate(repeat_factors):
            repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))
        self.repeat_indices = repeat_indices

        flags = []
        if hasattr(self.dataset, 'flag'):
            for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
                flags.extend([flag] * int(math.ceil(repeat_factor)))
            assert len(flags) == len(repeat_indices)
        self.flag = np.asarray(flags, dtype=np.uint8)

    def _get_repeat_factors(self, dataset, repeat_thr):
        """Get repeat factor for each image in the dataset.

        Args:
            dataset (:obj:`CustomDataset`): The dataset
            repeat_thr (float): The threshold of frequency. If an image
                contains the categories whose frequency below the threshold,
                it would be repeated.

        Returns:
            list[float]: The repeat factors for each image in the dataset.
        """

        # 1. For each category c, compute the fraction # of images
        #    that contain it: f(c)
        category_freq = defaultdict(int)
        num_images = len(dataset)
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            if len(cat_ids) == 0 and not self.filter_empty_gt:
                cat_ids = set([len(self.CLASSES)])
            for cat_id in cat_ids:
                category_freq[cat_id] += 1
        for k, v in category_freq.items():
            category_freq[k] = v / num_images

        # 2. For each category c, compute the category-level repeat factor:
        #    r(c) = max(1, sqrt(t/f(c)))
        category_repeat = {
            cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
            for cat_id, cat_freq in category_freq.items()
        }

        # 3. For each image I, compute the image-level repeat factor:
        #    r(I) = max_{c in I} r(c)
        repeat_factors = []
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            if len(cat_ids) == 0 and not self.filter_empty_gt:
                cat_ids = set([len(self.CLASSES)])
            repeat_factor = 1
            if len(cat_ids) > 0:
                repeat_factor = max(
                    {category_repeat[cat_id] for cat_id in cat_ids})
            repeat_factors.append(repeat_factor)

        return repeat_factors

    def __getitem__(self, idx):
        ori_index = self.repeat_indices[idx]
        return self.dataset[ori_index]

    def __len__(self):
        """Length after repetition."""
        return len(self.repeat_indices)
11,088
38.183746
167
py
GFocalV2
GFocalV2-master/mmdet/datasets/builder.py
import copy
import platform
import random
from functools import partial

import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader

from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler

if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))

DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')


def _concat_dataset(cfg, default_args=None):
    from .dataset_wrappers import ConcatDataset
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)
    separate_eval = cfg.get('separate_eval', True)

    datasets = []
    num_dset = len(ann_files)
    for i in range(num_dset):
        data_cfg = copy.deepcopy(cfg)
        # pop 'separate_eval' since it is not a valid key for common datasets.
        if 'separate_eval' in data_cfg:
            data_cfg.pop('separate_eval')
        data_cfg['ann_file'] = ann_files[i]
        if isinstance(img_prefixes, (list, tuple)):
            data_cfg['img_prefix'] = img_prefixes[i]
        if isinstance(seg_prefixes, (list, tuple)):
            data_cfg['seg_prefix'] = seg_prefixes[i]
        if isinstance(proposal_files, (list, tuple)):
            data_cfg['proposal_file'] = proposal_files[i]
        datasets.append(build_dataset(data_cfg, default_args))

    return ConcatDataset(datasets, separate_eval)


def build_dataset(cfg, default_args=None):
    from .dataset_wrappers import (ConcatDataset, RepeatDataset,
                                   ClassBalancedDataset)
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'ConcatDataset':
        dataset = ConcatDataset(
            [build_dataset(c, default_args) for c in cfg['datasets']],
            cfg.get('separate_eval', True))
    elif cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    elif cfg['type'] == 'ClassBalancedDataset':
        dataset = ClassBalancedDataset(
            build_dataset(cfg['dataset'], default_args),
            cfg['oversample_thr'])
    elif isinstance(cfg.get('ann_file'), (list, tuple)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        dataset = build_from_cfg(cfg, DATASETS, default_args)

    return dataset


def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # DistributedGroupSampler will definitely shuffle the data to satisfy
        # that images on each GPU are in the same group
        if shuffle:
            sampler = DistributedGroupSampler(dataset, samples_per_gpu,
                                              world_size, rank)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False)
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    init_fn = partial(
        worker_init_fn, num_workers=num_workers, rank=rank,
        seed=seed) if seed is not None else None

    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=False,
        worker_init_fn=init_fn,
        **kwargs)

    return data_loader


def worker_init_fn(worker_id, num_workers, rank, seed):
    # The seed of each worker equals to
    # num_worker * rank + worker_id + user_seed
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
5,291
35.75
79
py
GFocalV2
GFocalV2-master/mmdet/datasets/samplers/group_sampler.py
from __future__ import division
import math

import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import Sampler


class GroupSampler(Sampler):

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        for i, size in enumerate(self.group_sizes):
            self.num_samples += int(np.ceil(
                size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return self.num_samples


class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                indice = indice[list(
                    torch.randperm(int(size), generator=g))].tolist()
                extra = int(
                    math.ceil(
                        size * 1.0 / self.samples_per_gpu / self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad indice
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)

        assert len(indices) == self.total_size

        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]

        # subsample
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
4,898
33.744681
78
py
GFocalV2
GFocalV2-master/mmdet/datasets/samplers/distributed_sampler.py
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler


class DistributedSampler(_DistributedSampler):

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle

    def __iter__(self):
        # deterministically shuffle based on epoch
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)
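# --- Added illustrative sketch: the rank-strided subsampling performed in
# __iter__ above. With total_size=8 and num_replicas=4, rank 1 receives
# indices 1 and 5 of the shuffled-and-padded index list.
indices = list(range(8))
rank, num_replicas = 1, 4
assert indices[rank:8:num_replicas] == [1, 5]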
978
32.758621
77
py
GFocalV2
GFocalV2-master/mmdet/datasets/pipelines/formating.py
from collections.abc import Sequence

import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC

from ..builder import PIPELINES


def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.
    """

    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError(f'type {type(data)} cannot be converted to tensor.')


@PIPELINES.register_module()
class ToTensor(object):
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert data in results to :obj:`torch.Tensor`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted
                to :obj:`torch.Tensor`.
        """
        for key in self.keys:
            results[key] = to_tensor(results[key])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


@PIPELINES.register_module()
class ImageToTensor(object):
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of input image is (H, W, C). The pipeline will convert
    it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
    (1, H, W).

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert image in results to :obj:`torch.Tensor`
        and transpose the channel order.

        Args:
            results (dict): Result dict contains the image data to convert.

        Returns:
            dict: The result dict contains the image converted
                to :obj:`torch.Tensor` and transposed to (C, H, W) order.
        """
        for key in self.keys:
            img = results[key]
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


@PIPELINES.register_module()
class Transpose(object):
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Call function to transpose the channel order of data in results.

        Args:
            results (dict): Result dict contains the data to transpose.

        Returns:
            dict: The result dict contains the data transposed to \
                ``self.order``.
        """
        for key in self.keys:
            results[key] = results[key].transpose(self.order)
        return results

    def __repr__(self):
        return self.__class__.__name__ + \
            f'(keys={self.keys}, order={self.order})'


@PIPELINES.register_module()
class ToDataContainer(object):
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
            dict(key='gt_labels'))``.
    """

    def __init__(self,
                 fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
                         dict(key='gt_labels'))):
        self.fields = fields

    def __call__(self, results):
        """Call function to convert data in results to
        :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data converted to \
                :obj:`mmcv.DataContainer`.
        """
        for field in self.fields:
            field = field.copy()
            key = field.pop('key')
            results[key] = DC(results[key], **field)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(fields={self.fields})'


@PIPELINES.register_module()
class DefaultFormatBundle(object):
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img",
    "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
    These fields are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - proposals: (1)to tensor, (2)to DataContainer
    - gt_bboxes: (1)to tensor, (2)to DataContainer
    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
    - gt_labels: (1)to tensor, (2)to DataContainer
    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
        (3)to DataContainer (stack=True)
    """

    def __call__(self, results):
        """Call function to transform and format common fields in results.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data that is formatted with \
                default bundle.
        """

        if 'img' in results:
            img = results['img']
            # add default meta keys
            results = self._add_default_meta_keys(results)
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore',
                    'gt_labels']:
            if key not in results:
                continue
            results[key] = DC(to_tensor(results[key]))
        if 'gt_masks' in results:
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if 'gt_semantic_seg' in results:
            results['gt_semantic_seg'] = DC(
                to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including `pad_shape`, `scale_factor` and
        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
        `Pad` are implemented during the whole pipeline.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            results (dict): Updated result dict contains the data to convert.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        results.setdefault(
            'img_norm_cfg',
            dict(
                mean=np.zeros(num_channels, dtype=np.float32),
                std=np.ones(num_channels, dtype=np.float32),
                to_rgb=False))
        return results

    def __repr__(self):
        return self.__class__.__name__


@PIPELINES.register_module()
class Collect(object):
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically keys
    is set to some subset of "img", "proposals", "gt_bboxes",
    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".

    The "img_meta" item is always populated. The contents of the "img_meta"
    dictionary depends on "meta_keys". By default this includes:

        - "img_shape": shape of the image input to the network as a tuple \
            (h, w, c). Note that images may be zero padded on the \
            bottom/right if the batch tensor is larger than this shape.

        - "scale_factor": a float indicating the preprocessing scale

        - "flip": a boolean indicating if image flip transform was used

        - "filename": path to the image file

        - "ori_shape": original shape of the image as a tuple (h, w, c)

        - "pad_shape": image shape after padding

        - "img_norm_cfg": a dict of normalization information:

            - mean - per channel mean subtraction
            - std - per channel std divisor
            - to_rgb - bool indicating if bgr was converted to rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
            'pad_shape', 'scale_factor', 'flip', 'flip_direction',
            'img_norm_cfg')``
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Call function to collect keys in results. The keys in ``meta_keys``
        will be converted to :obj:mmcv.DataContainer.

        Args:
            results (dict): Result dict contains the data to collect.

        Returns:
            dict: The result dict contains the following keys

                - keys in ``self.keys``
                - ``img_metas``
        """

        data = {}
        img_meta = {}
        for key in self.meta_keys:
            img_meta[key] = results[key]
        data['img_metas'] = DC(img_meta, cpu_only=True)
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return self.__class__.__name__ + \
            f'(keys={self.keys}, meta_keys={self.meta_keys})'


@PIPELINES.register_module()
class WrapFieldsToLists(object):
    """Wrap fields of the data dictionary into lists for evaluation.

    This class can be used as a last step of a test or validation
    pipeline for single image evaluation or inference.

    Example:
        >>> test_pipeline = [
        >>>    dict(type='LoadImageFromFile'),
        >>>    dict(type='Normalize',
                    mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True),
        >>>    dict(type='Pad', size_divisor=32),
        >>>    dict(type='ImageToTensor', keys=['img']),
        >>>    dict(type='Collect', keys=['img']),
        >>>    dict(type='WrapFieldsToLists')
        >>> ]
    """

    def __call__(self, results):
        """Call function to wrap fields into lists.

        Args:
            results (dict): Result dict contains the data to wrap.

        Returns:
            dict: The result dict where value of ``self.keys`` are wrapped \
                into list.
        """

        # Wrap dict fields into lists
        for key, val in results.items():
            results[key] = [val]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}()'
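# --- Added illustrative sketch: a typical tail of a training pipeline that
# combines the transforms above (config-style dicts; the earlier loading and
# augmentation steps are omitted placeholders).
train_pipeline_tail = [
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]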
12,033
31.969863
79
py
GFocalV2
GFocalV2-master/mmdet/utils/contextmanagers.py
import asyncio
import contextlib
import logging
import os
import time
from typing import List

import torch

logger = logging.getLogger(__name__)

DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))


@contextlib.asynccontextmanager
async def completed(trace_name='',
                    name='',
                    sleep_interval=0.05,
                    streams: List[torch.cuda.Stream] = None):
    """Async context manager that waits for work to complete on given CUDA
    streams."""
    if not torch.cuda.is_available():
        yield
        return

    stream_before_context_switch = torch.cuda.current_stream()
    if not streams:
        streams = [stream_before_context_switch]
    else:
        streams = [s if s else stream_before_context_switch for s in streams]

    end_events = [
        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
    ]

    if DEBUG_COMPLETED_TIME:
        start = torch.cuda.Event(enable_timing=True)
        stream_before_context_switch.record_event(start)

        cpu_start = time.monotonic()
    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
    grad_enabled_before = torch.is_grad_enabled()
    try:
        yield
    finally:
        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_end = time.monotonic()
        for i, stream in enumerate(streams):
            event = end_events[i]
            stream.record_event(event)

        grad_enabled_after = torch.is_grad_enabled()

        # observed change of torch.is_grad_enabled() during concurrent run of
        # async_test_bboxes code
        assert (grad_enabled_before == grad_enabled_after
                ), 'Unexpected is_grad_enabled() value change'

        are_done = [e.query() for e in end_events]
        logger.debug('%s %s completed: %s streams: %s', trace_name, name,
                     are_done, streams)
        with torch.cuda.stream(stream_before_context_switch):
            while not all(are_done):
                await asyncio.sleep(sleep_interval)
                are_done = [e.query() for e in end_events]
                logger.debug(
                    '%s %s completed: %s streams: %s',
                    trace_name,
                    name,
                    are_done,
                    streams,
                )

        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_time = (cpu_end - cpu_start) * 1000
            stream_times_ms = ''
            for i, stream in enumerate(streams):
                elapsed_time = start.elapsed_time(end_events[i])
                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                        stream_times_ms)


@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
                     trace_name='concurrent',
                     name='stream'):
    """Run code concurrently in different streams.

    :param streamqueue: asyncio.Queue instance. Queue tasks define the pool
        of streams used for concurrent execution.
    """
    if not torch.cuda.is_available():
        yield
        return

    initial_stream = torch.cuda.current_stream()

    with torch.cuda.stream(initial_stream):
        stream = await streamqueue.get()
        assert isinstance(stream, torch.cuda.Stream)

        try:
            with torch.cuda.stream(stream):
                logger.debug('%s %s is starting, stream: %s', trace_name,
                             name, stream)
                yield
                current = torch.cuda.current_stream()
                assert current == stream
                logger.debug('%s %s has finished, stream: %s', trace_name,
                             name, stream)
        finally:
            streamqueue.task_done()
            streamqueue.put_nowait(stream)
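# --- Added illustrative sketch: awaiting GPU work with the `completed`
# context manager above. `model` and `x` are hypothetical placeholders; the
# coroutine must run inside an asyncio event loop on a CUDA-enabled machine.
async def run_forward(model, x):
    async with completed('demo', 'forward'):
        out = model(x)  # kernels enqueued on the current CUDA stream
    return out  # reached only after the recorded stream events have completed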
4,077
32.42623
79
py
GFocalV2
GFocalV2-master/mmdet/utils/profiling.py
import contextlib
import sys
import time

import torch

if sys.version_info >= (3, 7):

    @contextlib.contextmanager
    def profile_time(trace_name,
                     name,
                     enabled=True,
                     stream=None,
                     end_stream=None):
        """Print time spent by CPU and GPU.

        Useful as a temporary context manager to find sweet spots of code
        suitable for async implementation.
        """
        if (not enabled) or not torch.cuda.is_available():
            yield
            return
        stream = stream if stream else torch.cuda.current_stream()
        end_stream = end_stream if end_stream else stream
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        stream.record_event(start)
        try:
            cpu_start = time.monotonic()
            yield
        finally:
            cpu_end = time.monotonic()
            end_stream.record_event(end)
            end.synchronize()
            cpu_time = (cpu_end - cpu_start) * 1000
            gpu_time = start.elapsed_time(end)
            msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
            msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
            print(msg, end_stream)
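# --- Added illustrative sketch: wrapping a block with `profile_time` above
# to print CPU and GPU timings. `a` and `b` are hypothetical CUDA tensors.
def timed_matmul(a, b):
    with profile_time('demo', 'matmul', enabled=True):
        return a @ b  # the timed region; timings are printed on exit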
1,288
31.225
73
py
qml
qml-master/docs/source/conf.py
# -*- coding: utf-8 -*-
#
# QML documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 4 14:41:04 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
print(os.getcwd())
sys.path.insert(0, os.path.abspath('../../build/lib.linux-x86_64-2.7/'))

import sphinx_rtd_theme

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.5.4'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'QML'
copyright = u'2017, Anders S. Christensen'
author = u'Anders S. Christensen'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.4.0'
# The full version, including alpha/beta/rc tags.
release = u'0.4.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'QMLdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'QML.tex', u'QML Documentation',
     u'Anders S. Christensen', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'qml', u'QML Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'QML', u'QML Documentation',
     author, 'QML', 'One line description of project.',
     'Miscellaneous'),
]

html_sidebars = {
    '**': ['globaltoc.html', 'relations.html', 'sourcelink.html',
           'searchbox.html'],
}
4,965
30.0375
100
py
GBM-perf
GBM-perf-master/v1-keep-old-before_docker/run/2a-xgboost-gpu.py
import xgboost as xgb
import pandas as pd
from sklearn import metrics
import time

dtrain = xgb.DMatrix('train-10m.libsvm')
dtest = xgb.DMatrix('test-10m.libsvm')

#params = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic'}
#params = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'updater':'grow_colmaker'}
#params = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'updater':'grow_fast_histmaker'}
#params = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'updater':'grow_gpu'}
#params = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'updater':'grow_gpu_hist'}
params = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'gpu_hist'}
num_round = 100

start = time.time()
md = xgb.train(params, dtrain, num_round)
end = time.time()
print(end - start)

phat = md.predict(dtest)
ytest = pd.read_csv("test.csv")['dep_delayed_15min']=='Y'
metrics.roc_auc_score(ytest, phat)
955
27.969697
101
py
GBM-perf
GBM-perf-master/analysis/dask/xgb-dask.py
import pandas as pd
from sklearn import metrics

from dask.distributed import Client, LocalCluster
import dask.dataframe as dd

import xgboost as xgb

cluster = LocalCluster(n_workers=16, threads_per_worker=1)
client = Client(cluster)

d_train = pd.read_csv("https://raw.githubusercontent.com/szilard/benchm-ml--data/master/int_enc/train-1m-intenc.csv")
d_test = pd.read_csv("https://raw.githubusercontent.com/szilard/benchm-ml--data/master/int_enc/test-1m-intenc.csv")

dx_train = dd.from_pandas(d_train, npartitions=16)
dx_test = dd.from_pandas(d_test, npartitions=1)

X_train = dx_train.iloc[:, :-1].to_dask_array(lengths=True)
y_train = dx_train.iloc[:,-1:].to_dask_array(lengths=True)
X_test = dx_test.iloc[:, :-1].to_dask_array(lengths=True)
y_test = dx_test.iloc[:,-1:].to_dask_array(lengths=True)

X_train.persist()
y_train.persist()
client.has_what()

dxgb_train = xgb.dask.DaskDMatrix(client, X_train, y_train)
dxgb_test = xgb.dask.DaskDMatrix(client, X_test)

param = {'objective':'binary:logistic', 'tree_method':'hist', 'max_depth':10, 'eta':0.1}
%time md = xgb.dask.train(client, param, dxgb_train, num_boost_round = 100)

y_pred = xgb.dask.predict(client, md, dxgb_test)
y_pred_loc = y_pred.compute()
y_test_loc = y_test.compute()
print(metrics.roc_auc_score(y_test_loc, y_pred_loc))
1,319
29
117
py
GBM-perf
GBM-perf-master/analysis/dask/xgb.py
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics

import xgboost as xgb

d_train = pd.read_csv("https://raw.githubusercontent.com/szilard/benchm-ml--data/master/int_enc/train-1m-intenc.csv")
d_test = pd.read_csv("https://raw.githubusercontent.com/szilard/benchm-ml--data/master/int_enc/test-1m-intenc.csv")

X_train = d_train.iloc[:, :-1].to_numpy()
y_train = d_train.iloc[:,-1:].to_numpy()
X_test = d_test.iloc[:, :-1].to_numpy()
y_test = d_test.iloc[:,-1:].to_numpy()

dxgb_train = xgb.DMatrix(X_train, label = y_train)
dxgb_test = xgb.DMatrix(X_test)

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))
858
28.62069
117
py
GBM-perf
GBM-perf-master/analysis/dask/old_encoding/xgb-intenc.py
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics

import xgboost as xgb

d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
    d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_cat+vars_num].to_numpy()
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

X_train = X_all[0:d_train.shape[0],]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0]),]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

dxgb_train = xgb.DMatrix(X_train, label = y_train)
dxgb_test = xgb.DMatrix(X_test)

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))

## m5.4xlarge 16c (8+8HT)
## Wall time: 3.29 s
## 0.7524071455769888
1,248
28.738095
101
py
GBM-perf
GBM-perf-master/analysis/dask/old_encoding/xgb-dask.py
import pandas as pd
from sklearn import metrics

from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask.array as da
from dask_ml import preprocessing

import xgboost as xgb

cluster = LocalCluster(n_workers=16, threads_per_worker=1)
client = Client(cluster)

d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])
dx_all = dd.from_pandas(d_all, npartitions=16)

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
    dx_all[col] = preprocessing.LabelEncoder().fit_transform(dx_all[col])

X_all = dx_all[vars_cat+vars_num].to_dask_array(lengths=True)
y_all = da.where((dx_all["dep_delayed_15min"]=="Y").to_dask_array(lengths=True),1,0)

X_train = X_all[0:d_train.shape[0],]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0]),]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

X_train.persist()
y_train.persist()
client.has_what()

dxgb_train = xgb.dask.DaskDMatrix(client, X_train, y_train)
dxgb_test = xgb.dask.DaskDMatrix(client, X_test)

param = {'objective':'binary:logistic', 'tree_method':'hist', 'max_depth':10, 'eta':0.1}
%time md = xgb.dask.train(client, param, dxgb_train, num_boost_round = 100)

y_pred = xgb.dask.predict(client, md, dxgb_test)
y_pred_loc = y_pred.compute()
y_test_loc = y_test.compute()
print(metrics.roc_auc_score(y_test_loc, y_pred_loc))

## m5.4xlarge 16c (8+8HT)
## Wall time: 20.5 s
## 0.7958538649110775
1,692
28.189655
101
py
GBM-perf
GBM-perf-master/analysis/dask/old_encoding/xgb-dask-deprecated.py
import pandas as pd
from sklearn import metrics

from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask.array as da
from dask_ml import preprocessing

import dask_xgboost as dxgb

cluster = LocalCluster(n_workers=16, threads_per_worker=1)
client = Client(cluster)

d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])
dx_all = dd.from_pandas(d_all, npartitions=16)

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
    dx_all[col] = preprocessing.LabelEncoder().fit_transform(dx_all[col])

X_all = dx_all[vars_cat+vars_num].to_dask_array(lengths=True)
y_all = da.where((dx_all["dep_delayed_15min"]=="Y").to_dask_array(lengths=True),1,0)

X_train = X_all[0:d_train.shape[0],]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0]),]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

X_train.persist()
y_train.persist()
client.has_what()

param = {'objective':'binary:logistic', 'tree_method':'hist', 'max_depth':10, 'eta':0.1}
%time md = dxgb.train(client, param, X_train, y_train, num_boost_round = 100)

y_pred = dxgb.predict(client, md, X_test)
y_pred_loc = y_pred.compute()
y_test_loc = y_test.compute()
print(metrics.roc_auc_score(y_test_loc, y_pred_loc))

## m5.4xlarge 16c (8+8HT)
## Wall time: 34.3 s
## 0.7928378346764724
1,581
28.849057
101
py
GBM-perf
GBM-perf-master/analysis/intenc/xgboost.py
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics

import xgboost as xgb

d_train = pd.read_csv("https://raw.githubusercontent.com/szilard/benchm-ml--data/master/int_enc/train-1m-intenc.csv")
d_test = pd.read_csv("https://raw.githubusercontent.com/szilard/benchm-ml--data/master/int_enc/test-1m-intenc.csv")

X_train = d_train.iloc[:, :-1].to_numpy()
y_train = d_train.iloc[:,-1:].to_numpy()
X_test = d_test.iloc[:, :-1].to_numpy()
y_test = d_test.iloc[:,-1:].to_numpy()

dxgb_train = xgb.DMatrix(X_train, label = y_train)
dxgb_test = xgb.DMatrix(X_test)

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))

## m5.4xlarge 16c (8+8HT)
## Wall time: 3.3 s
## 0.7527781837199401
926
27.96875
117
py
GBM-perf
GBM-perf-master/analysis/random_forest/xgboost.py
import pandas as pd
import numpy as np
from sklearn import preprocessing
from scipy import sparse
from sklearn import metrics

import xgboost as xgb

d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
    d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all_cat = preprocessing.OneHotEncoder(categories="auto").fit_transform(d_all[vars_cat])
X_all = sparse.hstack((X_all_cat, d_all[vars_num])).tocsr()
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

X_train = X_all[0:d_train.shape[0],]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0]),]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

md = xgb.XGBClassifier(max_depth=10, n_estimators=100, learning_rate=0.1, tree_method="hist", n_jobs=-1)
%time md.fit(X_train, y_train)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.confusion_matrix(y_test, y_pred>0.7))
print(metrics.roc_auc_score(y_test, y_pred))

dxgb_train = xgb.DMatrix(X_train, label = y_train)
dxgb_test = xgb.DMatrix(X_test)

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))
1,634
29.277778
101
py
GBM-perf
GBM-perf-master/analysis/rapids-ai/xgb-ohe.py
import cudf
import xgboost as xgb

import pandas as pd
import numpy as np
from scipy import sparse
from sklearn import preprocessing
from sklearn import metrics

d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
    d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all_cat = preprocessing.OneHotEncoder(categories="auto").fit_transform(d_all[vars_cat])
X_all = sparse.hstack((X_all_cat, d_all[vars_num])).tocsr()
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

X_train = X_all[0:d_train.shape[0]]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

#--- RAM 1.3G (0.7GB before ipython kernel)


## ******* CPU

dxgb_train = xgb.DMatrix(X_train, label = y_train)
dxgb_test = xgb.DMatrix(X_test)

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))

#Wall time: 8.39 s
#0.7490139621617369

#--- 1.4GB RAM


## ******* data RAM (CPU), train GPU

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'gpu_hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))

#Wall time: 7.89 s
#0.748896157113143

#--- 0.9GB GPU mem / 2.3GB RAM


#--- reset GPU (restart kernel, reload data)

## ******* all GPU (data+train)

#X_train_GPU = cudf.DataFrame.from_pandas(X_train)   ## doesn't work from sparse matrix
#y_train_GPU = cudf.Series(y_train)
#X_test_GPU = cudf.DataFrame.from_pandas(X_test)
#y_test_GPU = cudf.Series(y_test)

#X_train_GPU = cudf.DataFrame.from_pandas(d_train[vars_cat+vars_num])   ## cannot create DMatrix later with cat.vars
#y_train_GPU = cudf.Series(y_train)
#X_test_GPU = cudf.DataFrame.from_pandas(d_test[vars_cat+vars_num])
#y_test_GPU = cudf.Series(y_test)

X_train_GPU = cudf.DataFrame.from_pandas(pd.DataFrame(data=X_train.todense()))
y_train_GPU = cudf.Series(y_train)
X_test_GPU = cudf.DataFrame.from_pandas(pd.DataFrame(data=X_test.todense()))
y_test_GPU = cudf.Series(y_test)

# TODO: try CuPy sparse mx

#--- 7GB GPU mem / 2GB RAM (max during transfer 7GB RAM)

dxgb_train_GPU = xgb.DMatrix(X_train_GPU, label = y_train_GPU)
dxgb_test_GPU = xgb.DMatrix(X_test_GPU)

#--- 12GB GPU mem

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'gpu_hist'}
%time md = xgb.train(param, dxgb_train_GPU, num_boost_round = 100)
#!!!! crashes kernel (too much GPU mem?)

y_pred = md.predict(dxgb_test_GPU)
print(metrics.roc_auc_score(y_test, y_pred))
3,112
28.932692
121
py
GBM-perf
GBM-perf-master/analysis/rapids-ai/xgb-intenc.py
import cudf
import xgboost as xgb

import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics

d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
    d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

X_train = X_all[0:d_train.shape[0]]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

#--- RAM 1.1GB (0.7GB before python kernel)


## ******* CPU

dxgb_train = xgb.DMatrix(X_train, label = y_train)
dxgb_test = xgb.DMatrix(X_test)

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))

#Wall time: 5.83 s
#0.7523232418031532

#--- 1.2GB RAM


## ******* data RAM (CPU), train GPU

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'gpu_hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))

#Wall time: 4.57 s (1st time more because of copy to GPU)
#Wall time: 3.06 s
#0.7527066683633643

#--- 0.85GB GPU mem


#--- reset GPU (restart kernel, reload data)

## ******* all GPU (data+train)

X_train_GPU = cudf.DataFrame.from_pandas(X_train)
y_train_GPU = cudf.Series(y_train)
X_test_GPU = cudf.DataFrame.from_pandas(X_test)
y_test_GPU = cudf.Series(y_test)

#--- 0.8 GPU mem / 2GB RAM

dxgb_train_GPU = xgb.DMatrix(X_train_GPU, label = y_train_GPU)
dxgb_test_GPU = xgb.DMatrix(X_test_GPU)

#--- 0.9GB GPU mem

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'gpu_hist'}
%time md = xgb.train(param, dxgb_train_GPU, num_boost_round = 100)

y_pred = md.predict(dxgb_test_GPU)
print(metrics.roc_auc_score(y_test, y_pred))

#Wall time: 3.16 s
#0.7527066683633643

#--- 1GB GPU mem

#type(X_train)       # pd DF
#type(y_train)       # np array
#type(X_train_GPU)   # cuDF
#type(y_train_GPU)   # cuSer
2,532
24.585859
105
py
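The per-column LabelEncoder loop used in these scripts can be replaced by a single vectorized encoder. A small sketch, assuming scikit-learn >= 0.20 (where OrdinalEncoder was added); it fits on the concatenated train+test frame exactly as the loop above does:

from sklearn.preprocessing import OrdinalEncoder

# One encoder over all categorical columns at once; output is an array of
# integer codes (as floats), equivalent to the per-column LabelEncoder loop.
enc = OrdinalEncoder()
d_all[vars_cat] = enc.fit_transform(d_all[vars_cat])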
GBM-perf
GBM-perf-master/analysis/tensorflow-df/tfdf.py
import tensorflow_decision_forests as tfdf

import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_train["dep_delayed_15min"] = np.where(d_train["dep_delayed_15min"]=="Y",1,0)
d_test["dep_delayed_15min"] = np.where(d_test["dep_delayed_15min"]=="Y",1,0)

dtf_train = tfdf.keras.pd_dataframe_to_tf_dataset(d_train, label="dep_delayed_15min")
dtf_test = tfdf.keras.pd_dataframe_to_tf_dataset(d_test, label="dep_delayed_15min")

md = tfdf.keras.GradientBoostedTreesModel(max_depth=10, num_trees=100, shrinkage=0.1)
%time md.fit(x=dtf_train)

y_pred = md.predict(dtf_test)
print(metrics.roc_auc_score(d_test["dep_delayed_15min"], y_pred))
836
28.892857
85
py
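Because TF-DF models are Keras models, the AUC above could also be computed inside TensorFlow instead of round-tripping predictions through sklearn. A sketch of that alternative (compiling a metric after fit is the pattern shown in the TF-DF tutorials; treat the exact metric configuration as an assumption):

# Attach Keras' built-in AUC metric and evaluate on the tf.data dataset directly.
md.compile(metrics=[tf.keras.metrics.AUC(name="auc")])
print(md.evaluate(dtf_test, return_dict=True))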
GBM-perf
GBM-perf-master/analysis/tabnet/tabnet-base.py
from pytorch_tabnet.tab_model import TabNetClassifier

import torch
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

X_train = X_all[0:d_train.shape[0]].to_numpy()
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]
cat_emb_dim = np.floor(np.log(cat_dims)).astype(int)

md = TabNetClassifier(cat_idxs=cat_idxs, cat_dims=cat_dims, cat_emb_dim=cat_emb_dim,
       ## optimizer_fn=torch.optim.Adam,
       ## optimizer_params=dict(lr=2e-2),
       ## mask_type='sparsemax',
       n_steps=1,
)

%%time
md.fit(
  X_train=X_train, y_train=y_train,
  max_epochs=10, patience=0,
  ## batch_size=1024, virtual_batch_size=128,
  ## weights=0,
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
1,659
28.642857
80
py
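The cat_emb_dim heuristic used above gives a column with c levels an embedding of floor(ln(c)) dimensions. A standalone numeric sketch (the category counts here are illustrative, not taken from the dataset):

import numpy as np

# e.g. 12 months -> 2 dims, 31 days -> 3 dims, 7 weekdays -> 1 dim,
# a few hundred airports -> 5 dims.
print(np.floor(np.log([12, 31, 7, 22, 300, 300])).astype(int))  # -> [2 3 1 3 5 5]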
GBM-perf
GBM-perf-master/analysis/tabnet/tabnet.py
from pytorch_tabnet.tab_model import TabNetClassifier

import torch
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

X_train = X_all[0:d_train.shape[0]].to_numpy()
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]
cat_emb_dim = np.floor(np.log(cat_dims)).astype(int)

md = TabNetClassifier(cat_idxs=cat_idxs, cat_dims=cat_dims, cat_emb_dim=cat_emb_dim,
       ## optimizer_fn=torch.optim.Adam,
       ## optimizer_params=dict(lr=2e-2),
       ## mask_type='sparsemax',
       n_steps=1,
)

%%time
md.fit(
  X_train=X_train, y_train=y_train,
  max_epochs=10, patience=0,
  ## batch_size=1024, virtual_batch_size=128,
  ## weights=0,
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
1,658
29.163636
80
py
GBM-perf
GBM-perf-master/analysis/tabnet/tabnet-20best.py
from pytorch_tabnet.tab_model import TabNetClassifier

import torch
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

idx_trvl = np.random.choice(["train", "valid"], p =[.8, .2], size=(d_train.shape[0],))

X_train = X_all[0:d_train.shape[0]][idx_trvl=="train"].to_numpy()
y_train = y_all[0:d_train.shape[0]][idx_trvl=="train"]
X_valid = X_all[0:d_train.shape[0]][idx_trvl=="valid"].to_numpy()
y_valid = y_all[0:d_train.shape[0]][idx_trvl=="valid"]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]
cat_emb_dim = np.floor(np.log(cat_dims)).astype(int)

md = TabNetClassifier(cat_idxs=cat_idxs, cat_dims=cat_dims, cat_emb_dim=cat_emb_dim,
       ## optimizer_fn=torch.optim.Adam,
       ## optimizer_params=dict(lr=2e-2),
       ## mask_type='sparsemax',
       n_steps=1,
)

%%time
md.fit(
  X_train=X_train, y_train=y_train,
  eval_set=[(X_train, y_train), (X_valid, y_valid)],
  eval_name=['train', 'valid'],
  eval_metric=['auc'],
  max_epochs=20, patience=20,
  ## batch_size=1024, virtual_batch_size=128,
  ## weights=0,
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
2,019
32.114754
86
py
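The 80/20 holdout above is drawn with an unseeded np.random.choice mask, so it is not reproducible across runs. A sketch of an equivalent split with a fixed seed and label stratification (names reuse the script's variables):

from sklearn.model_selection import train_test_split

# Same 80/20 holdout, but seeded and stratified on the label so class
# balance is preserved in both partitions.
X_tr, X_val, y_tr, y_val = train_test_split(
    X_all[0:d_train.shape[0]].to_numpy(), y_all[0:d_train.shape[0]],
    test_size=0.2, random_state=42, stratify=y_all[0:d_train.shape[0]])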
GBM-perf
GBM-perf-master/analysis/tabnet/tabnet-best-evil.py
from pytorch_tabnet.tab_model import TabNetClassifier

import torch
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

X_train = X_all[0:d_train.shape[0]].to_numpy()
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]
cat_emb_dim = np.floor(np.log(cat_dims)).astype(int)

md = TabNetClassifier(cat_idxs=cat_idxs, cat_dims=cat_dims, cat_emb_dim=cat_emb_dim,
       ## optimizer_fn=torch.optim.Adam,
       ## optimizer_params=dict(lr=2e-2),
       ## mask_type='sparsemax',
       n_steps=1,
)

%%time
md.fit(
  X_train=X_train, y_train=y_train,
  eval_set=[(X_train, y_train), (X_test, y_test)],
  eval_name=['train', 'test_EVIL'],
  eval_metric=['auc'],
  max_epochs=100, patience=100,
  ## batch_size=1024, virtual_batch_size=128,
  ## weights=0,
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
1,776
30.175439
80
py
GBM-perf
GBM-perf-master/analysis/tabnet/old-keep/tabnet-estop.py
from pytorch_tabnet.tab_model import TabNetClassifier

import torch
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]

idx_trvl = np.random.choice(["train", "valid"], p =[.8, .2], size=(d_train.shape[0],))

X_train = X_all[0:d_train.shape[0]][idx_trvl=="train"].to_numpy()
y_train = y_all[0:d_train.shape[0]][idx_trvl=="train"]
X_valid = X_all[0:d_train.shape[0]][idx_trvl=="valid"].to_numpy()
y_valid = y_all[0:d_train.shape[0]][idx_trvl=="valid"]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

md = TabNetClassifier(cat_idxs=cat_idxs, cat_dims=cat_dims, cat_emb_dim=1,
       optimizer_fn=torch.optim.Adam,
       optimizer_params=dict(lr=2e-2),
       scheduler_params={"step_size":50,     # how to use learning rate scheduler
                         "gamma":0.9},
       scheduler_fn=torch.optim.lr_scheduler.StepLR,
       mask_type='entmax'   # "sparsemax"
)

%%time
md.fit(
  X_train=X_train, y_train=y_train,
  eval_set=[(X_train, y_train), (X_valid, y_valid)],
  eval_name=['train', 'valid'],
  eval_metric=['auc'],
  max_epochs=1000, patience=3,
  batch_size=1024, virtual_batch_size=128,
  num_workers=0,
  weights=1,
  drop_last=False
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
2,176
32.492308
93
py
GBM-perf
GBM-perf-master/analysis/tabnet/old-keep/tabnet-min.py
from pytorch_tabnet.tab_model import TabNetClassifier

import torch
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]

X_train = X_all[0:d_train.shape[0]].to_numpy()
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

md = TabNetClassifier(cat_idxs=cat_idxs, cat_dims=cat_dims, cat_emb_dim=1
)

%%time
md.fit(
  X_train=X_train, y_train=y_train,
  max_epochs=10, patience=0
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
1,330
26.163265
80
py
GBM-perf
GBM-perf-master/analysis/tabnet/old-keep/tabnet-1cLR.py
from pytorch_tabnet.tab_model import TabNetClassifier

import torch
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]

X_train = X_all[0:d_train.shape[0]].to_numpy()
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

MAX_EPOCH = 10
BS = 1024

md = TabNetClassifier(cat_idxs=cat_idxs, cat_dims=cat_dims, cat_emb_dim=1,
       ## optimizer_fn=torch.optim.Adam,
       ## optimizer_params=dict(lr=2e-2),
       scheduler_fn=torch.optim.lr_scheduler.OneCycleLR,
       scheduler_params=dict(max_lr=0.05,
                             steps_per_epoch=int(X_train.shape[0] / BS),
                             epochs=MAX_EPOCH,
                             is_batch_level=True),
       mask_type='entmax'   # "sparsemax"
)

%%time
md.fit(
  X_train=X_train, y_train=y_train,
  max_epochs=MAX_EPOCH, patience=0,
  ## batch_size=1024, virtual_batch_size=128,
  ## weights=0,
  drop_last = True
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
1,973
31.360656
88
py
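A quick arithmetic check for the OneCycleLR setup above: with drop_last=True the scheduler is stepped once per full batch, so steps_per_epoch passed to scheduler_params has to equal the loader's batch count or the one-cycle schedule ends early or late. A sketch of the numbers (assuming the 100k-row train-0.1m.csv):

# BS = 1024 -> 100_000 // 1024 = 97 full batches per epoch (drop_last=True)
n_train = 100_000
steps_per_epoch = n_train // 1024
print(steps_per_epoch)           # 97
print(steps_per_epoch * 10)      # 970 scheduler steps over MAX_EPOCH = 10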
GBM-perf
GBM-perf-master/analysis/tabnet/old-keep/tabnet-epoch10.py
from pytorch_tabnet.tab_model import TabNetClassifier

import torch
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]

X_train = X_all[0:d_train.shape[0]].to_numpy()
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

md = TabNetClassifier(cat_idxs=cat_idxs, cat_dims=cat_dims, cat_emb_dim=1,
       optimizer_fn=torch.optim.Adam,
       optimizer_params=dict(lr=2e-2),
       scheduler_params={"step_size":50,     # how to use learning rate scheduler
                         "gamma":0.9},
       scheduler_fn=torch.optim.lr_scheduler.StepLR,
       mask_type='entmax'   # "sparsemax"
)

%%time
md.fit(
  X_train=X_train, y_train=y_train,
  eval_set=[(X_train, y_train)],
  eval_name=['train'],
  eval_metric=['auc'],
  max_epochs=10, patience=0,
  batch_size=1024, virtual_batch_size=128,
  num_workers=0,
  weights=1,
  drop_last=False
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))
1,897
30.633333
93
py
GBM-perf
GBM-perf-master/analysis/tabnet/xgb/xgb.py
import xgboost as xgb
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics


d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-0.1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")

d_all = pd.concat([d_train,d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

X_train = X_all[0:d_train.shape[0]]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

dxgb_train = xgb.DMatrix(X_train, label = y_train)
dxgb_test = xgb.DMatrix(X_test)

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic', 'tree_method':'hist'}
%time md = xgb.train(param, dxgb_train, num_boost_round = 100)

y_pred = md.predict(dxgb_test)
print(metrics.roc_auc_score(y_test, y_pred))
1,156
27.925
101
py
GBM-perf
GBM-perf-master/analysis/h2o4gpu/xgboost.py
import pandas as pd
import numpy as np
from sklearn import preprocessing
from scipy import sparse
from sklearn import metrics
import xgboost as xgb
import time


d_train = pd.read_csv("train.csv")
d_test = pd.read_csv("test.csv")

d = pd.concat([d_train, d_test])

vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
  d[col] = preprocessing.LabelEncoder().fit_transform(d[col])

X_cat = preprocessing.OneHotEncoder().fit_transform(d[vars_cat])
X = sparse.hstack((X_cat, d[vars_num]), format = "csr")
y = np.where(d["dep_delayed_15min"]=="Y",1,0)

X_train = X[0:d_train.shape[0],:]
X_test = X[d_train.shape[0]:,:]
y_train = y[0:d_train.shape[0]]
y_test = y[d_train.shape[0]:]

dxgb_train = xgb.DMatrix(X_train, label = y_train)
dxgb_test = xgb.DMatrix(X_test)

param = {'max_depth':10, 'eta':0.1, 'objective':'binary:logistic',
         'tree_method':'gpu_hist', 'silent':1}

start = time.time()
md = xgb.train(param, dxgb_train, num_boost_round = 100)
end = time.time()
print(end - start)

y_pred = md.predict(dxgb_test)
metrics.roc_auc_score(y_test, y_pred)
1,234
23.215686
83
py
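The manual start/end time.time() pair above is easy to get wrong when timing several sections. A small reusable sketch with a context manager (pure standard library; names reuse the script's variables):

from contextlib import contextmanager
import time

@contextmanager
def timer(label):
    # Prints elapsed wall time for the enclosed block.
    start = time.time()
    yield
    print(f"{label}: {time.time() - start:.2f}s")

with timer("xgb.train"):
    md = xgb.train(param, dxgb_train, num_boost_round=100)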
TempoQR
TempoQR-main/qa_datasets.py
from pathlib import Path
import pkg_resources
import pickle
from collections import defaultdict
from typing import Dict, Tuple, List
import json
import numpy as np
import torch
# from qa_models import QA_model
import utils
from tqdm import tqdm
from transformers import RobertaTokenizer
from transformers import DistilBertTokenizer
from transformers import BertTokenizer
import random
from torch.utils.data import Dataset, DataLoader
# from nltk import word_tokenize
# warning: padding id 0 is being used, can have issue like in Tucker
# however since so many entities (and timestamps?), it may not pose problem
import pdb
from copy import deepcopy
from collections import defaultdict
import random
from hard_supervision_functions import retrieve_times


class QA_Dataset(Dataset):
    def __init__(self, split, dataset_name, tokenization_needed=True):
        filename = 'data/{dataset_name}/questions/{split}.pickle'.format(
            dataset_name=dataset_name,
            split=split
        )
        questions = pickle.load(open(filename, 'rb'))
        # probably change for bert/roberta?
        self.tokenizer_class = DistilBertTokenizer
        self.tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
        self.all_dicts = utils.getAllDicts(dataset_name)
        print('Total questions = ', len(questions))
        self.data = questions
        self.tokenization_needed = tokenization_needed

    def getEntitiesLocations(self, question):
        question_text = question['question']
        entities = question['entities']
        ent2id = self.all_dicts['ent2id']
        loc_ent = []
        for e in entities:
            e_id = ent2id[e]
            location = question_text.find(e)
            loc_ent.append((location, e_id))
        return loc_ent

    def getTimesLocations(self, question):
        question_text = question['question']
        times = question['times']
        ts2id = self.all_dicts['ts2id']
        loc_time = []
        for t in times:
            t_id = ts2id[(t,0,0)] + len(self.all_dicts['ent2id'])  # add num entities
            location = question_text.find(str(t))
            loc_time.append((location, t_id))
        return loc_time

    def isTimeString(self, s):
        # todo: can't do len == 4 since 3 digit times also there
        if 'Q' not in s:
            return True
        else:
            return False

    def textToEntTimeId(self, text):
        if self.isTimeString(text):
            t = int(text)
            ts2id = self.all_dicts['ts2id']
            t_id = ts2id[(t,0,0)] + len(self.all_dicts['ent2id'])
            return t_id
        else:
            ent2id = self.all_dicts['ent2id']
            e_id = ent2id[text]
            return e_id

    def getOrderedEntityTimeIds(self, question):
        loc_ent = self.getEntitiesLocations(question)
        loc_time = self.getTimesLocations(question)
        loc_all = loc_ent + loc_time
        loc_all.sort()
        ordered_ent_time = [x[1] for x in loc_all]
        return ordered_ent_time

    def entitiesToIds(self, entities):
        output = []
        ent2id = self.all_dicts['ent2id']
        for e in entities:
            output.append(ent2id[e])
        return output

    def getIdType(self, id):
        if id < len(self.all_dicts['ent2id']):
            return 'entity'
        else:
            return 'time'

    def getEntityToText(self, entity_wd_id):
        return self.all_dicts['wd_id_to_text'][entity_wd_id]

    def getEntityIdToText(self, id):
        ent = self.all_dicts['id2ent'][id]
        return self.getEntityToText(ent)

    def getEntityIdToWdId(self, id):
        return self.all_dicts['id2ent'][id]

    def timesToIds(self, times):
        output = []
        ts2id = self.all_dicts['ts2id']
        for t in times:
            output.append(ts2id[(t, 0, 0)])
        return output

    def getAnswersFromScores(self, scores, largest=True, k=10):
        _, ind = torch.topk(scores, k, largest=largest)
        predict = ind
        answers = []
        for a_id in predict:
            a_id = a_id.item()
            type = self.getIdType(a_id)
            if type == 'entity':
                # answers.append(self.getEntityIdToText(a_id))
                answers.append(self.getEntityIdToWdId(a_id))
            else:
                time_id = a_id - len(self.all_dicts['ent2id'])
                time = self.all_dicts['id2ts'][time_id]
                answers.append(time[0])
        return answers

    def getAnswersFromScoresWithScores(self, scores, largest=True, k=10):
        s, ind = torch.topk(scores, k, largest=largest)
        predict = ind
        answers = []
        for a_id in predict:
            a_id = a_id.item()
            type = self.getIdType(a_id)
            if type == 'entity':
                # answers.append(self.getEntityIdToText(a_id))
                answers.append(self.getEntityIdToWdId(a_id))
            else:
                time_id = a_id - len(self.all_dicts['ent2id'])
                time = self.all_dicts['id2ts'][time_id]
                answers.append(time[0])
        return s, answers

    # from pytorch Transformer:
    # If a BoolTensor is provided, the positions with the value of True will be ignored
    # while the position with the value of False will be unchanged.
    #
    # so we want to pad with True
    def padding_tensor(self, sequences, max_len = -1):
        """
        :param sequences: list of tensors
        :return:
        """
        num = len(sequences)
        if max_len == -1:
            max_len = max([s.size(0) for s in sequences])
        out_dims = (num, max_len)
        out_tensor = sequences[0].data.new(*out_dims).fill_(0)
        # mask = sequences[0].data.new(*out_dims).fill_(0)
        mask = torch.ones((num, max_len), dtype=torch.bool)  # fills with True
        for i, tensor in enumerate(sequences):
            length = tensor.size(0)
            out_tensor[i, :length] = tensor
            mask[i, :length] = False  # fills good area with False
        return out_tensor, mask

    def toOneHot(self, indices, vec_len):
        indices = torch.LongTensor(indices)
        one_hot = torch.FloatTensor(vec_len)
        one_hot.zero_()
        one_hot.scatter_(0, indices, 1)
        return one_hot

    def prepare_data(self, data):
        # we want to prepare answers lists for each question
        # then at batch prep time, we just stack these
        # and use scatter
        question_text = []
        entity_time_ids = []
        num_total_entities = len(self.all_dicts['ent2id'])
        answers_arr = []
        for question in data:
            # first pp is question text
            # needs to be changed after making PD dataset
            # to randomly sample from list
            q_text = question['paraphrases'][0]
            question_text.append(q_text)
            et_id = self.getOrderedEntityTimeIds(question)
            entity_time_ids.append(torch.tensor(et_id, dtype=torch.long))
            if question['answer_type'] == 'entity':
                answers = self.entitiesToIds(question['answers'])
            else:
                # adding num_total_entities to each time id
                answers = [x + num_total_entities for x in self.timesToIds(question['answers'])]
            answers_arr.append(answers)
        # answers_arr = self.get_stacked_answers_long(answers_arr)
        return {'question_text': question_text,
                'entity_time_ids': entity_time_ids,
                'answers_arr': answers_arr}

    def is_template_keyword(self, word):
        if '{' in word and '}' in word:
            return True
        else:
            return False

    def get_keyword_dict(self, template, nl_question):
        template_tokenized = self.tokenize_template(template)
        keywords = []
        for word in template_tokenized:
            if not self.is_template_keyword(word):
                # replace only first occurrence
                nl_question = nl_question.replace(word, '*', 1)
            else:
                keywords.append(word[1:-1])  # no brackets
        text_for_keywords = []
        for word in nl_question.split('*'):
            if word != '':
                text_for_keywords.append(word)
        keyword_dict = {}
        for keyword, text in zip(keywords, text_for_keywords):
            keyword_dict[keyword] = text
        return keyword_dict

    def addEntityAnnotation(self, data):
        for i in range(len(data)):
            question = data[i]
            keyword_dicts = []  # we want for each paraphrase
            template = question['template']
            # for nl_question in question['paraphrases']:
            nl_question = question['paraphrases'][0]
            keyword_dict = self.get_keyword_dict(template, nl_question)
            keyword_dicts.append(keyword_dict)
            data[i]['keyword_dicts'] = keyword_dicts
            # print(keyword_dicts)
            # print(template, nl_question)
        return data

    def tokenize_template(self, template):
        output = []
        buffer = ''
        i = 0
        while i < len(template):
            c = template[i]
            if c == '{':
                if buffer != '':
                    output.append(buffer)
                    buffer = ''
                while template[i] != '}':
                    buffer += template[i]
                    i += 1
                buffer += template[i]
                output.append(buffer)
                buffer = ''
            else:
                buffer += c
            i += 1
        if buffer != '':
            output.append(buffer)
        return output


class QA_Dataset_Baseline(QA_Dataset):
    def __init__(self, split, dataset_name, tokenization_needed=True):
        super().__init__(split, dataset_name, tokenization_needed)
        print('Preparing data for split %s' % split)
        # self.data = self.data[:30000]
        # new_data = []
        # # qn_type = 'simple_time'
        # qn_type = 'simple_entity'
        # print('Only {} questions'.format(qn_type))
        # for qn in self.data:
        #     if qn['type'] == qn_type:
        #         new_data.append(qn)
        # self.data = new_data
        ents = self.all_dicts['ent2id'].keys()
        self.all_dicts['tsstr2id'] = {str(k[0]):v for k,v in self.all_dicts['ts2id'].items()}
        times = self.all_dicts['tsstr2id'].keys()
        rels = self.all_dicts['rel2id'].keys()

        self.prepared_data = self.prepare_data_(self.data)
        self.num_total_entities = len(self.all_dicts['ent2id'])
        self.num_total_times = len(self.all_dicts['ts2id'])
        self.answer_vec_size = self.num_total_entities + self.num_total_times

    def prepare_data_(self, data):
        # we want to prepare answers lists for each question
        # then at batch prep time, we just stack these
        # and use scatter
        question_text = []
        heads = []
        tails = []
        times = []
        num_total_entities = len(self.all_dicts['ent2id'])
        answers_arr = []
        ent2id = self.all_dicts['ent2id']
        self.data_ids_filtered = []
        # self.data=[]
        for i, question in enumerate(data):
            self.data_ids_filtered.append(i)
            # first pp is question text
            # needs to be changed after making PD dataset
            # to randomly sample from list
            q_text = question['paraphrases'][0]
            entities_list_with_locations = self.getEntitiesLocations(question)
            entities_list_with_locations.sort()
            entities = [id for location, id in entities_list_with_locations]  # ordering necessary otherwise set->list conversion causes randomness
            head = entities[0]  # take an entity
            if len(entities) > 1:
                tail = entities[1]
            else:
                tail = entities[0]
            times_in_question = question['times']
            if len(times_in_question) > 0:
                time = self.timesToIds(times_in_question)[0]  # take a time. if no time then 0
                # exit(0)
            else:
                time = 0
            time += num_total_entities
            heads.append(head)
            times.append(time)
            tails.append(tail)
            question_text.append(q_text)
            if question['answer_type'] == 'entity':
                answers = self.entitiesToIds(question['answers'])
            else:
                # adding num_total_entities to each time id
                answers = [x + num_total_entities for x in self.timesToIds(question['answers'])]
            answers_arr.append(answers)
        # answers_arr = self.get_stacked_answers_long(answers_arr)
        self.data = [self.data[idx] for idx in self.data_ids_filtered]
        return {'question_text': question_text,
                'head': heads,
                'tail': tails,
                'time': times,
                'answers_arr': answers_arr}

    def print_prepared_data(self):
        for k, v in self.prepared_data.items():
            print(k, v)

    def __len__(self):
        return len(self.data)
        # return len(self.prepared_data['question_text'])

    def __getitem__(self, index):
        data = self.prepared_data
        question_text = data['question_text'][index]
        head = data['head'][index]
        tail = data['tail'][index]
        time = data['time'][index]
        answers_arr = data['answers_arr'][index]
        answers_single = random.choice(answers_arr)
        return question_text, head, tail, time, answers_single  # ,answers_khot

    def _collate_fn(self, items):
        batch_sentences = [item[0] for item in items]
        b = self.tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt")
        heads = torch.from_numpy(np.array([item[1] for item in items]))
        tails = torch.from_numpy(np.array([item[2] for item in items]))
        times = torch.from_numpy(np.array([item[3] for item in items]))
        answers_single = torch.from_numpy(np.array([item[4] for item in items]))
        return b['input_ids'], b['attention_mask'], heads, tails, times, answers_single

    def get_dataset_ques_info(self):
        type2num = {}
        for question in self.data:
            if question["type"] not in type2num:
                type2num[question["type"]] = 0
            type2num[question["type"]] += 1
        return {"type2num": type2num, "total_num": len(self.data_ids_filtered)}.__str__()


# replace entity mention tokens
# rather than add + layernorm
class QA_Dataset_TempoQR(QA_Dataset):
    def __init__(self, split, dataset_name, args, tokenization_needed=True):
        super().__init__(split, dataset_name, tokenization_needed)
        print('Preparing data for split %s' % split)
        ents = self.all_dicts['ent2id'].keys()
        self.all_dicts['tsstr2id'] = {str(k[0]):v for k,v in self.all_dicts['ts2id'].items()}
        times = self.all_dicts['tsstr2id'].keys()
        rels = self.all_dicts['rel2id'].keys()
        self.split = split

        # This is hard supervision
        # args: given TKG, whether to corrupt hard, and how to use the retrieved timestamps
        self.data = retrieve_times(args.tkg_file, args.dataset_name, self.data, args.corrupt_hard, args.fuse)

        self.data = self.addEntityAnnotation(self.data)
        self.num_total_entities = len(self.all_dicts['ent2id'])
        self.num_total_times = len(self.all_dicts['ts2id'])
        self.padding_idx = self.num_total_entities + self.num_total_times  # padding id for embedding of ent/time
        self.answer_vec_size = self.num_total_entities + self.num_total_times

        self.prepared_data = self.prepare_data2(self.data)

    def __len__(self):
        return len(self.data)

    def getEntityTimeTextIds(self, question, pp_id=0):
        keyword_dict = question['keyword_dicts'][pp_id]
        keyword_id_dict = question['annotation']  # this does not depend on paraphrase
        output_text = []
        output_ids = []
        entity_time_keywords = set(['head', 'tail', 'time', 'event_head', 'time1', 'time2'])
        # print(keyword_dict, keyword_id_dict)
        for keyword, value in keyword_dict.items():
            if keyword in entity_time_keywords:
                wd_id_or_time = keyword_id_dict[keyword]
                output_text.append(value)
                output_ids.append(wd_id_or_time)
        # print(output_text, output_ids)
        return output_text, output_ids

    def get_entity_aware_tokenization(self, nl_question, ent_times, ent_times_ids):
        # what we want finally is that after proper tokenization
        # of nl question, we know which indices are beginning tokens
        # of entities and times in the question
        index_et_pairs = []
        index_et_text_pairs = []
        for e_text, e_id in zip(ent_times, ent_times_ids):
            location = nl_question.find(e_text)
            pair = (location, e_id)
            index_et_pairs.append(pair)
            pair = (location, e_text)
            index_et_text_pairs.append(pair)
        index_et_pairs.sort()
        index_et_text_pairs.sort()
        my_tokenized_question = []
        start_index = 0
        arr = []
        for pair, pair_id in zip(index_et_text_pairs, index_et_pairs):
            end_index = pair[0]
            if nl_question[start_index: end_index] != '':
                my_tokenized_question.append(nl_question[start_index: end_index])
                arr.append(self.padding_idx)
            start_index = end_index
            end_index = start_index + len(pair[1])
            # todo: assuming entity name can't be blank
            # my_tokenized_question.append(nl_question[start_index: end_index])
            my_tokenized_question.append(self.tokenizer.mask_token)
            matrix_id = self.textToEntTimeId(pair_id[1])  # get id in embedding matrix
            arr.append(matrix_id)
            start_index = end_index
        if nl_question[start_index:] != '':
            my_tokenized_question.append(nl_question[start_index:])
            arr.append(self.padding_idx)
        tokenized, valid_ids = self.tokenize(my_tokenized_question)
        entity_time_final = []
        index = 0
        for vid in valid_ids:
            if vid == 0:
                entity_time_final.append(self.padding_idx)
            else:
                entity_time_final.append(arr[index])
                index += 1
        entity_mask = []  # want 0 if entity, 1 if not, since will multiply this later with word embedding
        for x in entity_time_final:
            if x == self.padding_idx:
                entity_mask.append(1.)
            else:
                entity_mask.append(0.)
        # print(entity_time_final)
        return tokenized, entity_time_final, entity_mask

    def prepare_data2(self, data):
        # we want to prepare answers lists for each question
        # then at batch prep time, we just stack these
        # and use scatter
        heads = []
        times = []
        start_times = []
        end_times = []
        tails = []
        tails2 = []
        question_text = []
        tokenized_question = []
        entity_time_ids_tokenized_question = []
        entity_mask_tokenized_question = []
        pp_id = 0
        num_total_entities = len(self.all_dicts['ent2id'])
        answers_arr = []
        for question in tqdm(data):
            # randomly sample pp
            # in test there is only 1 pp, so always pp_id=0
            # TODO: this random is causing assertion bug later on
            # pp_id = random.randint(0, len(question['paraphrases']) - 1)
            pp_id = 0
            nl_question = question['paraphrases'][pp_id]
            q_text = nl_question
            et_text, et_ids = self.getEntityTimeTextIds(question, pp_id)
            entities_list_with_locations = self.getEntitiesLocations(question)
            entities_list_with_locations.sort()
            entities = [id for location, id in entities_list_with_locations]  # ordering necessary otherwise set->list conversion causes randomness
            head = entities[0]  # take an entity
            if len(entities) > 1:
                tail = entities[1]
                if len(entities) > 2:
                    tail2 = entities[2]
                else:
                    tail2 = tail
            else:
                tail = entities[0]
                tail2 = tail
            times_in_question = question['times']
            if len(times_in_question) > 0:
                time = self.timesToIds(times_in_question)[0]  # take a time. if no time then 0
                start_time = time
                end_time = time
                # exit(0)
            else:
                time = 0
                # check for retrieved timestamps
                if len(question['fact']) > 0:
                    ts = []
                    # add all timestamps and sort
                    for f in question['fact']:
                        for t in range(int(f[0]), int(f[1])+1):
                            ts.append(t)
                    ts = sorted(ts)
                    sorted_times = self.timesToIds(ts)
                    try:
                        start_time = sorted_times[0]  # for random random.choice(sorted_times)
                    except:
                        start_time = 0
                    try:
                        end_time = sorted_times[-1]
                    except:
                        end_time = 0
                else:
                    start_time = 0
                    end_time = 0
                    # print('No time in qn!')
            time += num_total_entities
            heads.append(head)
            times.append(time)
            start_times.append(start_time)
            end_times.append(end_time)
            tails.append(tail)
            tails2.append(tail2)
            tokenized, entity_time_final, entity_mask = self.get_entity_aware_tokenization(nl_question, et_text, et_ids)
            assert len(tokenized) == len(entity_time_final)
            question_text.append(nl_question)
            tokenized_question.append(self.tokenizer.convert_tokens_to_ids(tokenized))
            entity_mask_tokenized_question.append(entity_mask)
            entity_time_ids_tokenized_question.append(entity_time_final)
            if question['answer_type'] == 'entity':
                answers = self.entitiesToIds(question['answers'])
            else:
                # adding num_total_entities to each time id
                answers = [x + num_total_entities for x in self.timesToIds(question['answers'])]
            answers_arr.append(answers)
        return {'question_text': question_text,
                'tokenized_question': tokenized_question,
                'entity_time_ids': entity_time_ids_tokenized_question,
                'entity_mask': entity_mask_tokenized_question,
                'head': heads,
                'tail': tails,
                'time': times,
                'start_time': start_times,
                'end_time': end_times,
                'tail2': tails2,
                'answers_arr': answers_arr}

    # tokenization function taken from NER code
    def tokenize(self, words):
        """ tokenize input"""
        tokens = []
        valid_positions = []
        tokens.append(self.tokenizer.cls_token)
        valid_positions.append(0)
        for i, word in enumerate(words):
            token = self.tokenizer.tokenize(word)
            tokens.extend(token)
            for i in range(len(token)):
                if i == 0:
                    valid_positions.append(1)
                else:
                    valid_positions.append(0)
        tokens.append(self.tokenizer.sep_token)
        valid_positions.append(0)
        return tokens, valid_positions

    def __getitem__(self, index):
        data = self.prepared_data
        question_text = data['question_text'][index]
        entity_time_ids = np.array(data['entity_time_ids'][index], dtype=np.long)
        answers_arr = data['answers_arr'][index]
        answers_single = random.choice(answers_arr)
        # answers_khot = self.toOneHot(answers_arr, self.answer_vec_size)
        tokenized_question = data['tokenized_question'][index]
        entity_mask = data['entity_mask'][index]
        head = data['head'][index]
        tail = data['tail'][index]
        tail2 = data['tail2'][index]
        time = data['time'][index]
        start_time = data['start_time'][index]
        end_time = data['end_time'][index]
        return question_text, tokenized_question, entity_time_ids, entity_mask, head, tail, time, start_time, end_time, tail2, answers_single

    def pad_for_batch(self, to_pad, padding_val, dtype=np.long):
        padded = np.ones([len(to_pad), len(max(to_pad, key=lambda x: len(x)))], dtype=dtype) * padding_val
        for i, j in enumerate(to_pad):
            padded[i][0:len(j)] = j
        return padded

    # do this before padding for batch
    def get_attention_mask(self, tokenized):
        # first make zeros array of appropriate size
        mask = np.zeros([len(tokenized), len(max(tokenized, key=lambda x: len(x)))], dtype=np.long)
        # now set ones everywhere needed
        for i, j in enumerate(tokenized):
            mask[i][0:len(j)] = np.ones(len(j), dtype=np.long)
        return mask

    def _collate_fn(self, items):
        # please don't tokenize again
        # b = self.tokenizer(batch_sentences, padding=True, truncation=False, return_tensors="pt")
        tokenized_questions = [item[1] for item in items]
        attention_mask = torch.from_numpy(self.get_attention_mask(tokenized_questions))
        input_ids = torch.from_numpy(self.pad_for_batch(tokenized_questions, self.tokenizer.pad_token_id, np.long))
        entity_time_ids_list = [item[2] for item in items]
        entity_time_ids_padded = self.pad_for_batch(entity_time_ids_list, self.padding_idx, np.long)
        entity_time_ids_padded = torch.from_numpy(entity_time_ids_padded)
        entity_mask = [item[3] for item in items]  # 0 if entity, 1 if not
        entity_mask_padded = self.pad_for_batch(entity_mask, 1.0, np.float32)  # doesn't matter probably cuz attention mask will be used. maybe pad with 1?
        entity_mask_padded = torch.from_numpy(entity_mask_padded)
        # can make foll mask in forward function using attention mask
        # entity_time_ids_padded_mask = ~(attention_mask.bool())
        heads = torch.from_numpy(np.array([item[4] for item in items]))
        tails = torch.from_numpy(np.array([item[5] for item in items]))
        times = torch.from_numpy(np.array([item[6] for item in items]))
        start_times = torch.from_numpy(np.array([item[7] for item in items]))
        end_times = torch.from_numpy(np.array([item[8] for item in items]))
        tails2 = torch.from_numpy(np.array([item[9] for item in items]))
        # answers_khot = torch.stack([item[4] for item in items])
        answers_single = torch.from_numpy(np.array([item[10] for item in items]))
        return input_ids, attention_mask, entity_time_ids_padded, entity_mask_padded, heads, tails, times, start_times, end_times, tails2, answers_single
27,598
38.825397
153
py
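The padding convention in QA_Dataset.padding_tensor above is easy to misread: values are padded with 0 while the boolean mask is True exactly on padding positions, matching nn.Transformer's key_padding_mask convention. A standalone sketch of the same logic on toy tensors (pure PyTorch, no dependence on the dataset class):

import torch

seqs = [torch.tensor([5, 7, 9]), torch.tensor([3])]
num, max_len = len(seqs), max(s.size(0) for s in seqs)
out = seqs[0].new_zeros((num, max_len))                 # values padded with 0
mask = torch.ones((num, max_len), dtype=torch.bool)     # True = ignored position
for i, s in enumerate(seqs):
    out[i, :s.size(0)] = s
    mask[i, :s.size(0)] = False                         # real tokens marked False
print(out)   # tensor([[5, 7, 9], [3, 0, 0]])
print(mask)  # tensor([[False, False, False], [False,  True,  True]])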
TempoQR
TempoQR-main/utils.py
import pickle
import random

import torch
import numpy as np
from tcomplex import TComplEx

from tqdm import tqdm


def loadTkbcModel(tkbc_model_file):
    print('Loading tkbc model from', tkbc_model_file)
    x = torch.load(tkbc_model_file, map_location=torch.device("cpu"))
    num_ent = x['embeddings.0.weight'].shape[0]
    num_rel = x['embeddings.1.weight'].shape[0]
    num_ts = x['embeddings.2.weight'].shape[0]
    print('Number ent,rel,ts from loaded model:', num_ent, num_rel, num_ts)
    sizes = [num_ent, num_rel, num_ent, num_ts]
    rank = x['embeddings.0.weight'].shape[1] // 2  # complex has 2*rank embedding size
    tkbc_model = TComplEx(sizes, rank, no_time_emb=False)
    tkbc_model.load_state_dict(x)
    tkbc_model.cuda()
    print('Loaded tkbc model')
    return tkbc_model


def loadTkbcModel_complex(tkbc_model_file):
    print('Loading complex tkbc model from', tkbc_model_file)
    tcomplex_file = 'models/wikidata_big/kg_embeddings/tcomplex_17dec.ckpt'  # TODO: hack
    tcomplex_params = torch.load(tcomplex_file)
    complex_params = torch.load(tkbc_model_file)
    num_ent = tcomplex_params['embeddings.0.weight'].shape[0]
    num_rel = tcomplex_params['embeddings.1.weight'].shape[0]
    num_ts = tcomplex_params['embeddings.2.weight'].shape[0]
    print('Number ent,rel,ts from loaded model:', num_ent, num_rel, num_ts)
    sizes = [num_ent, num_rel, num_ent, num_ts]
    rank = tcomplex_params['embeddings.0.weight'].shape[1] // 2  # complex has 2*rank embedding size

    # now put complex params in tcomplex model
    tcomplex_params['embeddings.0.weight'] = complex_params['embeddings.0.weight']
    tcomplex_params['embeddings.1.weight'] = complex_params['embeddings.1.weight']
    torch.nn.init.xavier_uniform_(tcomplex_params['embeddings.2.weight'])  # randomize time embeddings

    tkbc_model = TComplEx(sizes, rank, no_time_emb=False)
    tkbc_model.load_state_dict(tcomplex_params)
    tkbc_model.cuda()
    print('Loaded complex tkbc model')
    return tkbc_model


def dataIdsToLiterals(d, all_dicts):
    new_datapoint = []
    id2rel = all_dicts['id2rel']
    id2ent = all_dicts['id2ent']
    id2ts = all_dicts['id2ts']
    wd_id_to_text = all_dicts['wd_id_to_text']
    new_datapoint.append(wd_id_to_text[id2ent[d[0]]])
    new_datapoint.append(wd_id_to_text[id2rel[d[1]]])
    new_datapoint.append(wd_id_to_text[id2ent[d[2]]])
    new_datapoint.append(id2ts[d[3]])
    new_datapoint.append(id2ts[d[4]])
    return new_datapoint


def getAllDicts(dataset_name):
    # base_path = '/scratche/home/apoorv/tkbc/tkbc_env/lib/python3.7/site-packages/tkbc-0.0.0-py3.7.egg/tkbc/data/wikidata_small/'
    base_path = 'data/{dataset_name}/kg/tkbc_processed_data/{dataset_name}/'.format(
        dataset_name=dataset_name
    )
    dicts = {}
    for f in ['ent_id', 'rel_id', 'ts_id']:
        in_file = open(str(base_path + f), 'rb')
        dicts[f] = pickle.load(in_file)
    rel2id = dicts['rel_id']
    ent2id = dicts['ent_id']
    ts2id = dicts['ts_id']
    file_ent = 'data/{dataset_name}/kg/wd_id2entity_text.txt'.format(
        dataset_name=dataset_name
    )
    file_rel = 'data/{dataset_name}/kg/wd_id2relation_text.txt'.format(
        dataset_name=dataset_name
    )

    def readDict(filename):
        f = open(filename, 'r')
        d = {}
        for line in f:
            line = line.strip().split('\t')
            if len(line) == 1:
                line.append('')  # in case literal was blank or whitespace
            d[line[0]] = line[1]
        f.close()
        return d

    e = readDict(file_ent)
    r = readDict(file_rel)
    wd_id_to_text = dict(list(e.items()) + list(r.items()))

    def getReverseDict(d):
        return {value: key for key, value in d.items()}

    id2rel = getReverseDict(rel2id)
    id2ent = getReverseDict(ent2id)
    id2ts = getReverseDict(ts2id)

    all_dicts = {'rel2id': rel2id,
                 'id2rel': id2rel,
                 'ent2id': ent2id,
                 'id2ent': id2ent,
                 'ts2id': ts2id,
                 'id2ts': id2ts,
                 'wd_id_to_text': wd_id_to_text
                 }

    return all_dicts


def checkQuestion(question, target_type):
    question_type = question['type']
    if target_type != question_type:
        return False
    return True


# def getDataPoint(question, all_dicts):

def predictTime(question, model, all_dicts, k=1):
    entities = list(question['entities'])
    times = question['times']
    target_type = 'simple_time'
    if checkQuestion(question, target_type) == False:
        print('Not Entity question')
        return set()
    ent2id = all_dicts['ent2id']
    rel2id = all_dicts['rel2id']
    id2ts = all_dicts['id2ts']
    annotation = question['annotation']
    head = ent2id[annotation['head']]
    tail = ent2id[annotation['tail']]
    # relation = rel2id[list(question['relations'])[0]]
    relation = list(question['relations'])[0]
    if 'P' not in relation:
        relation = 'P' + relation
    relation = rel2id[relation]  # + model.embeddings[1].weight.shape[0]//2 #+ 90
    data_point = [head, relation, tail, 1, 1]
    data_batch = torch.from_numpy(np.array([data_point])).cuda()
    time_scores = model.forward_over_time(data_batch)
    val, ind = torch.topk(time_scores, k, dim=1)
    topk_set = set()
    for row in ind:
        for x in row:
            topk_set.add(id2ts[x.item()][0])
    return topk_set


def predictTail(question, model, all_dicts, k=1):
    entities = list(question['entities'])
    times = list(question['times'])
    target_type = 'simple_entity'
    if checkQuestion(question, target_type) == False:
        print('Not Entity question')
        return set()
    ent2id = all_dicts['ent2id']
    rel2id = all_dicts['rel2id']
    ts2id = all_dicts['ts2id']
    id2ent = all_dicts['id2ent']
    head = ent2id[entities[0]]
    try:
        time = ts2id[(times[0],0,0)]
    except:
        return set()
    relation = list(question['relations'])[0]
    if 'P' not in relation:
        relation = 'P' + relation
    relation = rel2id[relation]  # + model.embeddings[1].weight.shape[0]//2 #+ 90
    data_point = [head, relation, 1, time, time]
    data_batch = torch.from_numpy(np.array([data_point])).cuda()
    predictions, factors, time = model.forward(data_batch)
    val, ind = torch.topk(predictions, k, dim=1)
    topk_set = set()
    for row in ind:
        for x in row:
            topk_set.add(id2ent[x.item()])
    return topk_set


def checkIfTkbcEmbeddingsTrained(tkbc_model, dataset_name, split='test'):
    filename = 'data/{dataset_name}/questions/{split}.pickle'.format(
        dataset_name=dataset_name,
        split=split
    )
    questions = pickle.load(open(filename, 'rb'))
    all_dicts = getAllDicts(dataset_name)
    for question_type in ['simple_entity', 'simple_time']:
        correct_count = 0
        total_count = 0
        k = 1  # hit at k
        for i in tqdm(range(len(questions))):
            this_question_type = questions[i]['type']
            if question_type == this_question_type and question_type == 'simple_entity':
                which_question_function = predictTail
            elif question_type == this_question_type and question_type == 'simple_time':
                which_question_function = predictTime
            else:
                continue
            total_count += 1
            id = i
            predicted = which_question_function(questions[id], tkbc_model, all_dicts, k)
            intersection_set = set(questions[id]['answers']).intersection(predicted)
            if len(intersection_set) > 0:
                correct_count += 1
        print(question_type, correct_count, total_count, correct_count/total_count)


def print_info(args):
    print('#######################')
    print('Model: ' + args.model)
    print('Supervision (if applicable): ' + args.supervision)
    print('TKG Embeddings: ' + args.tkbc_model_file)
    print('TKG for QA (if applicable): ' + args.tkg_file)
    print('#######################')
7,990
35.824885
130
py
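The loaders above recover the ComplEx rank by halving the checkpoint's embedding width, since each embedding row stores the real and imaginary halves side by side. A toy sketch of that convention (the state-dict key is the one used above; the shapes here are hypothetical):

import torch

fake_ckpt = {'embeddings.0.weight': torch.zeros(1000, 512)}  # 1000 entities, width 512 (hypothetical)
rank = fake_ckpt['embeddings.0.weight'].shape[1] // 2
print(rank)  # 256 complex dimensions packed into 512 real numbers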
TempoQR
TempoQR-main/qa_baselines.py
import math
import torch
from torch import nn
import numpy as np
from tcomplex import TComplEx
from transformers import RobertaModel
from transformers import BertModel
from transformers import DistilBertModel

import pdb


# training data: questions
# model:
# 1. tkbc model embeddings (may or may not be frozen)
# 2. question sentence embeddings (may or may not be frozen)
# 3. linear layer to project question embeddings (unfrozen)
# 4. transformer that takes these embeddings (unfrozen) (cats them along a dimension, also takes a mask)
# 5. average output embeddings of transformer or take last token embedding?
# 6. linear projection of this embedding to tkbc embedding dimension
# 7. score with all possible entities/times and sigmoid
# 8. BCE loss (multiple correct possible)

class QA_baseline(nn.Module):
    def __init__(self, tkbc_model, args):
        super().__init__()
        self.tkbc_embedding_dim = tkbc_model.embeddings[0].weight.shape[1]
        self.sentence_embedding_dim = 768  # hardwired from roberta
        if args.model == 'bert':
            self.pretrained_weights = 'bert-base-uncased'
            self.lm_model = BertModel.from_pretrained(self.pretrained_weights)
        elif args.model == 'roberta':
            self.pretrained_weights = 'roberta-base'
            self.lm_model = RobertaModel.from_pretrained(self.pretrained_weights)
        else:
            self.pretrained_weights = 'distilbert-base-uncased'
            self.lm_model = DistilBertModel.from_pretrained(self.pretrained_weights)
        if args.lm_frozen == 1:
            print('Freezing LM params')
            for param in self.lm_model.parameters():
                param.requires_grad = False
        else:
            print('Unfrozen LM params')
        # creating combined embedding of time and entities (entities come first)
        self.tkbc_model = tkbc_model
        num_entities = tkbc_model.embeddings[0].weight.shape[0]
        num_times = tkbc_model.embeddings[2].weight.shape[0]
        ent_emb_matrix = tkbc_model.embeddings[0].weight.data
        time_emb_matrix = tkbc_model.embeddings[2].weight.data
        full_embed_matrix = torch.cat([ent_emb_matrix, time_emb_matrix], dim=0)
        self.entity_time_embedding = nn.Embedding(num_entities + num_times, self.tkbc_embedding_dim)
        self.entity_time_embedding.weight.data.copy_(full_embed_matrix)
        self.num_entities = num_entities
        self.num_times = num_times
        if args.frozen == 1:
            print('Freezing entity/time embeddings')
            self.entity_time_embedding.weight.requires_grad = False
            for param in self.tkbc_model.parameters():
                param.requires_grad = False
        else:
            print('Unfrozen entity/time embeddings')
            # print('Random starting embedding')
        self.linear = nn.Linear(768, self.tkbc_embedding_dim)  # to project question embedding
        self.linear1 = nn.Linear(self.tkbc_embedding_dim, self.tkbc_embedding_dim)
        self.linear2 = nn.Linear(self.tkbc_embedding_dim, self.tkbc_embedding_dim)
        self.loss = nn.CrossEntropyLoss(reduction='mean')
        self.dropout = torch.nn.Dropout(0.3)
        self.bn1 = torch.nn.BatchNorm1d(self.tkbc_embedding_dim)
        self.bn2 = torch.nn.BatchNorm1d(self.tkbc_embedding_dim)
        return

    def getQuestionEmbedding(self, question_tokenized, attention_mask):
        lm_last_hidden_states = self.lm_model(question_tokenized, attention_mask=attention_mask)[0]
        states = lm_last_hidden_states.transpose(1, 0)
        cls_embedding = states[0]
        question_embedding = cls_embedding
        # question_embedding = torch.mean(roberta_last_hidden_states, dim=1)
        return question_embedding


class QA_lm(QA_baseline):
    def __init__(self, tkbc_model, args):
        super().__init__(tkbc_model, args)
        self.final_linear = nn.Linear(4*self.tkbc_embedding_dim, self.tkbc_embedding_dim)
        return

    def forward(self, a):
        question_tokenized = a[0].cuda()
        question_attention_mask = a[1].cuda()
        heads = a[2].cuda()
        tails = a[3].cuda()
        times = a[4].cuda()
        head_embedding = self.entity_time_embedding(heads)
        tail_embedding = self.entity_time_embedding(tails)
        time_embedding = self.entity_time_embedding(times)
        question_embedding = self.getQuestionEmbedding(question_tokenized, question_attention_mask)
        relation_embedding = self.linear(question_embedding)
        output = self.final_linear(torch.cat((head_embedding, relation_embedding, tail_embedding, time_embedding), dim=-1))
        scores = torch.matmul(output, self.entity_time_embedding.weight.data.T)
        return scores


class QA_embedkgqa(QA_baseline):
    def __init__(self, tkbc_model, args):
        super().__init__(tkbc_model, args)
        return

    def score(self, head_embedding, relation_embedding):
        lhs = head_embedding
        rel = relation_embedding
        right = torch.cat((self.entity_embedding.weight, self.time_embedding.weight), dim=0)
        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        right = right[:, :self.rank], right[:, self.rank:]
        return (lhs[0] * rel[0] - lhs[1] * rel[1]) @ right[0].transpose(0, 1) + \
               (lhs[0] * rel[1] + lhs[1] * rel[0]) @ right[1].transpose(0, 1)

    def forward(self, a):
        question_tokenized = a[0].cuda()
        question_attention_mask = a[1].cuda()
        heads = a[2].cuda()
        head_embedding = self.entity_embedding(heads)
        question_embedding = self.getQuestionEmbedding(question_tokenized, question_attention_mask)
        relation_embedding = self.linear(question_embedding)
        relation_embedding1 = self.dropout(self.bn1(self.linear1(relation_embedding)))
        scores = self.score(head_embedding, relation_embedding1)
        # exit(0)
        # scores = torch.cat((scores_entity, scores_time), dim=1)
        return scores


class QA_cronkgqa(QA_baseline):
    def __init__(self, tkbc_model, args):
        super().__init__(tkbc_model, args)
        self.supervision = args.supervision
        return

    def infer_time(self, head_embedding, tail_embedding, relation_embedding):
        lhs = head_embedding
        rhs = tail_embedding
        rel = relation_embedding
        time = self.tkbc_model.embeddings[2].weight
        # + self.tkbc_model.lin2(self.tkbc_model.time_embedding.weight)
        # time = self.entity_time_embedding.weight
        lhs = lhs[:, :self.tkbc_model.rank], lhs[:, self.tkbc_model.rank:]
        rel = rel[:, :self.tkbc_model.rank], rel[:, self.tkbc_model.rank:]
        rhs = rhs[:, :self.tkbc_model.rank], rhs[:, self.tkbc_model.rank:]
        time = time[:, :self.tkbc_model.rank], time[:, self.tkbc_model.rank:]
        return torch.cat([
            (lhs[0] * rel[0] * rhs[0] - lhs[1] * rel[1] * rhs[0] -
             lhs[1] * rel[0] * rhs[1] + lhs[0] * rel[1] * rhs[1]),
            (lhs[1] * rel[0] * rhs[0] - lhs[0] * rel[1] * rhs[0] +
             lhs[0] * rel[0] * rhs[1] - lhs[1] * rel[1] * rhs[1])], dim=-1
        )

    # scoring function from TComplEx
    def score_time(self, head_embedding, tail_embedding, relation_embedding):
        lhs = head_embedding
        rhs = tail_embedding
        rel = relation_embedding
        time = self.tkbc_model.embeddings[2].weight
        # time = self.entity_time_embedding.weight
        lhs = lhs[:, :self.tkbc_model.rank], lhs[:, self.tkbc_model.rank:]
        rel = rel[:, :self.tkbc_model.rank], rel[:, self.tkbc_model.rank:]
        rhs = rhs[:, :self.tkbc_model.rank], rhs[:, self.tkbc_model.rank:]
        time = time[:, :self.tkbc_model.rank], time[:, self.tkbc_model.rank:]
        return (
            (lhs[0] * rel[0] * rhs[0] - lhs[1] * rel[1] * rhs[0] -
             lhs[1] * rel[0] * rhs[1] + lhs[0] * rel[1] * rhs[1]) @ time[0].t() +
            (lhs[1] * rel[0] * rhs[0] - lhs[0] * rel[1] * rhs[0] +
             lhs[0] * rel[0] * rhs[1] - lhs[1] * rel[1] * rhs[1]) @ time[1].t()
        )

    def score_entity(self, head_embedding, tail_embedding, relation_embedding, time_embedding):
        lhs = head_embedding[:, :self.tkbc_model.rank], head_embedding[:, self.tkbc_model.rank:]
        rel = relation_embedding
        time = time_embedding
        rel = rel[:, :self.tkbc_model.rank], rel[:, self.tkbc_model.rank:]
        time = time[:, :self.tkbc_model.rank], time[:, self.tkbc_model.rank:]
        right = self.tkbc_model.embeddings[0].weight
        # right = self.entity_time_embedding.weight
        right = right[:, :self.tkbc_model.rank], right[:, self.tkbc_model.rank:]
        rt = rel[0] * time[0], rel[1] * time[0], rel[0] * time[1], rel[1] * time[1]
        full_rel = rt[0] - rt[3], rt[1] + rt[2]
        return (
            (lhs[0] * full_rel[0] - lhs[1] * full_rel[1]) @ right[0].t() +
            (lhs[1] * full_rel[0] + lhs[0] * full_rel[1]) @ right[1].t()
        )

    def forward(self, a):
        question_tokenized = a[0].cuda()
        question_attention_mask = a[1].cuda()
        heads = a[2].cuda()
        tails = a[3].cuda()
        times = a[4].cuda()
        head_embedding = self.entity_time_embedding(heads)
        tail_embedding = self.entity_time_embedding(tails)
        time_embedding = self.entity_time_embedding(times)
        question_embedding = self.getQuestionEmbedding(question_tokenized, question_attention_mask)
        relation_embedding = self.linear(question_embedding)
        if self.supervision == 'soft':
            t1_emb = self.infer_time(head_embedding, tail_embedding, relation_embedding)
            t2_emb = self.infer_time(tail_embedding, head_embedding, relation_embedding)
            time_embedding = (time_embedding + t1_emb + t2_emb)/3  # just the mean
        relation_embedding1 = self.dropout(self.bn1(self.linear1(relation_embedding)))
        relation_embedding2 = self.dropout(self.bn2(self.linear2(relation_embedding)))
        scores_time = self.score_time(head_embedding, tail_embedding, relation_embedding1)
        scores_entity = self.score_entity(head_embedding, tail_embedding, relation_embedding2, time_embedding)
        scores = torch.cat((scores_entity, scores_time), dim=1)
        return scores
9,331
38.542373
136
py
TempoQR
TempoQR-main/train_qa_model.py
import argparse
from typing import Dict
import logging
import torch
from torch import optim
import pickle
import numpy as np
from qa_baselines import QA_baseline, QA_lm, QA_embedkgqa, QA_cronkgqa
from qa_tempoqr import QA_TempoQR
from qa_datasets import QA_Dataset, QA_Dataset_TempoQR, QA_Dataset_Baseline
from torch.utils.data import Dataset, DataLoader
import utils
from tqdm import tqdm
from utils import loadTkbcModel, loadTkbcModel_complex, print_info
from collections import defaultdict
from datetime import datetime
from collections import OrderedDict

parser = argparse.ArgumentParser(
    description="Temporal KGQA"
)
parser.add_argument(
    '--tkbc_model_file', default='tcomplex.ckpt', type=str,
    help="Pretrained tkbc model checkpoint"
)
parser.add_argument(
    '--tkg_file', default='full.txt', type=str,
    help="TKG to use for hard-supervision"
)
parser.add_argument(
    '--model', default='tempoqr', type=str,
    help="Which model to use."
)
parser.add_argument(
    '--supervision', default='soft', type=str,
    help="Which supervision to use."
)
parser.add_argument(
    '--load_from', default='', type=str,
    help="Pretrained qa model checkpoint"
)
parser.add_argument(
    '--save_to', default='', type=str,
    help="Where to save checkpoint."
)
parser.add_argument(
    '--max_epochs', default=20, type=int,
    help="Number of epochs."
)
parser.add_argument(
    '--eval_k', default=1, type=int,
    help="Hits@k used for eval. Default 1."
)
parser.add_argument(
    '--valid_freq', default=1, type=int,
    help="Number of epochs between each valid."
)
parser.add_argument(
    '--batch_size', default=150, type=int,
    help="Batch size."
)
parser.add_argument(
    '--valid_batch_size', default=50, type=int,
    help="Valid batch size."
)
parser.add_argument(
    '--frozen', default=1, type=int,
    help="Whether entity/time embeddings are frozen or not. Default frozen."
)
parser.add_argument(
    '--lm_frozen', default=1, type=int,
    help="Whether language model params are frozen or not. Default frozen."
)
parser.add_argument(
    '--lr', default=2e-4, type=float,
    help="Learning rate"
)
parser.add_argument(
    '--mode', default='train', type=str,
    help="Whether train or eval."
)
parser.add_argument(
    '--eval_split', default='valid', type=str,
    help="Which split to validate on"
)
parser.add_argument(
    '--dataset_name', default='wikidata_big', type=str,
    help="Which dataset."
)
parser.add_argument(
    '--lm', default='distill_bert', type=str,
    help="Lm to use."
)
parser.add_argument(
    '--fuse', default='add', type=str,
    help="For fusing time embeddings."
)
parser.add_argument(
    # caveat: argparse's type=bool treats any non-empty string (even "False") as True
    '--extra_entities', default=False, type=bool,
    help="For some question types."
)
parser.add_argument(
    '--corrupt_hard', default=0., type=float,
    help="For some question types."
)
parser.add_argument(
    '--test', default="test", type=str,
    help="Test data."
)

args = parser.parse_args()
print_info(args)


def eval(qa_model, dataset, batch_size=128, split='valid', k=10):
    num_workers = 4
    qa_model.eval()
    eval_log = []
    print_numbers_only = False
    k_for_reporting = k  # don't rename in the fn signature since the named param is used elsewhere
    k_list = [1, 10]
    # k_list = [1, 2, 5, 10]
    max_k = max(k_list)
    eval_log.append("Split %s" % (split))
    print('Evaluating split', split)
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False,
                             num_workers=num_workers, collate_fn=dataset._collate_fn)
    topk_answers = []
    total_loss = 0
    loader = tqdm(data_loader, total=len(data_loader), unit="batches")
    for i_batch, a in enumerate(loader):
        # if the size of the split is a multiple of the batch size, we need this
        # todo: is there a more elegant way?
        if i_batch * batch_size == len(dataset.data):
            break
        answers_khot = a[-1]  # last one assumed to be target
        scores = qa_model.forward(a)
        for s in scores:
            pred = dataset.getAnswersFromScores(s, k=max_k)
            topk_answers.append(pred)
        loss = qa_model.loss(scores, answers_khot.cuda())
        total_loss += loss.item()
    eval_log.append('Loss %f' % total_loss)
    eval_log.append('Eval batch size %d' % batch_size)

    # do eval for each k in k_list
    # we want multiple hits@k
    eval_accuracy_for_reporting = 0
    for k in k_list:
        hits_at_k = 0
        total = 0
        question_types_count = defaultdict(list)
        simple_complex_count = defaultdict(list)
        entity_time_count = defaultdict(list)
        for i, question in enumerate(dataset.data):
            actual_answers = question['answers']
            question_type = question['type']
            if 'simple' in question_type:
                simple_complex_type = 'simple'
            else:
                simple_complex_type = 'complex'
            entity_time_type = question['answer_type']
            # question_type = question['template']
            predicted = topk_answers[i][:k]
            if len(set(actual_answers).intersection(set(predicted))) > 0:
                val_to_append = 1
                hits_at_k += 1
            else:
                val_to_append = 0
            question_types_count[question_type].append(val_to_append)
            simple_complex_count[simple_complex_type].append(val_to_append)
            entity_time_count[entity_time_type].append(val_to_append)
            total += 1
        eval_accuracy = hits_at_k / total
        if k == k_for_reporting:
            eval_accuracy_for_reporting = eval_accuracy
        if not print_numbers_only:
            eval_log.append('Hits at %d: %f' % (k, round(eval_accuracy, 3)))
        else:
            eval_log.append(str(round(eval_accuracy, 3)))

        question_types_count = dict(sorted(question_types_count.items(), key=lambda x: x[0].lower()))
        simple_complex_count = dict(sorted(simple_complex_count.items(), key=lambda x: x[0].lower()))
        entity_time_count = dict(sorted(entity_time_count.items(), key=lambda x: x[0].lower()))
        # for dictionary in [question_types_count]:
        for dictionary in [question_types_count, simple_complex_count, entity_time_count]:
            # for dictionary in [simple_complex_count, entity_time_count]:
            for key, value in dictionary.items():
                hits_at_k = sum(value) / len(value)
                s = '{q_type} \t {hits_at_k} \t total questions: {num_questions}'.format(
                    q_type=key,
                    hits_at_k=round(hits_at_k, 3),
                    num_questions=len(value)
                )
                if print_numbers_only:
                    s = str(round(hits_at_k, 3))
                eval_log.append(s)
            eval_log.append('')

    # print the eval log as well as returning it
    for s in eval_log:
        print(s)
    return eval_accuracy_for_reporting, eval_log


def append_log_to_file(eval_log, epoch, filename):
    f = open(filename, 'a+')
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    f.write('Log time: %s\n' % dt_string)
    f.write('Epoch %d\n' % epoch)
    for line in eval_log:
        f.write('%s\n' % line)
    f.write('\n')
    f.close()


def train(qa_model, dataset, valid_dataset, args, result_filename=None):
    num_workers = 5
    optimizer = torch.optim.Adam(qa_model.parameters(), lr=args.lr)
    optimizer.zero_grad()
    batch_size = args.batch_size
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                             num_workers=num_workers, collate_fn=dataset._collate_fn)
    if args.save_to == '':
        args.save_to = 'temp'
    if result_filename is None:
        result_filename = 'results/{dataset_name}/{model_file}.log'.format(
            dataset_name=args.dataset_name,
            model_file=args.save_to
        )
    checkpoint_file_name = 'models/{dataset_name}/qa_models/{model_file}.ckpt'.format(
        dataset_name=args.dataset_name,
        model_file=args.save_to
    )
    # if not loading from any previous file
    # we want to make a new log file
    # and also log the config, i.e. args, to the file
    if args.load_from == '':
        print('Creating new log file')
        f = open(result_filename, 'a+')
        now = datetime.now()
        dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
        f.write('Log time: %s\n' % dt_string)
        f.write('Config: \n')
        for key, value in vars(args).items():
            key = str(key)
            value = str(value)
            f.write('%s:\t%s\n' % (key, value))
        f.write('\n')
        f.close()

    max_eval_score = 0.
    print('Starting training')
    for epoch in range(args.max_epochs):
        qa_model.train()
        epoch_loss = 0
        loader = tqdm(data_loader, total=len(data_loader), unit="batches")
        running_loss = 0
        for i_batch, a in enumerate(loader):
            qa_model.zero_grad()
            # so that we don't need an 'if condition' here
            # scores = qa_model.forward(question_tokenized.cuda(),
            #     question_attention_mask.cuda(), entities_times_padded.cuda(),
            #     entities_times_padded_mask.cuda(), question_text)
            answers_khot = a[-1]  # last one assumed to be target
            scores = qa_model.forward(a)
            loss = qa_model.loss(scores, answers_khot.cuda())
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            running_loss += loss.item()
            loader.set_postfix(Loss=running_loss / ((i_batch + 1) * batch_size), Epoch=epoch)
            loader.set_description('{}/{}'.format(epoch, args.max_epochs))
            loader.update()
        print('Epoch loss = ', epoch_loss)
        if (epoch + 1) % args.valid_freq == 0:
            print('Starting eval')
            eval_score, eval_log = eval(qa_model, valid_dataset, batch_size=args.valid_batch_size,
                                        split=args.eval_split, k=args.eval_k)
            if eval_score > max_eval_score:
                print('Valid score increased')
                save_model(qa_model, checkpoint_file_name)
                max_eval_score = eval_score
            # log each time, not just the max
            # the max score can be read off the logs later
            append_log_to_file(eval_log, epoch, result_filename)


def save_model(qa_model, filename):
    print('Saving model to', filename)
    torch.save(qa_model.state_dict(), filename)
    print('Saved model to ', filename)
    return


if args.model != 'embedkgqa':  # TODO this is a hack
    tkbc_model = loadTkbcModel('models/{dataset_name}/kg_embeddings/{tkbc_model_file}'.format(
        dataset_name=args.dataset_name, tkbc_model_file=args.tkbc_model_file
    ))
else:
    tkbc_model = loadTkbcModel_complex('models/{dataset_name}/kg_embeddings/{tkbc_model_file}'.format(
        dataset_name=args.dataset_name, tkbc_model_file=args.tkbc_model_file
    ))

if args.mode == 'test_kge':
    utils.checkIfTkbcEmbeddingsTrained(tkbc_model, args.dataset_name, args.eval_split)
    exit(0)

train_split = 'train'
test = args.test
if args.model == 'bert' or args.model == 'roberta':
    qa_model = QA_lm(tkbc_model, args)
    dataset = QA_Dataset_Baseline(split=train_split, dataset_name=args.dataset_name)
    # valid_dataset = QA_Dataset_Baseline(split=args.eval_split, dataset_name=args.dataset_name)
    test_dataset = QA_Dataset_Baseline(split=test, dataset_name=args.dataset_name)
elif args.model == 'embedkgqa':
    qa_model = QA_embedkgqa(tkbc_model, args)
    dataset = QA_Dataset_Baseline(split=train_split, dataset_name=args.dataset_name)
    # valid_dataset = QA_Dataset_Baseline(split=args.eval_split, dataset_name=args.dataset_name)
    test_dataset = QA_Dataset_Baseline(split=test, dataset_name=args.dataset_name)
elif args.model == 'cronkgqa' and args.supervision != 'hard':
    qa_model = QA_cronkgqa(tkbc_model, args)
    dataset = QA_Dataset_Baseline(split=train_split, dataset_name=args.dataset_name)
    # valid_dataset = QA_Dataset_Baseline(split=args.eval_split, dataset_name=args.dataset_name)
    test_dataset = QA_Dataset_Baseline(split=test, dataset_name=args.dataset_name)
elif args.model in ['tempoqr', 'entityqr', 'cronkgqa']:  # supervised models
    qa_model = QA_TempoQR(tkbc_model, args)
    if args.mode == 'train':
        dataset = QA_Dataset_TempoQR(split=train_split, dataset_name=args.dataset_name, args=args)
    # valid_dataset = QA_Dataset_TempoQR(split=args.eval_split, dataset_name=args.dataset_name, args=args)
    test_dataset = QA_Dataset_TempoQR(split=test, dataset_name=args.dataset_name, args=args)
else:
    print('Model %s not implemented!' % args.model)
    exit(0)

print('Model is', args.model)
if args.load_from != '':
    filename = 'models/{dataset_name}/qa_models/{model_file}.ckpt'.format(
        dataset_name=args.dataset_name,
        model_file=args.load_from
    )
    print('Loading model from', filename)
    qa_model.load_state_dict(torch.load(filename))
    print('Loaded qa model from ', filename)
else:
    print('Not loading from checkpoint. Starting fresh!')

qa_model = qa_model.cuda()

if args.mode == 'eval':
    score, log = eval(qa_model, test_dataset, batch_size=args.valid_batch_size,
                      split=args.eval_split, k=args.eval_k)
    exit(0)

result_filename = 'results/{dataset_name}/{model_file}.log'.format(
    dataset_name=args.dataset_name,
    model_file=args.save_to
)
train(qa_model, dataset, test_dataset, args, result_filename=result_filename)

# score, log = eval(qa_model, test_dataset, batch_size=args.valid_batch_size, split="test", k=args.eval_k)
# log = ["######## TEST EVALUATION FINAL (BEST) #########"] + log
# append_log_to_file(log, 0, result_filename)
print('Training finished')
13,849
33.798995
138
py
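The eval loop in train_qa_model.py reduces to a plain hits@k computation: a question counts as a hit if any gold answer appears among its top-k predictions, and the score is the hit fraction. A minimal self-contained sketch of that metric (the names topk_answers and gold mirror the script's variables; the toy data is hypothetical):

from typing import List, Set

def hits_at_k(topk_answers: List[List[int]], gold: List[Set[int]], k: int) -> float:
    """Fraction of questions whose top-k predictions contain a gold answer."""
    hits = sum(
        1 for preds, answers in zip(topk_answers, gold)
        if answers.intersection(preds[:k])
    )
    return hits / len(gold)

# toy example: 2 questions, k=1 -> only the first is a hit
print(hits_at_k([[3, 7], [5, 9]], [{3}, {9}], k=1))  # 0.5

The script computes the same quantity per question type by appending 0/1 outcomes into defaultdict(list) buckets and averaging each bucket.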
TempoQR
TempoQR-main/qa_tempoqr.py
import math
import torch
from torch import nn
import numpy as np
from tcomplex import TComplEx
from transformers import DistilBertModel
from torch.nn import LayerNorm
import pdb

# training data: questions
# model:
# 1. tkbc model embeddings (may or may not be frozen)
# 2. question sentence embeddings (may or may not be frozen)
# 3. linear layer to project question embeddings (unfrozen)
# 4. transformer that takes these embeddings (unfrozen)
#    (cats them along a dimension, also takes a mask)
# 5. average output embeddings of transformer or take last token embedding?
# 6. linear projection of this embedding to tkbc embedding dimension
# 7. score with all possible entities/times and sigmoid
# 8. BCE loss (multiple correct answers possible)


class QA_TempoQR(nn.Module):
    def __init__(self, tkbc_model, args):
        super().__init__()
        self.model = args.model
        self.supervision = args.supervision
        self.extra_entities = args.extra_entities
        self.fuse = args.fuse
        self.tkbc_embedding_dim = tkbc_model.embeddings[0].weight.shape[1]
        self.sentence_embedding_dim = 768  # hardwired from the DistilBERT hidden size
        self.pretrained_weights = 'distilbert-base-uncased'
        self.lm_model = DistilBertModel.from_pretrained(self.pretrained_weights)
        if args.lm_frozen == 1:
            print('Freezing LM params')
            for param in self.lm_model.parameters():
                param.requires_grad = False
        else:
            print('Unfrozen LM params')

        # transformer
        self.transformer_dim = self.tkbc_embedding_dim  # kept the same so there is no need to project embeddings
        self.nhead = 8
        self.num_layers = 6
        self.transformer_dropout = 0.1
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=self.transformer_dim, nhead=self.nhead,
                                                        dropout=self.transformer_dropout)
        encoder_norm = LayerNorm(self.transformer_dim)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=self.num_layers,
                                                         norm=encoder_norm)
        self.project_sentence_to_transformer_dim = nn.Linear(self.sentence_embedding_dim, self.transformer_dim)
        self.project_entity = nn.Linear(self.tkbc_embedding_dim, self.tkbc_embedding_dim)

        # TKG embeddings
        self.tkbc_model = tkbc_model
        num_entities = tkbc_model.embeddings[0].weight.shape[0]
        num_times = tkbc_model.embeddings[2].weight.shape[0]
        ent_emb_matrix = tkbc_model.embeddings[0].weight.data
        time_emb_matrix = tkbc_model.embeddings[2].weight.data
        full_embed_matrix = torch.cat([ent_emb_matrix, time_emb_matrix], dim=0)
        # the +1 is for the padding idx
        self.entity_time_embedding = nn.Embedding(num_entities + num_times + 1,
                                                  self.tkbc_embedding_dim,
                                                  padding_idx=num_entities + num_times)
        self.entity_time_embedding.weight.data[:-1, :].copy_(full_embed_matrix)
        if args.frozen == 1:
            print('Freezing entity/time embeddings')
            self.entity_time_embedding.weight.requires_grad = False
            for param in self.tkbc_model.parameters():
                param.requires_grad = False
        else:
            print('Unfrozen entity/time embeddings')

        # position embedding for the transformer
        self.max_seq_length = 100  # arbitrarily chosen max number of question tokens
        self.position_embedding = nn.Embedding(self.max_seq_length, self.tkbc_embedding_dim)
        # print('Random starting embedding')
        self.loss = nn.CrossEntropyLoss(reduction='mean')
        self.layer_norm = nn.LayerNorm(self.transformer_dim)
        self.linear = nn.Linear(768, self.tkbc_embedding_dim)   # to project question embedding
        self.linearT = nn.Linear(768, self.tkbc_embedding_dim)  # to project question embedding
        self.lin_cat = nn.Linear(3 * self.transformer_dim, self.transformer_dim)
        self.linear1 = nn.Linear(self.tkbc_embedding_dim, self.tkbc_embedding_dim)
        self.linear2 = nn.Linear(self.tkbc_embedding_dim, self.tkbc_embedding_dim)
        self.dropout = torch.nn.Dropout(0.3)
        self.bn1 = torch.nn.BatchNorm1d(self.tkbc_embedding_dim)
        self.bn2 = torch.nn.BatchNorm1d(self.tkbc_embedding_dim)
        return

    def invert_binary_tensor(self, tensor):
        ones_tensor = torch.ones(tensor.shape, dtype=torch.float32).cuda()
        inverted = ones_tensor - tensor
        return inverted

    def infer_time(self, head_embedding, tail_embedding, relation_embedding):
        lhs = head_embedding
        rhs = tail_embedding
        rel = relation_embedding
        time = self.tkbc_model.embeddings[2].weight
        # + self.tkbc_model.lin2(self.tkbc_model.time_embedding.weight)
        # time = self.entity_time_embedding.weight
        lhs = lhs[:, :self.tkbc_model.rank], lhs[:, self.tkbc_model.rank:]
        rel = rel[:, :self.tkbc_model.rank], rel[:, self.tkbc_model.rank:]
        rhs = rhs[:, :self.tkbc_model.rank], rhs[:, self.tkbc_model.rank:]
        time = time[:, :self.tkbc_model.rank], time[:, self.tkbc_model.rank:]
        return torch.cat([
            (lhs[0] * rel[0] * rhs[0] - lhs[1] * rel[1] * rhs[0] -
             lhs[1] * rel[0] * rhs[1] + lhs[0] * rel[1] * rhs[1]),
            (lhs[1] * rel[0] * rhs[0] - lhs[0] * rel[1] * rhs[0] +
             lhs[0] * rel[0] * rhs[1] - lhs[1] * rel[1] * rhs[1])
        ], dim=-1)

    # scoring function from TComplEx
    def score_time(self, head_embedding, tail_embedding, relation_embedding):
        lhs = head_embedding
        rhs = tail_embedding
        rel = relation_embedding
        time = self.tkbc_model.embeddings[2].weight
        lhs = lhs[:, :self.tkbc_model.rank], lhs[:, self.tkbc_model.rank:]
        rel = rel[:, :self.tkbc_model.rank], rel[:, self.tkbc_model.rank:]
        rhs = rhs[:, :self.tkbc_model.rank], rhs[:, self.tkbc_model.rank:]
        time = time[:, :self.tkbc_model.rank], time[:, self.tkbc_model.rank:]
        return (
            (lhs[0] * rel[0] * rhs[0] - lhs[1] * rel[1] * rhs[0] -
             lhs[1] * rel[0] * rhs[1] + lhs[0] * rel[1] * rhs[1]) @ time[0].t() +
            (lhs[1] * rel[0] * rhs[0] - lhs[0] * rel[1] * rhs[0] +
             lhs[0] * rel[0] * rhs[1] - lhs[1] * rel[1] * rhs[1]) @ time[1].t()
        )

    def score_entity(self, head_embedding, tail_embedding, relation_embedding, time_embedding):
        lhs = head_embedding[:, :self.tkbc_model.rank], head_embedding[:, self.tkbc_model.rank:]
        rel = relation_embedding
        time = time_embedding
        rel = rel[:, :self.tkbc_model.rank], rel[:, self.tkbc_model.rank:]
        time = time[:, :self.tkbc_model.rank], time[:, self.tkbc_model.rank:]
        right = self.tkbc_model.embeddings[0].weight
        # right = self.entity_time_embedding.weight
        right = right[:, :self.tkbc_model.rank], right[:, self.tkbc_model.rank:]
        rt = rel[0] * time[0], rel[1] * time[0], rel[0] * time[1], rel[1] * time[1]
        full_rel = rt[0] - rt[3], rt[1] + rt[2]
        return (
            (lhs[0] * full_rel[0] - lhs[1] * full_rel[1]) @ right[0].t() +
            (lhs[1] * full_rel[0] + lhs[0] * full_rel[1]) @ right[1].t()
        )

    def forward(self, a):
        # Tokenized questions, where entities are masked from the sentence to get TKG embeddings
        question_tokenized = a[0].cuda()
        question_attention_mask = a[1].cuda()
        entities_times_padded = a[2].cuda()
        entity_mask_padded = a[3].cuda()
        # Annotated entities/timestamps
        heads = a[4].cuda()
        tails = a[5].cuda()
        times = a[6].cuda()
        # t1 and t2 from Hard Supervision
        t1 = a[7].cuda()
        t2 = a[8].cuda()
        # One extra entity for the new before & after question type
        tails2 = a[9].cuda()

        # TKG embeddings
        head_embedding = self.entity_time_embedding(heads)
        tail_embedding = self.entity_time_embedding(tails)
        tail_embedding2 = self.entity_time_embedding(tails2)
        time_embedding = self.entity_time_embedding(times)

        # Hard Supervision
        t1_emb = self.tkbc_model.embeddings[2](t1)
        t2_emb = self.tkbc_model.embeddings[2](t2)

        # entity embeddings to substitute into the sentence
        entity_time_embedding = self.entity_time_embedding(entities_times_padded)

        # context-aware step
        outputs = self.lm_model(question_tokenized, attention_mask=question_attention_mask)
        last_hidden_states = outputs.last_hidden_state

        if self.supervision == 'soft':
            # infer time embeddings for t1 and t2 with Soft Supervision
            lm_last_hidden_states = outputs[0]
            states = lm_last_hidden_states.transpose(1, 0)
            cls_embedding = states[0]
            cls_embedding = self.linearT(cls_embedding)
            t1_emb = self.infer_time(head_embedding, tail_embedding, cls_embedding)
            t2_emb = self.infer_time(tail_embedding, head_embedding, cls_embedding)
            if self.extra_entities and not self.training:
                # this is for the created before & after questions - we expect one more entity
                t3_emb = self.infer_time(tail_embedding2, head_embedding, cls_embedding)
                t2_emb = t2_emb + t3_emb  # for simplicity; can be omitted and treated as t3_emb

        # entity-aware step
        question_embedding = self.project_sentence_to_transformer_dim(last_hidden_states)
        entity_mask = entity_mask_padded.unsqueeze(-1).expand(question_embedding.shape)
        masked_question_embedding = question_embedding * entity_mask  # zero out the entity positions
        entity_time_embedding_projected = self.project_entity(entity_time_embedding)

        # time-aware step
        if self.model == 'tempoqr':
            time_pos_embeddings1 = t1_emb.unsqueeze(0).transpose(0, 1)
            time_pos_embeddings1 = time_pos_embeddings1.expand(entity_time_embedding_projected.shape)
            time_pos_embeddings2 = t2_emb.unsqueeze(0).transpose(0, 1)
            time_pos_embeddings2 = time_pos_embeddings2.expand(entity_time_embedding_projected.shape)
            if self.fuse == 'cat':
                entity_time_embedding_projected = self.lin_cat(torch.cat(
                    (entity_time_embedding_projected, time_pos_embeddings1, time_pos_embeddings2), dim=-1))
            else:
                entity_time_embedding_projected = (entity_time_embedding_projected
                                                   + time_pos_embeddings1 + time_pos_embeddings2)

        # Transformer information fusion layer
        masked_entity_time_embedding = entity_time_embedding_projected * self.invert_binary_tensor(entity_mask)
        combined_embed = masked_question_embedding + masked_entity_time_embedding
        # also need to add the position embedding
        sequence_length = combined_embed.shape[1]
        v = np.arange(0, sequence_length, dtype=np.int64)  # np.long was removed in NumPy 1.24; int64 matches it
        indices_for_position_embedding = torch.from_numpy(v).cuda()
        position_embedding = self.position_embedding(indices_for_position_embedding)
        position_embedding = position_embedding.unsqueeze(0).expand(combined_embed.shape)
        combined_embed = combined_embed + position_embedding

        combined_embed = self.layer_norm(combined_embed)
        combined_embed = torch.transpose(combined_embed, 0, 1)
        mask2 = ~(question_attention_mask.bool()).cuda()
        output = self.transformer_encoder(combined_embed, src_key_padding_mask=mask2)

        # Answer Predictions
        if self.model == 'cronkgqa':
            # no transformer; go back and use cls_embedding
            relation_embedding = cls_embedding
        else:
            relation_embedding = output[0]  # self.linear(output[0])  # cls token embedding
        relation_embedding1 = self.dropout(self.bn1(self.linear1(relation_embedding)))
        # note: bn1 is reused here; bn2 is defined above but never applied
        relation_embedding2 = self.dropout(self.bn1(self.linear2(relation_embedding)))

        scores_time = self.score_time(head_embedding, tail_embedding, relation_embedding1)
        if self.model == 'cronkgqa' or (self.model == 'entityqr' and self.supervision != 'none'):
            # supervision for cronkgqa and entityqr
            time_embedding = (time_embedding + t1_emb + t2_emb) / 3  # just take the mean
        scores_entity1 = self.score_entity(head_embedding, tail_embedding, relation_embedding2, time_embedding)
        scores_entity2 = self.score_entity(tail_embedding, head_embedding, relation_embedding2, time_embedding)
        scores_entity = torch.maximum(scores_entity1, scores_entity2)

        scores = torch.cat((scores_entity, scores_time), dim=1)
        return scores
11,619
39.347222
147
py
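The time-aware step in QA_TempoQR.forward fuses the two supervision time embeddings into every entity/time token embedding, either by summation (fuse='add') or by concatenation followed by a linear projection back to the model width (fuse='cat'). A minimal standalone sketch of the two fusion modes; the tensor names follow the module, the dimensions are illustrative, and unsqueeze(1) here is equivalent to the module's unsqueeze(0).transpose(0, 1):

import torch
from torch import nn

batch, seq_len, dim = 2, 6, 512                # illustrative sizes
ent = torch.randn(batch, seq_len, dim)         # entity_time_embedding_projected
t1 = torch.randn(batch, dim)                   # t1_emb
t2 = torch.randn(batch, dim)                   # t2_emb
lin_cat = nn.Linear(3 * dim, dim)              # self.lin_cat

# fuse == 'add': broadcast both time embeddings over the sequence and sum
fused_add = ent + t1.unsqueeze(1) + t2.unsqueeze(1)

# fuse == 'cat': concatenate along the feature axis, then project back to dim
t1_exp = t1.unsqueeze(1).expand_as(ent)
t2_exp = t2.unsqueeze(1).expand_as(ent)
fused_cat = lin_cat(torch.cat((ent, t1_exp, t2_exp), dim=-1))

assert fused_add.shape == fused_cat.shape == (batch, seq_len, dim)

Both variants keep the sequence shape unchanged, so the downstream masking and transformer encoder are agnostic to which fusion mode is used.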
TempoQR
TempoQR-main/tcomplex.py
# Copyright (c) Facebook, Inc. and its affiliates.

from abc import ABC, abstractmethod
from typing import Tuple, List, Dict
import math

import torch
from torch import nn
import numpy as np


class TKBCModel(nn.Module, ABC):
    @abstractmethod
    def get_rhs(self, chunk_begin: int, chunk_size: int):
        pass

    @abstractmethod
    def get_queries(self, queries: torch.Tensor):
        pass

    @abstractmethod
    def score(self, x: torch.Tensor):
        pass

    @abstractmethod
    def forward_over_time(self, x: torch.Tensor):
        pass

    def get_ranking(
            self, queries: torch.Tensor,
            filters: Dict[Tuple[int, int, int], List[int]],
            batch_size: int = 1000, chunk_size: int = -1
    ):
        """
        Returns the filtered ranking for each query.
        :param queries: a torch.LongTensor of quadruples (lhs, rel, rhs, timestamp)
        :param filters: filters[(lhs, rel, ts)] gives the elements to filter from ranking
        :param batch_size: maximum number of queries processed at once
        :param chunk_size: maximum number of candidates processed at once
        :return:
        """
        if chunk_size < 0:
            chunk_size = self.sizes[2]
        ranks = torch.ones(len(queries))
        with torch.no_grad():
            c_begin = 0
            while c_begin < self.sizes[2]:
                b_begin = 0
                rhs = self.get_rhs(c_begin, chunk_size)
                while b_begin < len(queries):
                    these_queries = queries[b_begin:b_begin + batch_size]
                    q = self.get_queries(these_queries)

                    scores = q @ rhs
                    targets = self.score(these_queries)
                    assert not torch.any(torch.isinf(scores)), "inf scores"
                    assert not torch.any(torch.isnan(scores)), "nan scores"
                    assert not torch.any(torch.isinf(targets)), "inf targets"
                    assert not torch.any(torch.isnan(targets)), "nan targets"

                    # set filtered and true scores to -1e6 so they are ignored;
                    # take care that scores are chunked
                    for i, query in enumerate(these_queries):
                        filter_out = filters[(query[0].item(), query[1].item(), query[3].item())]
                        filter_out += [queries[b_begin + i, 2].item()]
                        if chunk_size < self.sizes[2]:
                            filter_in_chunk = [
                                int(x - c_begin) for x in filter_out
                                if c_begin <= x < c_begin + chunk_size
                            ]
                            scores[i, torch.LongTensor(filter_in_chunk)] = -1e6
                        else:
                            scores[i, torch.LongTensor(filter_out)] = -1e6
                    ranks[b_begin:b_begin + batch_size] += torch.sum(
                        (scores >= targets).float(), dim=1
                    ).cpu()

                    b_begin += batch_size
                c_begin += chunk_size
        return ranks

    def get_auc(
            self, queries: torch.Tensor, batch_size: int = 1000
    ):
        """
        Returns scores over all timestamps and the corresponding truth labels for each query.
        :param queries: a torch.LongTensor of quintuples (lhs, rel, rhs, begin, end)
        :param batch_size: maximum number of queries processed at once
        :return:
        """
        all_scores, all_truth = [], []
        all_ts_ids = None
        with torch.no_grad():
            b_begin = 0
            while b_begin < len(queries):
                these_queries = queries[b_begin:b_begin + batch_size]
                scores = self.forward_over_time(these_queries)
                all_scores.append(scores.cpu().numpy())
                if all_ts_ids is None:
                    all_ts_ids = torch.arange(0, scores.shape[1]).cuda()[None, :]
                assert not torch.any(torch.isinf(scores) + torch.isnan(scores)), "inf or nan scores"
                truth = (all_ts_ids <= these_queries[:, 4][:, None]) * (all_ts_ids >= these_queries[:, 3][:, None])
                all_truth.append(truth.cpu().numpy())
                b_begin += batch_size
        return np.concatenate(all_truth), np.concatenate(all_scores)

    def get_time_ranking(
            self, queries: torch.Tensor, filters: List[List[int]], chunk_size: int = -1
    ):
        """
        Returns the filtered ranking for a batch of queries ordered by timestamp.
        :param queries: a torch.LongTensor of quadruples (lhs, rel, rhs, timestamp)
        :param filters: ordered filters
        :param chunk_size: maximum number of candidates processed at once
        :return:
        """
        if chunk_size < 0:
            chunk_size = self.sizes[2]
        ranks = torch.ones(len(queries))
        with torch.no_grad():
            c_begin = 0
            q = self.get_queries(queries)
            targets = self.score(queries)
            while c_begin < self.sizes[2]:
                rhs = self.get_rhs(c_begin, chunk_size)
                scores = q @ rhs
                # set filtered and true scores to -1e6 so they are ignored;
                # take care that scores are chunked
                for i, (query, filter) in enumerate(zip(queries, filters)):
                    filter_out = filter + [query[2].item()]
                    if chunk_size < self.sizes[2]:
                        filter_in_chunk = [
                            int(x - c_begin) for x in filter_out
                            if c_begin <= x < c_begin + chunk_size
                        ]
                        max_to_filter = max(filter_in_chunk + [-1])
                        assert max_to_filter < scores.shape[1], \
                            f"filter index {max_to_filter} out of range for {scores.shape[1]} candidates"
                        scores[i, filter_in_chunk] = -1e6
                    else:
                        scores[i, filter_out] = -1e6
                ranks += torch.sum(
                    (scores >= targets).float(), dim=1
                ).cpu()
                c_begin += chunk_size
        return ranks


class ComplEx(TKBCModel):
    def __init__(
            self, sizes: Tuple[int, int, int, int], rank: int,
            init_size: float = 1e-3
    ):
        super(ComplEx, self).__init__()
        self.sizes = sizes
        self.rank = rank

        self.embeddings = nn.ModuleList([
            nn.Embedding(s, 2 * rank, sparse=True)
            for s in [sizes[0], sizes[1]]
        ])
        self.embeddings[0].weight.data *= init_size
        self.embeddings[1].weight.data *= init_size

    @staticmethod
    def has_time():
        return False

    def forward_over_time(self, x):
        raise NotImplementedError("no.")

    def score(self, x):
        lhs = self.embeddings[0](x[:, 0])
        rel = self.embeddings[1](x[:, 1])
        rhs = self.embeddings[0](x[:, 2])

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        rhs = rhs[:, :self.rank], rhs[:, self.rank:]

        return torch.sum(
            (lhs[0] * rel[0] - lhs[1] * rel[1]) * rhs[0] +
            (lhs[0] * rel[1] + lhs[1] * rel[0]) * rhs[1],
            1, keepdim=True
        )

    def forward(self, x):
        lhs = self.embeddings[0](x[:, 0])
        rel = self.embeddings[1](x[:, 1])
        rhs = self.embeddings[0](x[:, 2])

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        rhs = rhs[:, :self.rank], rhs[:, self.rank:]

        right = self.embeddings[0].weight
        right = right[:, :self.rank], right[:, self.rank:]
        return (
            (lhs[0] * rel[0] - lhs[1] * rel[1]) @ right[0].transpose(0, 1) +
            (lhs[0] * rel[1] + lhs[1] * rel[0]) @ right[1].transpose(0, 1)
        ), (
            torch.sqrt(lhs[0] ** 2 + lhs[1] ** 2),
            torch.sqrt(rel[0] ** 2 + rel[1] ** 2),
            torch.sqrt(rhs[0] ** 2 + rhs[1] ** 2)
        ), None

    def get_rhs(self, chunk_begin: int, chunk_size: int):
        return self.embeddings[0].weight.data[
            chunk_begin:chunk_begin + chunk_size
        ].transpose(0, 1)

    def get_queries(self, queries: torch.Tensor):
        lhs = self.embeddings[0](queries[:, 0])
        rel = self.embeddings[1](queries[:, 1])
        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]

        return torch.cat([
            lhs[0] * rel[0] - lhs[1] * rel[1],
            lhs[0] * rel[1] + lhs[1] * rel[0]
        ], 1)


class TComplEx(TKBCModel):
    def __init__(
            self, sizes: Tuple[int, int, int, int], rank: int,
            no_time_emb=False, init_size: float = 1e-2
    ):
        super(TComplEx, self).__init__()
        self.sizes = sizes
        self.rank = rank

        self.embeddings = nn.ModuleList([
            nn.Embedding(s, 2 * rank, sparse=True)
            for s in [sizes[0], sizes[1], sizes[3]]
        ])
        self.embeddings[0].weight.data *= init_size
        self.embeddings[1].weight.data *= init_size
        self.embeddings[2].weight.data *= init_size

        self.no_time_emb = no_time_emb

    @staticmethod
    def has_time():
        return True

    def score(self, x):
        lhs = self.embeddings[0](x[:, 0])
        rel = self.embeddings[1](x[:, 1])
        rhs = self.embeddings[0](x[:, 2])
        time = self.embeddings[2](x[:, 3])

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        rhs = rhs[:, :self.rank], rhs[:, self.rank:]
        time = time[:, :self.rank], time[:, self.rank:]

        return torch.sum(
            (lhs[0] * rel[0] * time[0] - lhs[1] * rel[1] * time[0] -
             lhs[1] * rel[0] * time[1] - lhs[0] * rel[1] * time[1]) * rhs[0] +
            (lhs[1] * rel[0] * time[0] + lhs[0] * rel[1] * time[0] +
             lhs[0] * rel[0] * time[1] - lhs[1] * rel[1] * time[1]) * rhs[1],
            1, keepdim=True
        )

    def forward(self, x):
        lhs = self.embeddings[0](x[:, 0])
        rel = self.embeddings[1](x[:, 1])
        rhs = self.embeddings[0](x[:, 2])
        time = self.embeddings[2](x[:, 3])

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        rhs = rhs[:, :self.rank], rhs[:, self.rank:]
        time = time[:, :self.rank], time[:, self.rank:]

        right = self.embeddings[0].weight
        right = right[:, :self.rank], right[:, self.rank:]

        rt = rel[0] * time[0], rel[1] * time[0], rel[0] * time[1], rel[1] * time[1]
        full_rel = rt[0] - rt[3], rt[1] + rt[2]

        return (
            (lhs[0] * full_rel[0] - lhs[1] * full_rel[1]) @ right[0].t() +
            (lhs[1] * full_rel[0] + lhs[0] * full_rel[1]) @ right[1].t()
        ), (
            torch.sqrt(lhs[0] ** 2 + lhs[1] ** 2),
            torch.sqrt(full_rel[0] ** 2 + full_rel[1] ** 2),
            torch.sqrt(rhs[0] ** 2 + rhs[1] ** 2)
        ), self.embeddings[2].weight[:-1] if self.no_time_emb else self.embeddings[2].weight

    def forward_over_time(self, x):
        lhs = self.embeddings[0](x[:, 0])
        rel = self.embeddings[1](x[:, 1])
        rhs = self.embeddings[0](x[:, 2])
        time = self.embeddings[2].weight

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        rhs = rhs[:, :self.rank], rhs[:, self.rank:]
        time = time[:, :self.rank], time[:, self.rank:]

        return (
            (lhs[0] * rel[0] * rhs[0] - lhs[1] * rel[1] * rhs[0] -
             lhs[1] * rel[0] * rhs[1] + lhs[0] * rel[1] * rhs[1]) @ time[0].t() +
            (lhs[1] * rel[0] * rhs[0] - lhs[0] * rel[1] * rhs[0] +
             lhs[0] * rel[0] * rhs[1] - lhs[1] * rel[1] * rhs[1]) @ time[1].t()
        )

    def get_rhs(self, chunk_begin: int, chunk_size: int):
        return self.embeddings[0].weight.data[
            chunk_begin:chunk_begin + chunk_size
        ].transpose(0, 1)

    def get_queries(self, queries: torch.Tensor):
        lhs = self.embeddings[0](queries[:, 0])
        rel = self.embeddings[1](queries[:, 1])
        time = self.embeddings[2](queries[:, 3])

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        time = time[:, :self.rank], time[:, self.rank:]

        return torch.cat([
            lhs[0] * rel[0] * time[0] - lhs[1] * rel[1] * time[0] -
            lhs[1] * rel[0] * time[1] - lhs[0] * rel[1] * time[1],
            lhs[1] * rel[0] * time[0] + lhs[0] * rel[1] * time[0] +
            lhs[0] * rel[0] * time[1] - lhs[1] * rel[1] * time[1]
        ], 1)


class TNTComplEx(TKBCModel):
    def __init__(
            self, sizes: Tuple[int, int, int, int], rank: int,
            no_time_emb=False, init_size: float = 1e-2
    ):
        super(TNTComplEx, self).__init__()
        self.sizes = sizes
        self.rank = rank

        self.embeddings = nn.ModuleList([
            nn.Embedding(s, 2 * rank, sparse=True)
            # the last embedding module contains the no_time embeddings
            for s in [sizes[0], sizes[1], sizes[3], sizes[1]]
        ])
        self.embeddings[0].weight.data *= init_size
        self.embeddings[1].weight.data *= init_size
        self.embeddings[2].weight.data *= init_size
        self.embeddings[3].weight.data *= init_size

        self.no_time_emb = no_time_emb

    @staticmethod
    def has_time():
        return True

    def score(self, x):
        lhs = self.embeddings[0](x[:, 0])
        rel = self.embeddings[1](x[:, 1])
        rel_no_time = self.embeddings[3](x[:, 1])
        rhs = self.embeddings[0](x[:, 2])
        time = self.embeddings[2](x[:, 3])

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        rhs = rhs[:, :self.rank], rhs[:, self.rank:]
        time = time[:, :self.rank], time[:, self.rank:]
        rnt = rel_no_time[:, :self.rank], rel_no_time[:, self.rank:]

        rt = rel[0] * time[0], rel[1] * time[0], rel[0] * time[1], rel[1] * time[1]
        full_rel = (rt[0] - rt[3]) + rnt[0], (rt[1] + rt[2]) + rnt[1]

        return torch.sum(
            (lhs[0] * full_rel[0] - lhs[1] * full_rel[1]) * rhs[0] +
            (lhs[1] * full_rel[0] + lhs[0] * full_rel[1]) * rhs[1],
            1, keepdim=True
        )

    def forward(self, x):
        lhs = self.embeddings[0](x[:, 0])
        rel = self.embeddings[1](x[:, 1])
        rel_no_time = self.embeddings[3](x[:, 1])
        rhs = self.embeddings[0](x[:, 2])
        time = self.embeddings[2](x[:, 3])

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        rhs = rhs[:, :self.rank], rhs[:, self.rank:]
        time = time[:, :self.rank], time[:, self.rank:]
        rnt = rel_no_time[:, :self.rank], rel_no_time[:, self.rank:]

        right = self.embeddings[0].weight
        right = right[:, :self.rank], right[:, self.rank:]

        rt = rel[0] * time[0], rel[1] * time[0], rel[0] * time[1], rel[1] * time[1]
        rrt = rt[0] - rt[3], rt[1] + rt[2]
        full_rel = rrt[0] + rnt[0], rrt[1] + rnt[1]

        regularizer = (
            math.pow(2, 1 / 3) * torch.sqrt(lhs[0] ** 2 + lhs[1] ** 2),
            torch.sqrt(rrt[0] ** 2 + rrt[1] ** 2),
            torch.sqrt(rnt[0] ** 2 + rnt[1] ** 2),
            math.pow(2, 1 / 3) * torch.sqrt(rhs[0] ** 2 + rhs[1] ** 2)
        )
        return ((
            (lhs[0] * full_rel[0] - lhs[1] * full_rel[1]) @ right[0].t() +
            (lhs[1] * full_rel[0] + lhs[0] * full_rel[1]) @ right[1].t()
        ), regularizer,
            self.embeddings[2].weight[:-1] if self.no_time_emb else self.embeddings[2].weight
        )

    def forward_over_time(self, x):
        lhs = self.embeddings[0](x[:, 0])
        rel = self.embeddings[1](x[:, 1])
        rhs = self.embeddings[0](x[:, 2])
        time = self.embeddings[2].weight

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        rhs = rhs[:, :self.rank], rhs[:, self.rank:]
        time = time[:, :self.rank], time[:, self.rank:]

        rel_no_time = self.embeddings[3](x[:, 1])
        rnt = rel_no_time[:, :self.rank], rel_no_time[:, self.rank:]

        score_time = (
            (lhs[0] * rel[0] * rhs[0] - lhs[1] * rel[1] * rhs[0] -
             lhs[1] * rel[0] * rhs[1] + lhs[0] * rel[1] * rhs[1]) @ time[0].t() +
            (lhs[1] * rel[0] * rhs[0] - lhs[0] * rel[1] * rhs[0] +
             lhs[0] * rel[0] * rhs[1] - lhs[1] * rel[1] * rhs[1]) @ time[1].t()
        )
        base = torch.sum(
            (lhs[0] * rnt[0] * rhs[0] - lhs[1] * rnt[1] * rhs[0] -
             lhs[1] * rnt[0] * rhs[1] + lhs[0] * rnt[1] * rhs[1]) +
            (lhs[1] * rnt[1] * rhs[0] - lhs[0] * rnt[0] * rhs[0] +
             lhs[0] * rnt[1] * rhs[1] - lhs[1] * rnt[0] * rhs[1]),
            dim=1, keepdim=True
        )
        return score_time + base

    def get_rhs(self, chunk_begin: int, chunk_size: int):
        return self.embeddings[0].weight.data[
            chunk_begin:chunk_begin + chunk_size
        ].transpose(0, 1)

    def get_queries(self, queries: torch.Tensor):
        lhs = self.embeddings[0](queries[:, 0])
        rel = self.embeddings[1](queries[:, 1])
        rel_no_time = self.embeddings[3](queries[:, 1])
        time = self.embeddings[2](queries[:, 3])

        lhs = lhs[:, :self.rank], lhs[:, self.rank:]
        rel = rel[:, :self.rank], rel[:, self.rank:]
        time = time[:, :self.rank], time[:, self.rank:]
        rnt = rel_no_time[:, :self.rank], rel_no_time[:, self.rank:]

        rt = rel[0] * time[0], rel[1] * time[0], rel[0] * time[1], rel[1] * time[1]
        full_rel = (rt[0] - rt[3]) + rnt[0], (rt[1] + rt[2]) + rnt[1]

        return torch.cat([
            lhs[0] * full_rel[0] - lhs[1] * full_rel[1],
            lhs[1] * full_rel[0] + lhs[0] * full_rel[1]
        ], 1)
18,136
38.004301
115
py
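TComplEx stores each entity, relation, and timestamp as a 2*rank vector, splits it into real and imaginary halves, and scores a quadruple (head, relation, tail, timestamp) with a time-modulated complex trilinear product. A small smoke test of the class as defined above, assuming tcomplex.py is importable; the sizes and random ids are arbitrary:

import torch
from tcomplex import TComplEx

# sizes = (num_entities, num_relations, num_entities, num_timestamps)
model = TComplEx(sizes=(50, 10, 50, 20), rank=16)

# a batch of 4 random (lhs, rel, rhs, timestamp) quadruples
x = torch.stack([
    torch.randint(0, 50, (4,)),   # lhs entity ids
    torch.randint(0, 10, (4,)),   # relation ids
    torch.randint(0, 50, (4,)),   # rhs entity ids
    torch.randint(0, 20, (4,)),   # timestamp ids
], dim=1)

scores = model.score(x)                       # one score per quadruple
preds, factors, time_emb = model.forward(x)   # scores against every entity
assert scores.shape == (4, 1)
assert preds.shape == (4, 50)

forward returns the regularizer factors and the time-embedding matrix alongside the predictions, which is why QA_TempoQR above reads tkbc_model.embeddings[2].weight directly when it needs timestamp embeddings.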
bonito
bonito-master/setup.py
import os
import re
from setuptools import setup, find_packages
from setuptools.command.install import install

__pkg_name__ = 'bonito'
require_file = 'requirements.txt'
package_name = "ont-%s" % __pkg_name__

verstrline = open(os.path.join(__pkg_name__, '__init__.py'), 'r').read()
vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(vsre, verstrline, re.M)
if mo:
    __version__ = mo.group(1)
else:
    raise RuntimeError('Unable to find version string in "{}/__init__.py".'.format(__pkg_name__))

with open(require_file) as f:
    requirements = [r.split()[0] for r in f.read().splitlines()]

with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name=package_name,
    version=__version__,
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Oxford Nanopore Technologies, Ltd',
    author_email='support@nanoporetech.com',
    url='https://github.com/nanoporetech/bonito',
    entry_points={
        'console_scripts': [
            '{0} = {0}:main'.format(__pkg_name__)
        ]
    },
    dependency_links=[
        'https://download.pytorch.org/whl/cu113',
    ]
)
1,270
26.042553
97
py
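This setup script reads the package version with a multiline regex instead of importing the package, so installation does not require the package's own dependencies to be importable. A standalone sketch of that pattern; the sample string is hypothetical:

import re

vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
sample = "import os\n__version__ = '0.5.1'\n"
match = re.search(vsre, sample, re.M)  # re.M makes ^ match at every line start
print(match.group(1))  # -> 0.5.1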