code
stringlengths
17
6.64M
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def quality_focal_loss(pred, target, beta=2.0):
    """Quality Focal Loss (QFL) from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted joint representation of classification
            and quality (IoU) estimation, shape (N, C) with C classes
            (logits, sigmoid is applied internally).
        target (tuple([torch.Tensor])): Category label with shape (N,) and
            quality label with shape (N,).
        beta (float): Exponent of the modulating factor. Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, 'target for QFL must be a tuple of two elements,\n including category label and quality label, respectively'
    label, score = target

    # All entries start as negatives: supervised towards 0 quality and
    # modulated by sigmoid(pred)^beta.
    prob = pred.sigmoid()
    neg_target = prob.new_zeros(pred.shape)
    loss = F.binary_cross_entropy_with_logits(
        pred, neg_target, reduction='none') * prob.pow(beta)

    # Positive entries: labels lying in [0, num_classes).
    num_classes = pred.size(1)
    pos = ((label >= 0) & (label < num_classes)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    # Positives are supervised towards their quality score, modulated by
    # |score - sigmoid(pred)|^beta.
    gap = score[pos] - prob[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
        pred[pos, pos_label], score[pos],
        reduction='none') * gap.abs().pow(beta)

    return loss.sum(dim=1, keepdim=False)
@weighted_loss
def quality_focal_loss_with_prob(pred, target, beta=2.0):
    """Quality Focal Loss (QFL) from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Different from `quality_focal_loss`, this function accepts probabilities
    (already activated) as input instead of logits.

    Args:
        pred (torch.Tensor): Predicted joint representation of classification
            and quality (IoU) estimation, shape (N, C) with C classes
            (probabilities).
        target (tuple([torch.Tensor])): Category label with shape (N,) and
            quality label with shape (N,).
        beta (float): Exponent of the modulating factor. Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, 'target for QFL must be a tuple of two elements,\n including category label and quality label, respectively'
    label, score = target

    # All entries start as negatives: BCE towards 0, modulated by pred^beta.
    # `pred` is already a probability here, so no sigmoid is applied.
    prob = pred
    neg_target = prob.new_zeros(pred.shape)
    loss = F.binary_cross_entropy(
        pred, neg_target, reduction='none') * prob.pow(beta)

    # Positive entries: labels lying in [0, num_classes).
    num_classes = pred.size(1)
    pos = ((label >= 0) & (label < num_classes)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    # Positives are supervised towards their quality score, modulated by
    # |score - pred|^beta.
    gap = score[pos] - prob[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy(
        pred[pos, pos_label], score[pos],
        reduction='none') * gap.abs().pow(beta)

    return loss.sum(dim=1, keepdim=False)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def distribution_focal_loss(pred, label):
    """Distribution Focal Loss (DFL) from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted general distribution of bounding boxes
            (before softmax) with shape (N, n+1), n is the max value of the
            integral set `{0, ..., n}` in the paper.
        label (torch.Tensor): Target distance label for bounding boxes with
            shape (N,).

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    # Each continuous label y is split between its two nearest integers
    # floor(y) and floor(y) + 1, weighted by proximity to each.
    lower = label.long()
    upper = lower + 1
    w_lower = upper.float() - label
    w_upper = label - lower.float()
    ce_lower = F.cross_entropy(pred, lower, reduction='none')
    ce_upper = F.cross_entropy(pred, upper, reduction='none')
    return ce_lower * w_lower + ce_upper * w_upper
@LOSSES.register_module()
class QualityFocalLoss(nn.Module):
    """Quality Focal Loss (QFL), a variant of `Generalized Focal Loss:
    Learning Qualified and Distributed Bounding Boxes for Dense Object
    Detection <https://arxiv.org/abs/2006.04388>`_.

    Args:
        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
            Defaults to True.
        beta (float): The beta parameter for calculating the modulating
            factor. Defaults to 2.0.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
        activated (bool, optional): Whether the input is activated.
            If True, the input has been activated and can be treated as
            probabilities. Else, it should be treated as logits.
            Defaults to False.
    """

    def __init__(self,
                 use_sigmoid=True,
                 beta=2.0,
                 reduction='mean',
                 loss_weight=1.0,
                 activated=False):
        super(QualityFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.activated = activated

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Predicted joint representation of
                classification and quality (IoU) estimation with shape
                (N, C), C is the number of classes.
            target (tuple([torch.Tensor])): Target category label with shape
                (N,) and target quality label with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if not self.use_sigmoid:
            raise NotImplementedError
        # Pick the probability-based variant when the input is pre-activated.
        loss_fn = (
            quality_focal_loss_with_prob
            if self.activated else quality_focal_loss)
        return self.loss_weight * loss_fn(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor)
@LOSSES.register_module()
class DistributionFocalLoss(nn.Module):
    """Distribution Focal Loss (DFL), a variant of `Generalized Focal Loss:
    Learning Qualified and Distributed Bounding Boxes for Dense Object
    Detection <https://arxiv.org/abs/2006.04388>`_.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(DistributionFocalLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Predicted general distribution of bounding
                boxes (before softmax) with shape (N, n+1), n is the max
                value of the integral set `{0, ..., n}` in the paper.
            target (torch.Tensor): Target distance label for bounding boxes
                with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * distribution_focal_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(((labels >= 0) & (labels < label_channels)), as_tuple=False).squeeze() if (inds.numel() > 0): bin_labels[(inds, labels[inds])] = 1 bin_label_weights = label_weights.view((- 1), 1).expand(label_weights.size(0), label_channels) return (bin_labels, bin_label_weights)
@LOSSES.register_module()
class GHMC(nn.Module):
    """GHM Classification Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        use_sigmoid (bool): Can only be true for BCE based loss now.
        loss_weight (float): The weight of the total GHM-C loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self,
                 bins=10,
                 momentum=0,
                 use_sigmoid=True,
                 loss_weight=1.0,
                 reduction='mean'):
        super(GHMC, self).__init__()
        self.bins = bins
        self.momentum = momentum
        # Bin edges partition the gradient-norm range [0, 1] into `bins`
        # equal regions; registered as a buffer so it moves with .to(device).
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        # Nudge the last edge past 1.0 so g == 1 falls into the last bin
        # (the binning test below is half-open: edges[i] <= g < edges[i+1]).
        self.edges[-1] += 1e-06
        if momentum > 0:
            # Exponential moving average of per-bin sample counts.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.use_sigmoid = use_sigmoid
        if not self.use_sigmoid:
            raise NotImplementedError
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self,
                pred,
                target,
                label_weight,
                reduction_override=None,
                **kwargs):
        """Calculate the GHM-C loss.

        Args:
            pred (float tensor of size [batch_num, class_num]):
                The direct prediction of classification fc layer.
            target (float tensor of size [batch_num, class_num]):
                Binary class target for each sample.
            label_weight (float tensor of size [batch_num, class_num]):
                the value is 1 if the sample is valid and 0 if ignored.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        Returns:
            The gradient harmonized loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # Index labels (dim mismatch with pred) are expanded to one-hot.
        if pred.dim() != target.dim():
            target, label_weight = _expand_onehot_labels(
                target, label_weight, pred.size(-1))
        target, label_weight = target.float(), label_weight.float()
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(pred)

        # Gradient norm g = |sigmoid(pred) - target| for BCE; detached so the
        # harmonizing weights do not backpropagate.
        g = torch.abs(pred.sigmoid().detach() - target)

        valid = label_weight > 0
        tot = max(valid.float().sum().item(), 1.0)
        n = 0  # number of non-empty bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                if mmt > 0:
                    # EMA-smoothed density estimate for this bin.
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    # Weight inversely proportional to the bin population.
                    weights[inds] = tot / num_in_bin
                n += 1
        if n > 0:
            # Normalize so the weights average to ~1 over occupied bins.
            weights = weights / n

        loss = F.binary_cross_entropy_with_logits(
            pred, target, reduction='none')
        loss = weight_reduce_loss(
            loss, weights, reduction=reduction, avg_factor=tot)
        return loss * self.loss_weight
@LOSSES.register_module()
class GHMR(nn.Module):
    """GHM Regression Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        mu (float): The parameter for the Authentic Smooth L1 loss.
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        loss_weight (float): The weight of the total GHM-R loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self,
                 mu=0.02,
                 bins=10,
                 momentum=0,
                 loss_weight=1.0,
                 reduction='mean'):
        super(GHMR, self).__init__()
        self.mu = mu
        self.bins = bins
        # Bin edges over the gradient-norm range; buffer so it follows
        # .to(device).
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        # Last edge set far beyond 1 so every g lands in some bin.
        self.edges[-1] = 1000.0
        self.momentum = momentum
        if momentum > 0:
            # Exponential moving average of per-bin sample counts.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self,
                pred,
                target,
                label_weight,
                avg_factor=None,
                reduction_override=None):
        """Calculate the GHM-R loss.

        Args:
            pred (float tensor of size [batch_num, 4 (* class_num)]):
                The prediction of box regression layer. Channel number can
                be 4 or 4 * class_num depending on whether it is
                class-agnostic.
            target (float tensor of size [batch_num, 4 (* class_num)]):
                The target regression values with the same size of pred.
            label_weight (float tensor of size [batch_num, 4 (* class_num)]):
                The weight of each sample, 0 if ignored.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        Returns:
            The gradient harmonized loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        mu = self.mu
        edges = self.edges
        mmt = self.momentum

        # Authentic Smooth L1 (ASL1) loss: sqrt(d^2 + mu^2) - mu.
        diff = pred - target
        loss = torch.sqrt(diff * diff + mu * mu) - mu

        # Gradient norm of ASL1: |d| / sqrt(mu^2 + d^2), in [0, 1); detached
        # so the harmonizing weights do not backpropagate.
        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
        weights = torch.zeros_like(g)

        valid = label_weight > 0
        tot = max(label_weight.float().sum().item(), 1.0)
        n = 0  # number of non-empty bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                n += 1
                if mmt > 0:
                    # EMA-smoothed density estimate for this bin.
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    # Weight inversely proportional to the bin population.
                    weights[inds] = tot / num_in_bin
        if n > 0:
            # Normalize so the weights average to ~1 over occupied bins.
            weights /= n

        loss = weight_reduce_loss(
            loss, weights, reduction=reduction, avg_factor=tot)
        return loss * self.loss_weight
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
                                       soft_label,
                                       T,
                                       detach_target=True):
    """Knowledge-distillation loss using KL divergence.

    Args:
        pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, N + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert pred.size() == soft_label.size()
    soft_target = F.softmax(soft_label / T, dim=1)
    if detach_target:
        soft_target = soft_target.detach()
    log_p = F.log_softmax(pred / T, dim=1)
    # The T^2 factor keeps gradient magnitudes comparable across
    # temperatures (standard distillation scaling).
    return F.kl_div(log_p, soft_target, reduction='none').mean(1) * (T * T)
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
    """Loss function for knowledge distilling using KL divergence.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
        T (int): Temperature for distillation.
    """

    def __init__(self, reduction='mean', loss_weight=1.0, T=10):
        super(KnowledgeDistillationKLDivLoss, self).__init__()
        assert T >= 1
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.T = T

    def forward(self,
                pred,
                soft_label,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, N + 1).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * knowledge_distillation_kl_div_loss(
            pred,
            soft_label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            T=self.T)
@weighted_loss
def mse_loss(pred, target):
    """Wrapper of element-wise MSE loss (reduction handled by decorator)."""
    return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module()
class MSELoss(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss.
            Defaults to 1.0
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Weight of the loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * mse_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth L1 loss.

    Quadratic for |pred - target| < beta, linear beyond.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    if target.numel() == 0:
        # Empty target: return a zero that still participates in autograd.
        return pred.sum() * 0
    assert pred.size() == target.size()
    abs_diff = (pred - target).abs()
    quadratic = 0.5 * abs_diff * abs_diff / beta
    linear = abs_diff - 0.5 * beta
    return torch.where(abs_diff < beta, quadratic, linear)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
    """L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Calculated loss
    """
    if target.numel() == 0:
        # Empty target: return a zero that still participates in autograd.
        return pred.sum() * 0
    assert pred.size() == target.size()
    return (pred - target).abs()
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
    """Smooth L1 loss.

    Args:
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum". Defaults to "mean".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * smooth_l1_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
@LOSSES.register_module()
class L1Loss(nn.Module):
    """L1 loss.

    Args:
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * l1_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # get_enum validates the string and raises ValueError on unknown values;
    # the mapping is 0 -> none, 1 -> mean, 2 -> sum.
    enum = F._Reduction.get_enum(reduction)
    if enum == 1:
        return loss.mean()
    if enum == 2:
        return loss.sum()
    return loss
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    # Without an explicit averaging factor, fall back to plain reduction.
    if avg_factor is None:
        return reduce_loss(loss, reduction)
    # avg_factor only makes sense for 'mean' (custom denominator) or 'none'.
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will
    have the signature like `loss_func(pred, target, weight=None,
    reduction='mean', avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # Element-wise loss from the wrapped function, then weight & reduce.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
@mmcv.jit(derivate=True, coderize=True)
def varifocal_loss(pred,
                   target,
                   weight=None,
                   alpha=0.75,
                   gamma=2.0,
                   iou_weighted=True,
                   reduction='mean',
                   avg_factor=None):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of
            classes.
        weight (torch.Tensor, optional): The weight of loss for each
            prediction. Defaults to None.
        alpha (float, optional): A balance factor for the negative part of
            Varifocal Loss, which is different from the alpha of Focal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive example with the iou target. Defaults to True.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    assert pred.size() == target.size()
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # Positives (target > 0) keep full weight (optionally the IoU target
    # itself); negatives are down-weighted focal-style by
    # alpha * |p - target|^gamma.
    pos_mask = (target > 0.0).float()
    neg_part = (
        alpha * (pred_sigmoid - target).abs().pow(gamma)) * (
            target <= 0.0).float()
    if iou_weighted:
        focal_weight = target * pos_mask + neg_part
    else:
        focal_weight = pos_mask + neg_part
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
@LOSSES.register_module()
class VarifocalLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=True,
                 alpha=0.75,
                 gamma=2.0,
                 iou_weighted=True,
                 reduction='mean',
                 loss_weight=1.0):
        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

        Args:
            use_sigmoid (bool, optional): Whether the prediction is
                used for sigmoid or softmax. Defaults to True.
            alpha (float, optional): A balance factor for the negative part
                of Varifocal Loss, which is different from the alpha of
                Focal Loss. Defaults to 0.75.
            gamma (float, optional): The gamma for calculating the
                modulating factor. Defaults to 2.0.
            iou_weighted (bool, optional): Whether to weight the loss of the
                positive examples with the iou target. Defaults to True.
            reduction (str, optional): The method used to reduce the loss
                into a scalar. Defaults to 'mean'. Options are "none",
                "mean" and "sum".
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super(VarifocalLoss, self).__init__()
        assert use_sigmoid is True, \
            'Only sigmoid varifocal loss supported now.'
        assert alpha >= 0.0
        self.use_sigmoid = use_sigmoid
        self.alpha = alpha
        self.gamma = gamma
        self.iou_weighted = iou_weighted
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if not self.use_sigmoid:
            raise NotImplementedError
        return self.loss_weight * varifocal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            iou_weighted=self.iou_weighted,
            reduction=reduction,
            avg_factor=avg_factor)
@NECKS.register_module()
class BFP(BaseModule):
    """BFP (Balanced Feature Pyramids)

    BFP takes multi-level features as inputs and gathers them into a single
    one, then refines the gathered feature and scatters the refined result
    back to the multi-level features. Used in Libra R-CNN (CVPR 2019); see
    `Libra R-CNN: Towards Balanced Learning for Object Detection
    <https://arxiv.org/abs/1904.02701>`_ for details.

    Args:
        in_channels (int): Number of input channels (feature maps of all
            levels should have the same channels).
        num_levels (int): Number of input feature levels.
        conv_cfg (dict): The config dict for convolution layers.
        norm_cfg (dict): The config dict for normalization layers.
        refine_level (int): Index of integration and refine level of BSF in
            multi-level features from bottom to top.
        refine_type (str): Type of the refine op, currently support
            [None, 'conv', 'non_local'].
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 num_levels,
                 refine_level=2,
                 refine_type=None,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(BFP, self).__init__(init_cfg)
        assert refine_type in [None, 'conv', 'non_local']
        self.in_channels = in_channels
        self.num_levels = num_levels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.refine_level = refine_level
        self.refine_type = refine_type
        assert 0 <= self.refine_level < self.num_levels
        if self.refine_type == 'conv':
            self.refine = ConvModule(
                self.in_channels,
                self.in_channels,
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
        elif self.refine_type == 'non_local':
            self.refine = NonLocal2d(
                self.in_channels,
                reduction=1,
                use_scale=False,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)

    @staticmethod
    def _rescale(feat, size, down):
        """Resize ``feat`` to ``size``: max-pool to shrink, nearest to grow."""
        if down:
            return F.adaptive_max_pool2d(feat, output_size=size)
        return F.interpolate(feat, size=size, mode='nearest')

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_levels

        # Step 1: gather every level to the resolution of the refine level.
        # Levels below it are larger (pooled down); the rest are upsampled.
        target_size = inputs[self.refine_level].size()[2:]
        gathered = [
            self._rescale(feat, target_size, down=(lvl < self.refine_level))
            for lvl, feat in enumerate(inputs)
        ]

        # Step 2: integrate by averaging, then optionally refine.
        balanced = sum(gathered) / len(gathered)
        if self.refine_type is not None:
            balanced = self.refine(balanced)

        # Step 3: scatter the balanced feature back as a residual to each
        # original level (direction of resizing is reversed here).
        outs = []
        for lvl, feat in enumerate(inputs):
            residual = self._rescale(
                balanced, feat.size()[2:], down=(lvl >= self.refine_level))
            outs.append(residual + feat)
        return tuple(outs)
@NECKS.register_module()
class ChannelMapper(BaseModule):
    """Channel Mapper to reduce/increase channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        kernel_size (int, optional): kernel_size for reducing channels (used
            at each scale). Default: 3.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: None.
        act_cfg (dict, optional): Config dict for activation layer in
            ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There
            would be extra_convs when num_outs is larger than the length
            of in_channels.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = ChannelMapper(in_channels, 11, 3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 num_outs=None,
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(ChannelMapper, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.extra_convs = None
        if num_outs is None:
            num_outs = len(in_channels)
        # One mapping conv per input scale.
        self.convs = nn.ModuleList(
            ConvModule(
                channels,
                out_channels,
                kernel_size,
                padding=(kernel_size - 1) // 2,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for channels in in_channels)
        # Extra stride-2 convs produce additional, coarser output levels.
        if num_outs > len(in_channels):
            self.extra_convs = nn.ModuleList()
            for i in range(len(in_channels), num_outs):
                # First extra conv reads the last backbone feature; later
                # ones chain off the previous extra output.
                src_channels = (
                    in_channels[-1] if i == len(in_channels) else out_channels)
                self.extra_convs.append(
                    ConvModule(
                        src_channels,
                        out_channels,
                        3,
                        stride=2,
                        padding=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg))

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == len(self.convs)
        outs = [conv(feat) for conv, feat in zip(self.convs, inputs)]
        if self.extra_convs:
            for i, extra_conv in enumerate(self.extra_convs):
                source = inputs[-1] if i == 0 else outs[-1]
                outs.append(extra_conv(source))
        return tuple(outs)
class Bottleneck(nn.Module):
    """Bottleneck block for DilatedEncoder used in `YOLOF.

    <https://arxiv.org/abs/2103.09460>`.

    Three ConvModules (1x1 reduce, 3x3 dilated, 1x1 expand) followed by a
    residual addition with the block input.

    Args:
        in_channels (int): The number of input channels.
        mid_channels (int): The number of middle output channels.
        dilation (int): Dilation rate of the 3x3 conv.
        norm_cfg (dict): Dictionary to construct and config norm layer.
    """

    def __init__(self,
                 in_channels,
                 mid_channels,
                 dilation,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(Bottleneck, self).__init__()
        # 1x1 reduce -> 3x3 dilated -> 1x1 expand back to in_channels.
        self.conv1 = ConvModule(
            in_channels, mid_channels, 1, norm_cfg=norm_cfg)
        self.conv2 = ConvModule(
            mid_channels,
            mid_channels,
            3,
            padding=dilation,
            dilation=dilation,
            norm_cfg=norm_cfg)
        self.conv3 = ConvModule(
            mid_channels, in_channels, 1, norm_cfg=norm_cfg)

    def forward(self, x):
        """Residual forward: ``x + conv3(conv2(conv1(x)))``."""
        branch = self.conv3(self.conv2(self.conv1(x)))
        return branch + x
@NECKS.register_module()
class DilatedEncoder(nn.Module):
    """Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.

    This module contains two types of components:
        - the original FPN lateral convolution layer and fpn convolution
          layer, which are 1x1 conv + 3x3 conv
        - the dilated residual block

    Args:
        in_channels (int): The number of input channels.
        out_channels (int): The number of output channels.
        block_mid_channels (int): The number of middle block output channels.
        num_residual_blocks (int): The number of residual blocks.
        block_dilations (list[int], optional): Dilation rate used by each
            residual block.  Defaults to ``[2, 4, 6, 8]`` (the original
            YOLOF setting) for backward compatibility; must contain at
            least ``num_residual_blocks`` entries.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 block_mid_channels,
                 num_residual_blocks,
                 block_dilations=None):
        super(DilatedEncoder, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.block_mid_channels = block_mid_channels
        self.num_residual_blocks = num_residual_blocks
        # Fix: the dilation list used to be hard-coded to [2, 4, 6, 8],
        # so num_residual_blocks > 4 crashed with an opaque IndexError in
        # _init_layers.  Keep the same default but allow overriding, and
        # fail early with a clear message when the list is too short.
        self.block_dilations = (
            block_dilations if block_dilations is not None else [2, 4, 6, 8])
        assert len(self.block_dilations) >= num_residual_blocks, (
            'block_dilations must provide a dilation rate for every '
            'residual block')
        self._init_layers()

    def _init_layers(self):
        # FPN-style projection: 1x1 lateral conv + 3x3 fpn conv, each
        # followed by a BN layer (activations are applied inside blocks).
        self.lateral_conv = nn.Conv2d(
            self.in_channels, self.out_channels, kernel_size=1)
        self.lateral_norm = BatchNorm2d(self.out_channels)
        self.fpn_conv = nn.Conv2d(
            self.out_channels, self.out_channels, kernel_size=3, padding=1)
        self.fpn_norm = BatchNorm2d(self.out_channels)
        encoder_blocks = []
        for i in range(self.num_residual_blocks):
            dilation = self.block_dilations[i]
            encoder_blocks.append(
                Bottleneck(
                    self.out_channels,
                    self.block_mid_channels,
                    dilation=dilation))
        self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)

    def init_weights(self):
        """Initialize convs (Caffe2-Xavier / normal) and norms (constant)."""
        caffe2_xavier_init(self.lateral_conv)
        caffe2_xavier_init(self.fpn_conv)
        for m in [self.lateral_norm, self.fpn_norm]:
            constant_init(m, 1)
        for m in self.dilated_encoder_blocks.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, mean=0, std=0.01)
            if is_norm(m):
                constant_init(m, 1)

    def forward(self, feature):
        """Encode the last input level; returns a one-element tuple."""
        out = self.lateral_norm(self.lateral_conv(feature[-1]))
        out = self.fpn_norm(self.fpn_conv(out))
        return (self.dilated_encoder_blocks(out), )
class Transition(BaseModule):
    """Base class for transition.

    Concrete transitions (e.g. ``UpInterpolationConv``, ``LastConv``)
    override :meth:`forward`.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        init_cfg (dict or list[dict], optional): Initialization config.
    """

    def __init__(self, in_channels, out_channels, init_cfg=None):
        super().__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x):
        # Fix: the original signature was ``def forward(x)`` (missing
        # ``self``), so any instance call raised TypeError.  This is an
        # abstract hook: subclasses must override it, so raise explicitly
        # instead of silently returning None.
        raise NotImplementedError
class UpInterpolationConv(Transition):
    """A transition used for up-sampling.

    Up-sample the input by interpolation then refines the feature by
    a convolution layer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Up-sampling factor. Default: 2.
        mode (int): Interpolation mode. Default: nearest.
        align_corners (bool): Whether align corners when interpolation.
            Default: None.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor=2,
                 mode='nearest',
                 align_corners=None,
                 kernel_size=3,
                 init_cfg=None,
                 **kwargs):
        super().__init__(in_channels, out_channels, init_cfg)
        self.mode = mode
        self.scale_factor = scale_factor
        self.align_corners = align_corners
        # Refinement conv applied after the interpolation step.
        self.conv = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, x):
        """Interpolate then refine with a conv."""
        upsampled = F.interpolate(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)
        return self.conv(upsampled)
class LastConv(Transition):
    """A transition used for refining the output of the last stage.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        num_inputs (int): Number of inputs of the FPN features.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_inputs,
                 kernel_size=3,
                 init_cfg=None,
                 **kwargs):
        super().__init__(in_channels, out_channels, init_cfg)
        self.num_inputs = num_inputs
        self.conv_out = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, inputs):
        """Refine only the last element of ``inputs``."""
        assert len(inputs) == self.num_inputs
        last_feat = inputs[-1]
        return self.conv_out(last_feat)
@NECKS.register_module()
class FPG(BaseModule):
    """FPG.

    Implementation of `Feature Pyramid Grids (FPG)
    <https://arxiv.org/abs/2004.03580>`_.
    This implementation only gives the basic structure stated in the paper.
    But users can implement different type of transitions to fully explore
    the potential power of the structure of FPG.

    Args:
        in_channels (list[int]): Number of input channels (feature maps of
            all levels should have the same channels).
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture
            will be stacked.
        paths (list[str]): Specify the path order of each stack level.
            Each element in the list should be either 'bu' (bottom-up) or
            'td' (top-down).
        inter_channels (int): Number of inter channels.
        same_up_trans (dict): Transition that goes down at the same stage.
        same_down_trans (dict): Transition that goes up at the same stage.
        across_lateral_trans (dict): Across-pathway same-stage transition.
        across_down_trans (dict): Across-pathway bottom-up connection.
        across_up_trans (dict): Across-pathway top-down connection.
        across_skip_trans (dict): Across-pathway skip connection.
        output_trans (dict): Transition that trans the output of the
            last stage.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive)
            to build the feature pyramid. Default: -1, the last level.
        add_extra_convs (bool): It decides whether to add conv layers on
            top of the original feature maps. Default to False.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        skip_inds (list[tuple[int]]): Per-level sets of stage indices to
            skip; required when ``across_skip_trans`` is used.
        init_cfg (dict or list[dict], optional): Initialization config.
    """

    transition_types = {
        'conv': ConvModule,
        'interpolation_conv': UpInterpolationConv,
        'last_conv': LastConv,
    }

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 paths,
                 inter_channels=None,
                 same_down_trans=None,
                 same_up_trans=dict(
                     type='conv', kernel_size=3, stride=2, padding=1),
                 across_lateral_trans=dict(type='conv', kernel_size=1),
                 across_down_trans=dict(type='conv', kernel_size=3),
                 across_up_trans=None,
                 across_skip_trans=dict(type='identity'),
                 output_trans=dict(type='last_conv', kernel_size=3),
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 norm_cfg=None,
                 skip_inds=None,
                 init_cfg=[
                     dict(type='Caffe2Xavier', layer='Conv2d'),
                     dict(
                         type='Constant',
                         layer=[
                             '_BatchNorm', '_InstanceNorm', 'GroupNorm',
                             'LayerNorm'
                         ],
                         val=1.0)
                 ]):
        super(FPG, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        if inter_channels is None:
            self.inter_channels = [out_channels for _ in range(num_outs)]
        elif isinstance(inter_channels, int):
            self.inter_channels = [inter_channels for _ in range(num_outs)]
        else:
            assert isinstance(inter_channels, list)
            assert len(inter_channels) == num_outs
            self.inter_channels = inter_channels
        self.stack_times = stack_times
        self.paths = paths
        assert isinstance(paths, list) and len(paths) == stack_times
        for d in paths:
            assert d in ('bu', 'td')

        self.same_down_trans = same_down_trans
        self.same_up_trans = same_up_trans
        self.across_lateral_trans = across_lateral_trans
        self.across_down_trans = across_down_trans
        self.across_up_trans = across_up_trans
        self.output_trans = output_trans
        self.across_skip_trans = across_skip_trans
        self.with_bias = norm_cfg is None
        # Bug fix: the original code contained the bare, no-op expression
        # ``(skip_inds is not None)`` here.  It was clearly meant as an
        # assertion -- the line below indexes ``self.skip_inds[0]``, which
        # would otherwise fail with a confusing TypeError when skip_inds
        # is left as None while across_skip_trans is configured.
        if self.across_skip_trans is not None:
            assert skip_inds is not None
        self.skip_inds = skip_inds
        assert len(self.skip_inds[0]) <= self.stack_times

        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs

        # build lateral 1x1 convs to reduce channels
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = nn.Conv2d(self.in_channels[i],
                               self.inter_channels[i - self.start_level], 1)
            self.lateral_convs.append(l_conv)

        extra_levels = num_outs - self.backbone_end_level + self.start_level
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            if self.add_extra_convs:
                fpn_idx = self.backbone_end_level - self.start_level + i
                extra_conv = nn.Conv2d(
                    self.inter_channels[fpn_idx - 1],
                    self.inter_channels[fpn_idx],
                    3,
                    stride=2,
                    padding=1)
                self.extra_downsamples.append(extra_conv)
            else:
                self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))

        # Build the grid of transitions: one ModuleDict per (stage, level).
        self.fpn_transitions = nn.ModuleList()  # stacked stages
        for s in range(self.stack_times):
            stage_trans = nn.ModuleList()  # transitions in each stage
            for i in range(self.num_outs):
                # An empty dict marks a (stage, level) cell that is skipped.
                trans = nn.ModuleDict()
                if s in self.skip_inds[i]:
                    stage_trans.append(trans)
                    continue
                # same level, from lower spatial resolution (up path)
                if i == 0 or self.same_up_trans is None:
                    same_up_trans = None
                else:
                    same_up_trans = self.build_trans(
                        self.same_up_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['same_up'] = same_up_trans
                # same level, from higher spatial resolution (down path)
                if i == self.num_outs - 1 or self.same_down_trans is None:
                    same_down_trans = None
                else:
                    same_down_trans = self.build_trans(
                        self.same_down_trans, self.inter_channels[i + 1],
                        self.inter_channels[i])
                trans['same_down'] = same_down_trans
                # across pathway, same stage
                across_lateral_trans = self.build_trans(
                    self.across_lateral_trans, self.inter_channels[i],
                    self.inter_channels[i])
                trans['across_lateral'] = across_lateral_trans
                # across pathway, bottom-up connection
                if i == self.num_outs - 1 or self.across_down_trans is None:
                    across_down_trans = None
                else:
                    across_down_trans = self.build_trans(
                        self.across_down_trans, self.inter_channels[i + 1],
                        self.inter_channels[i])
                trans['across_down'] = across_down_trans
                # across pathway, top-down connection
                if i == 0 or self.across_up_trans is None:
                    across_up_trans = None
                else:
                    across_up_trans = self.build_trans(
                        self.across_up_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_up'] = across_up_trans
                # across pathway, skip connection from the first stage
                if self.across_skip_trans is None:
                    across_skip_trans = None
                else:
                    across_skip_trans = self.build_trans(
                        self.across_skip_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_skip'] = across_skip_trans
                stage_trans.append(trans)
            self.fpn_transitions.append(stage_trans)

        # Per-level output transition fusing all stacked stages.
        self.output_transition = nn.ModuleList()
        for i in range(self.num_outs):
            trans = self.build_trans(
                self.output_trans,
                self.inter_channels[i],
                self.out_channels,
                num_inputs=self.stack_times + 1)
            self.output_transition.append(trans)

        self.relu = nn.ReLU(inplace=True)

    def build_trans(self, cfg, in_channels, out_channels, **extra_args):
        """Instantiate a transition module from its config dict."""
        cfg_ = cfg.copy()
        trans_type = cfg_.pop('type')
        trans_cls = self.transition_types[trans_type]
        return trans_cls(in_channels, out_channels, **cfg_, **extra_args)

    def fuse(self, fuse_dict):
        """Sum all non-None entries of ``fuse_dict``."""
        out = None
        for item in fuse_dict.values():
            if item is not None:
                if out is None:
                    out = item
                else:
                    out = out + item
        return out

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == len(self.in_channels)

        # build all levels from original feature maps
        feats = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[-1]))

        outs = [feats]

        for i in range(self.stack_times):
            current_outs = outs[-1]
            next_outs = []
            direction = self.paths[i]
            for j in range(self.num_outs):
                if i in self.skip_inds[j]:
                    next_outs.append(outs[-1][j])
                    continue
                # feature level to be processed at this step
                if direction == 'td':
                    lvl = self.num_outs - j - 1
                else:
                    lvl = j
                # the same-pathway transition depends on the direction
                if direction == 'td':
                    same_trans = self.fpn_transitions[i][lvl]['same_down']
                else:
                    same_trans = self.fpn_transitions[i][lvl]['same_up']
                across_lateral_trans = self.fpn_transitions[i][lvl][
                    'across_lateral']
                across_down_trans = self.fpn_transitions[i][lvl][
                    'across_down']
                across_up_trans = self.fpn_transitions[i][lvl]['across_up']
                across_skip_trans = self.fpn_transitions[i][lvl][
                    'across_skip']
                to_fuse = dict(
                    same=None, lateral=None, across_up=None,
                    across_down=None)
                # same-pathway update: consume the previous level just
                # produced in this sweep
                if same_trans is not None:
                    to_fuse['same'] = same_trans(next_outs[-1])
                # across-same-stage update
                if across_lateral_trans is not None:
                    to_fuse['lateral'] = across_lateral_trans(
                        current_outs[lvl])
                # across-pathway updates from adjacent levels
                if lvl > 0 and across_up_trans is not None:
                    to_fuse['across_up'] = across_up_trans(
                        current_outs[lvl - 1])
                if lvl < self.num_outs - 1 and across_down_trans is not None:
                    to_fuse['across_down'] = across_down_trans(
                        current_outs[lvl + 1])
                # skip connection from the very first pyramid
                if across_skip_trans is not None:
                    to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
                x = self.fuse(to_fuse)
                next_outs.append(x)

            # a top-down sweep builds the list in reverse level order
            if direction == 'td':
                outs.append(next_outs[::-1])
            else:
                outs.append(next_outs)

        # output stage: fuse each level across all stacked pyramids
        final_outs = []
        for i in range(self.num_outs):
            lvl_out_list = []
            for s in range(len(outs)):
                lvl_out_list.append(outs[s][i])
            lvl_out = self.output_transition[i](lvl_out_list)
            final_outs.append(lvl_out)

        return final_outs
@NECKS.register_module()
class FPN(BaseModule):
    r"""Feature Pyramid Network.

    This is an implementation of paper `Feature Pyramid Networks for Object
    Detection <https://arxiv.org/abs/1612.03144>`_.

    Args:
        in_channels (list[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive)
            to build the feature pyramid. Default: -1, the last level.
        add_extra_convs (bool | str): If bool, it decides whether to add
            conv layers on top of the original feature maps. Default to
            False. If True, it is equivalent to ``add_extra_convs='on_input'``.
            If str, it specifies the source feature map of the extra convs.
            Only the following options are allowed:

            - 'on_input': Last feat map of neck inputs (backbone feature).
            - 'on_lateral': Last feature map after lateral convs.
            - 'on_output': The last output feature map after fpn convs.
        relu_before_extra_convs (bool): Whether to apply relu before the
            extra conv. Default: False.
        no_norm_on_lateral (bool): Whether to apply norm on lateral.
            Default: False.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer in ConvModule.
            Default: None.
        upsample_cfg (dict): Config dict for interpolate layer.
            Default: ``dict(mode='nearest')``.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=(- 1),
                 add_extra_convs=False,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 upsample_cfg=dict(mode='nearest'),
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(FPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        # flag toggled by mmcv's fp16 utilities; required by @auto_fp16
        self.fp16_enabled = False
        # copy so later mutation of the caller's dict cannot affect us
        self.upsample_cfg = upsample_cfg.copy()
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # if end_level < num_ins, no extra level is allowed
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            assert (add_extra_convs in ('on_input', 'on_lateral', 'on_output'))
        elif add_extra_convs:
            # True is a legacy alias for 'on_input'
            self.add_extra_convs = 'on_input'
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral conv (optionally without norm) + 3x3 output conv
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=(norm_cfg if (not self.no_norm_on_lateral) else None),
                act_cfg=act_cfg,
                inplace=False)
            fpn_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # add extra conv layers (e.g., RetinaNet P6/P7)
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (self.add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                if ((i == 0) and (self.add_extra_convs == 'on_input')):
                    # first extra conv may consume the raw backbone feature
                    in_channels = self.in_channels[(self.backbone_end_level - 1)]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(
                    in_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    inplace=False)
                self.fpn_convs.append(extra_fpn_conv)

    @auto_fp16()
    def forward(self, inputs):
        """Forward function."""
        assert (len(inputs) == len(self.in_channels))
        # build laterals
        laterals = [
            lateral_conv(inputs[(i + self.start_level)])
            for (i, lateral_conv) in enumerate(self.lateral_convs)
        ]
        # build top-down path: add the upsampled coarser level in place
        used_backbone_levels = len(laterals)
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            # with a fixed scale_factor the target size is implicit;
            # otherwise match the finer level's spatial shape exactly
            if ('scale_factor' in self.upsample_cfg):
                laterals[(i - 1)] = (laterals[(i - 1)] + F.interpolate(
                    laterals[i], **self.upsample_cfg))
            else:
                prev_shape = laterals[(i - 1)].shape[2:]
                laterals[(i - 1)] = (laterals[(i - 1)] + F.interpolate(
                    laterals[i], size=prev_shape, **self.upsample_cfg))
        # build outputs: one 3x3 conv per backbone level
        outs = [
            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
        ]
        # part 2: add extra levels
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # use max pool (kernel 1, stride 2) to get more levels on
                # top of outputs (e.g., Faster R-CNN, Mask R-CNN)
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # add conv layers on top of original feature maps
                if (self.add_extra_convs == 'on_input'):
                    extra_source = inputs[(self.backbone_end_level - 1)]
                elif (self.add_extra_convs == 'on_lateral'):
                    extra_source = laterals[(- 1)]
                elif (self.add_extra_convs == 'on_output'):
                    extra_source = outs[(- 1)]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
@NECKS.register_module()
class HRFPN(BaseModule):
    """HRFPN (High Resolution Feature Pyramids).

    paper: `High-Resolution Representations for Labeling Pixels and Regions
    <https://arxiv.org/abs/1904.04514>`_.

    Args:
        in_channels (list): number of channels for each branch.
        out_channels (int): output channels of feature pyramids.
        num_outs (int): number of output stages.
        pooling_type (str): pooling for generating feature pyramids
            from {MAX, AVG}.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        stride (int): stride of 3x3 convolutional layers.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs=5,
                 pooling_type='AVG',
                 conv_cfg=None,
                 norm_cfg=None,
                 with_cp=False,
                 stride=1,
                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
        super(HRFPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # 1x1 conv squeezing the concatenation of all branches.
        self.reduction_conv = ConvModule(
            sum(in_channels),
            out_channels,
            kernel_size=1,
            conv_cfg=self.conv_cfg,
            act_cfg=None)
        # One 3x3 conv per output level.
        self.fpn_convs = nn.ModuleList(
            ConvModule(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                stride=stride,
                conv_cfg=self.conv_cfg,
                act_cfg=None) for _ in range(self.num_outs))
        self.pooling = (
            F.max_pool2d if pooling_type == 'MAX' else F.avg_pool2d)

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_ins
        # Bring every branch to the resolution of the first (largest) one.
        upsampled = [inputs[0]]
        for level in range(1, self.num_ins):
            upsampled.append(
                F.interpolate(
                    inputs[level], scale_factor=2**level, mode='bilinear'))
        concat = torch.cat(upsampled, dim=1)
        # Optionally trade compute for memory with gradient checkpointing.
        if concat.requires_grad and self.with_cp:
            reduced = checkpoint(self.reduction_conv, concat)
        else:
            reduced = self.reduction_conv(concat)
        # Build the pyramid by pooling the reduced map at powers of two.
        pyramid = [reduced]
        for level in range(1, self.num_outs):
            pyramid.append(
                self.pooling(reduced, kernel_size=2**level, stride=2**level))
        outputs = []
        for fpn_conv, feat in zip(self.fpn_convs, pyramid):
            if feat.requires_grad and self.with_cp:
                outputs.append(checkpoint(fpn_conv, feat))
            else:
                outputs.append(fpn_conv(feat))
        return tuple(outputs)
@NECKS.register_module()
class NASFPN(BaseModule):
    """NAS-FPN.

    Implementation of `NAS-FPN: Learning Scalable Feature Pyramid
    Architecture for Object Detection <https://arxiv.org/abs/1904.07392>`_.

    Args:
        in_channels (list[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture
            will be stacked.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive)
            to build the feature pyramid. Default: -1, the last level.
        add_extra_convs (bool): It decides whether to add conv layers on
            top of the original feature maps. Default to False.
        norm_cfg (dict, optional): Config dict for normalization layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 start_level=0,
                 end_level=(- 1),
                 add_extra_convs=False,
                 norm_cfg=None,
                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
        super(NASFPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)  # number of input feature levels
        self.num_outs = num_outs  # number of output feature levels
        self.stack_times = stack_times
        self.norm_cfg = norm_cfg
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # if end_level < num_ins, no extra level is allowed
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # add lateral connections (1x1 convs, no activation)
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                norm_cfg=norm_cfg,
                act_cfg=None)
            self.lateral_convs.append(l_conv)
        # add extra downsample layers (1x1 conv + stride-2 max pool)
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_conv = ConvModule(
                out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.extra_downsamples.append(
                nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
        # add NAS FPN connections; each stage repeats the searched cell
        # wiring (key naming: <op>_<inputs>_<output level>)
        self.fpn_stages = ModuleList()
        for _ in range(self.stack_times):
            stage = nn.ModuleDict()
            # gp(p6, p4) -> p4_1
            stage['gp_64_4'] = GlobalPoolingCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p4_1, p4) -> p4_2
            stage['sum_44_4'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p4_2, p3) -> p3_out
            stage['sum_43_3'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p3_out, p4_2) -> p4_out
            stage['sum_34_4'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p5, gp(p4_out, p3_out)) -> p5_out
            stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
            stage['sum_55_5'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p7, gp(p5_out, p4_2)) -> p7_out
            stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
            stage['sum_77_7'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # gp(p7_out, p5_out) -> p6_out
            stage['gp_75_6'] = GlobalPoolingCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            self.fpn_stages.append(stage)

    def forward(self, inputs):
        """Forward function."""
        # build the base levels from the backbone features
        feats = [
            lateral_conv(inputs[(i + self.start_level)])
            for (i, lateral_conv) in enumerate(self.lateral_convs)
        ]
        # build extra (smaller) levels on top of the last one
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[(- 1)]))
        # NOTE: the cell wiring below assumes exactly 5 pyramid levels
        (p3, p4, p5, p6, p7) = feats
        for stage in self.fpn_stages:
            # gp(p6, p4) -> p4_1
            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[(- 2):])
            # sum(p4_1, p4) -> p4_2
            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[(- 2):])
            # sum(p4_2, p3) -> p3_out
            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[(- 2):])
            # sum(p3_out, p4_2) -> p4_out
            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[(- 2):])
            # sum(p5, gp(p4_out, p3_out)) -> p5_out
            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[(- 2):])
            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[(- 2):])
            # sum(p7, gp(p5_out, p4_2)) -> p7_out
            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[(- 2):])
            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[(- 2):])
            # gp(p7_out, p5_out) -> p6_out
            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[(- 2):])
        return (p3, p4, p5, p6, p7)
@NECKS.register_module()
class NASFCOS_FPN(BaseModule):
    """FPN structure in NASFPN.

    Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for
    Object Detection <https://arxiv.org/abs/1906.04423>`_.

    Args:
        in_channels (list[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 1.
        end_level (int): Index of the end input backbone level (exclusive)
            to build the feature pyramid. Default: -1, the last level.
        add_extra_convs (bool): It decides whether to add conv layers on
            top of the original feature maps. Default to False.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=1,
                 end_level=(- 1),
                 add_extra_convs=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None):
        # weights are set up in init_weights() below instead of via init_cfg
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super(NASFCOS_FPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # if end_level < num_ins, no extra level is allowed
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # 1x1 convs adapting each used backbone level to out_channels
        self.adapt_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            adapt_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                stride=1,
                padding=0,
                bias=False,
                norm_cfg=dict(type='BN'),
                act_cfg=dict(type='ReLU', inplace=False))
            self.adapt_convs.append(adapt_conv)
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)

        def build_concat_cell(with_input1_conv, with_input2_conv):
            # searched cell: depthwise-grouped 1x1 out conv, norm-act-conv
            # order, nearest up-sampling for resolution matching
            cell_conv_cfg = dict(
                kernel_size=1, padding=0, bias=False, groups=out_channels)
            return ConcatCell(
                in_channels=out_channels,
                out_channels=out_channels,
                with_out_conv=True,
                out_conv_cfg=cell_conv_cfg,
                out_norm_cfg=dict(type='BN'),
                out_conv_order=('norm', 'act', 'conv'),
                with_input1_conv=with_input1_conv,
                with_input2_conv=with_input2_conv,
                input_conv_cfg=conv_cfg,
                input_norm_cfg=norm_cfg,
                upsample_mode='nearest')

        # searched cell graph; key digits encode the two input indices
        # into the running `feats` list (see forward)
        self.fpn = nn.ModuleDict()
        self.fpn['c22_1'] = build_concat_cell(True, True)
        self.fpn['c22_2'] = build_concat_cell(True, True)
        self.fpn['c32'] = build_concat_cell(True, False)
        self.fpn['c02'] = build_concat_cell(True, False)
        self.fpn['c42'] = build_concat_cell(True, True)
        self.fpn['c36'] = build_concat_cell(True, True)
        self.fpn['c61'] = build_concat_cell(True, True)
        # stride-2 convs producing the extra output levels
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_act_cfg = (
                None if (i == 0) else dict(type='ReLU', inplace=False))
            self.extra_downsamples.append(
                ConvModule(
                    out_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    act_cfg=extra_act_cfg,
                    order=('act', 'norm', 'conv')))

    def forward(self, inputs):
        """Forward function."""
        feats = [
            adapt_conv(inputs[(i + self.start_level)])
            for (i, adapt_conv) in enumerate(self.adapt_convs)
        ]
        # each cell consumes the feats named by the digits in its key and
        # appends its result, growing the list as the DAG is evaluated
        for (i, module_name) in enumerate(self.fpn):
            (idx_1, idx_2) = (int(module_name[1]), int(module_name[2]))
            res = self.fpn[module_name](feats[idx_1], feats[idx_2])
            feats.append(res)
        ret = []
        # fuse cell outputs 9/8/7 with feats[5], matched to the spatial
        # sizes of backbone inputs 1/2/3
        for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]):
            (feats1, feats2) = (feats[idx], feats[5])
            feats2_resize = F.interpolate(
                feats2,
                size=feats1.size()[2:],
                mode='bilinear',
                align_corners=False)
            feats_sum = (feats1 + feats2_resize)
            ret.append(
                F.interpolate(
                    feats_sum,
                    size=inputs[input_idx].size()[2:],
                    mode='bilinear',
                    align_corners=False))
        for submodule in self.extra_downsamples:
            ret.append(submodule(ret[(- 1)]))
        return tuple(ret)

    def init_weights(self):
        """Initialize the weights of module."""
        super(NASFCOS_FPN, self).init_weights()
        for module in self.fpn.values():
            # NOTE(review): the guard checks `conv_out` but initializes
            # `out_conv` -- these attribute names look inconsistent;
            # verify against the ConcatCell definition.
            if hasattr(module, 'conv_out'):
                caffe2_xavier_init(module.out_conv.conv)
        for modules in [
                self.adapt_convs.modules(),
                self.extra_downsamples.modules()
        ]:
            for module in modules:
                if isinstance(module, nn.Conv2d):
                    caffe2_xavier_init(module)
@NECKS.register_module()
class PAFPN(FPN):
    """Path Aggregation Network for Instance Segmentation.

    This is an implementation of the `PAFPN in Path Aggregation Network
    <https://arxiv.org/abs/1803.01534>`_. On top of the standard FPN
    top-down pathway it adds a bottom-up aggregation path so low-level
    localization cues reach the higher pyramid levels.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_outs (int): Number of output scales.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last
            level.
        add_extra_convs (bool | str): If bool, it decides whether to add conv
            layers on top of the original feature maps. Default to False.
            If True, it is equivalent to `add_extra_convs='on_input'`.
            If str, it specifies the source feature map of the extra convs.
            Only the following options are allowed

            - 'on_input': Last feat map of neck inputs (i.e. backbone
              feature).
            - 'on_lateral': Last feature map after lateral convs.
            - 'on_output': The last output feature map after fpn convs.
        relu_before_extra_convs (bool): Whether to apply relu before the extra
            conv. Default: False.
        no_norm_on_lateral (bool): Whether to apply norm on lateral.
            Default: False.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (str): Config dict for activation layer in ConvModule.
            Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=(- 1),
                 add_extra_convs=False,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 init_cfg=dict(
                     type='Xavier', layer='Conv2d', distribution='uniform')):
        super(PAFPN, self).__init__(in_channels, out_channels, num_outs,
                                    start_level, end_level, add_extra_convs,
                                    relu_before_extra_convs,
                                    no_norm_on_lateral, conv_cfg, norm_cfg,
                                    act_cfg, init_cfg=init_cfg)
        # Modules for the extra bottom-up path: one stride-2 downsample conv
        # and one 3x3 fusion conv per level above the lowest used level.
        self.downsample_convs = nn.ModuleList()
        self.pafpn_convs = nn.ModuleList()
        for i in range((self.start_level + 1), self.backbone_end_level):
            d_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                stride=2,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            pafpn_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            self.downsample_convs.append(d_conv)
            self.pafpn_convs.append(pafpn_conv)

    @auto_fp16()
    def forward(self, inputs):
        """Forward function."""
        assert (len(inputs) == len(self.in_channels))
        # Part 1: lateral 1x1 convs on the selected backbone levels
        # (lateral_convs/fpn_convs are built by the parent FPN).
        laterals = [
            lateral_conv(inputs[(i + self.start_level)])
            for (i, lateral_conv) in enumerate(self.lateral_convs)
        ]
        used_backbone_levels = len(laterals)
        # Part 2: standard FPN top-down pathway — upsample coarser level and
        # add in place (order matters: runs from coarse to fine).
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            prev_shape = laterals[(i - 1)].shape[2:]
            laterals[(i - 1)] += F.interpolate(
                laterals[i], size=prev_shape, mode='nearest')
        # Part 3: 3x3 convs on the merged top-down features.
        inter_outs = [
            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
        ]
        # Part 4: bottom-up path aggregation — downsample finer level and add
        # in place (runs from fine to coarse).
        for i in range(0, (used_backbone_levels - 1)):
            inter_outs[(i + 1)] += self.downsample_convs[i](inter_outs[i])
        outs = []
        # The lowest level is passed through unchanged; higher levels get an
        # extra 3x3 pafpn conv after aggregation.
        outs.append(inter_outs[0])
        outs.extend([
            self.pafpn_convs[(i - 1)](inter_outs[i])
            for i in range(1, used_backbone_levels)
        ])
        # Part 5: extra output levels beyond the backbone levels.
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # Use max pool (stride 2) to generate the extra levels.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # First extra level is built from the configured source map;
                # subsequent ones chain off the previous output.
                if (self.add_extra_convs == 'on_input'):
                    orig = inputs[(self.backbone_end_level - 1)]
                    outs.append(self.fpn_convs[used_backbone_levels](orig))
                elif (self.add_extra_convs == 'on_lateral'):
                    outs.append(self.fpn_convs[used_backbone_levels](
                        laterals[(- 1)]))
                elif (self.add_extra_convs == 'on_output'):
                    outs.append(self.fpn_convs[used_backbone_levels](
                        outs[(- 1)]))
                else:
                    raise NotImplementedError
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
class ASPP(BaseModule): 'ASPP (Atrous Spatial Pyramid Pooling)\n\n This is an implementation of the ASPP module used in DetectoRS\n (https://arxiv.org/pdf/2006.02334.pdf)\n\n Args:\n in_channels (int): Number of input channels.\n out_channels (int): Number of channels produced by this module\n dilations (tuple[int]): Dilations of the four branches.\n Default: (1, 3, 6, 1)\n init_cfg (dict or list[dict], optional): Initialization config dict.\n ' def __init__(self, in_channels, out_channels, dilations=(1, 3, 6, 1), init_cfg=dict(type='Kaiming', layer='Conv2d')): super().__init__(init_cfg) assert (dilations[(- 1)] == 1) self.aspp = nn.ModuleList() for dilation in dilations: kernel_size = (3 if (dilation > 1) else 1) padding = (dilation if (dilation > 1) else 0) conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, dilation=dilation, padding=padding, bias=True) self.aspp.append(conv) self.gap = nn.AdaptiveAvgPool2d(1) def forward(self, x): avg_x = self.gap(x) out = [] for aspp_idx in range(len(self.aspp)): inp = (avg_x if (aspp_idx == (len(self.aspp) - 1)) else x) out.append(F.relu_(self.aspp[aspp_idx](inp))) out[(- 1)] = out[(- 1)].expand_as(out[(- 2)]) out = torch.cat(out, dim=1) return out
@NECKS.register_module()
class RFP(FPN):
    """RFP (Recursive Feature Pyramid).

    This is an implementation of RFP in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the
    input of RFP should be multi level features along with origin input image
    of backbone.

    Args:
        rfp_steps (int): Number of unrolled steps of RFP.
        rfp_backbone (dict): Configuration of the backbone for RFP.
        aspp_out_channels (int): Number of output channels of ASPP module.
        aspp_dilations (tuple[int]): Dilation rates of four branches.
            Default: (1, 3, 6, 1)
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 rfp_steps,
                 rfp_backbone,
                 aspp_out_channels,
                 aspp_dilations=(1, 3, 6, 1),
                 init_cfg=None,
                 **kwargs):
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super().__init__(init_cfg=init_cfg, **kwargs)
        self.rfp_steps = rfp_steps
        # One recursive backbone instance per unrolled step after the first.
        self.rfp_modules = ModuleList()
        for rfp_idx in range(1, rfp_steps):
            rfp_module = build_backbone(rfp_backbone)
            self.rfp_modules.append(rfp_module)
        self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
                             aspp_dilations)
        # 1x1 conv producing the per-pixel fusion weight between successive
        # unrolled steps (squashed through a sigmoid in forward()).
        self.rfp_weight = nn.Conv2d(
            self.out_channels, 1, kernel_size=1, stride=1, padding=0,
            bias=True)

    def init_weights(self):
        # Custom init instead of the parent's: Xavier for the FPN convs,
        # each recursive backbone initializes itself, and the fusion weight
        # starts at zero (so sigmoid(0) = 0.5 blends the steps equally).
        for convs in [self.lateral_convs, self.fpn_convs]:
            for m in convs.modules():
                if isinstance(m, nn.Conv2d):
                    xavier_init(m, distribution='uniform')
        for rfp_idx in range((self.rfp_steps - 1)):
            self.rfp_modules[rfp_idx].init_weights()
        constant_init(self.rfp_weight, 0)

    def forward(self, inputs):
        # inputs = (img, feat_0, ..., feat_n): the original image is consumed
        # by the recursive backbone's rfp_forward, not by the FPN itself.
        inputs = list(inputs)
        assert (len(inputs) == (len(self.in_channels) + 1))
        img = inputs.pop(0)
        x = super().forward(tuple(inputs))
        for rfp_idx in range((self.rfp_steps - 1)):
            # Feed ASPP-transformed pyramid levels (all but the first) back
            # into the recursive backbone, then rebuild the pyramid.
            rfp_feats = ([x[0]] + list(
                (self.rfp_aspp(x[i]) for i in range(1, len(x)))))
            x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
            x_idx = super().forward(x_idx)
            x_new = []
            for ft_idx in range(len(x_idx)):
                # Learned per-pixel convex combination of the new step's
                # features and the previous step's features.
                add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
                x_new.append(((add_weight * x_idx[ft_idx]) +
                              ((1 - add_weight) * x[ft_idx])))
            x = x_new
        return x
class DetectionBlock(BaseModule): "Detection block in YOLO neck.\n\n Let out_channels = n, the DetectionBlock contains:\n Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.\n The first 6 ConvLayers are formed the following way:\n 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.\n The Conv2D layer is 1x1x255.\n Some block will have branch after the fifth ConvLayer.\n The input channel is arbitrary (in_channels)\n\n Args:\n in_channels (int): The number of input channels.\n out_channels (int): The number of output channels.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n Default: dict(type='BN', requires_grad=True)\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='LeakyReLU', negative_slope=0.1).\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None\n " def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(DetectionBlock, self).__init__(init_cfg) double_out_channels = (out_channels * 2) cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg) self.conv2 = ConvModule(out_channels, double_out_channels, 3, padding=1, **cfg) self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg) self.conv4 = ConvModule(out_channels, double_out_channels, 3, padding=1, **cfg) self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg) def forward(self, x): tmp = self.conv1(x) tmp = self.conv2(tmp) tmp = self.conv3(tmp) tmp = self.conv4(tmp) out = self.conv5(tmp) return out
@NECKS.register_module()
class YOLOV3Neck(BaseModule):
    """The neck of YOLOV3.

    It can be treated as a simplified version of FPN. It
    will take the result from Darknet backbone and do some upsampling and
    concatenation. It will finally output the detection result.

    Note:
        The input feats should be from top to bottom.
        i.e., from high-lvl to low-lvl
        But YOLOV3Neck will process them in reversed order.
        i.e., from bottom (high-lvl) to top (low-lvl)

    Args:
        num_scales (int): The number of scales / stages.
        in_channels (List[int]): The number of input channels per scale.
        out_channels (List[int]): The number of output channels per scale.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict, optional): Dictionary to construct and config norm
            layer. Default: dict(type='BN', requires_grad=True)
        act_cfg (dict, optional): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 num_scales,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(YOLOV3Neck, self).__init__(init_cfg)
        assert (num_scales == len(in_channels) == len(out_channels))
        self.num_scales = num_scales
        self.in_channels = in_channels
        self.out_channels = out_channels
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # First detection block works directly on the deepest feature map;
        # subsequent scales get a 1x1 conv (named conv{i}) to shrink the
        # previous output before it is upsampled and concatenated, followed
        # by a detection block (named detect{i+1}) on the concatenation.
        self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
        for i in range(1, self.num_scales):
            (in_c, out_c) = (self.in_channels[i], self.out_channels[i])
            inter_c = out_channels[(i - 1)]
            self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg))
            # in_c + out_c : High-lvl feats will be cat with low-lvl feats.
            self.add_module(f'detect{(i + 1)}',
                            DetectionBlock((in_c + out_c), out_c, **cfg))

    def forward(self, feats):
        assert (len(feats) == self.num_scales)
        # Processed from bottom (high-lvl) to top (low-lvl); modules are
        # fetched by name to match the add_module calls in __init__.
        outs = []
        out = self.detect1(feats[(- 1)])
        outs.append(out)
        for (i, x) in enumerate(reversed(feats[:(- 1)])):
            conv = getattr(self, f'conv{(i + 1)}')
            tmp = conv(out)
            # Upsample 2x, then fuse with the next shallower backbone level
            # by channel concatenation.
            tmp = F.interpolate(tmp, scale_factor=2)
            tmp = torch.cat((tmp, x), 1)
            detect = getattr(self, f'detect{(i + 2)}')
            out = detect(tmp)
            outs.append(out)
        return tuple(outs)
@NECKS.register_module()
class YOLOXPAFPN(BaseModule):
    """Path Aggregation Network used in YOLOX.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3
        use_depthwise (bool): Whether to depthwise separable convolution in
            blocks. Default: False
        upsample_cfg (dict): Config dict for interpolate layer.
            Default: `dict(scale_factor=2, mode='nearest')`
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN')
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish')
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_csp_blocks=3,
                 use_depthwise=False,
                 upsample_cfg=dict(scale_factor=2, mode='nearest'),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=dict(
                     type='Kaiming',
                     layer='Conv2d',
                     a=math.sqrt(5),
                     distribution='uniform',
                     mode='fan_in',
                     nonlinearity='leaky_relu')):
        super(YOLOXPAFPN, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        conv = (DepthwiseSeparableConvModule if use_depthwise else ConvModule)

        # Top-down path: built coarse-to-fine, so reduce_layers[0] and
        # top_down_blocks[0] correspond to the coarsest pair of levels.
        self.upsample = nn.Upsample(**upsample_cfg)
        self.reduce_layers = nn.ModuleList()
        self.top_down_blocks = nn.ModuleList()
        for idx in range((len(in_channels) - 1), 0, (- 1)):
            self.reduce_layers.append(
                ConvModule(
                    in_channels[idx],
                    in_channels[(idx - 1)],
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.top_down_blocks.append(
                CSPLayer(
                    (in_channels[(idx - 1)] * 2),
                    in_channels[(idx - 1)],
                    num_blocks=num_csp_blocks,
                    add_identity=False,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

        # Bottom-up path: built fine-to-coarse, indexed directly by level.
        self.downsamples = nn.ModuleList()
        self.bottom_up_blocks = nn.ModuleList()
        for idx in range((len(in_channels) - 1)):
            self.downsamples.append(
                conv(
                    in_channels[idx],
                    in_channels[idx],
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.bottom_up_blocks.append(
                CSPLayer(
                    (in_channels[idx] * 2),
                    in_channels[(idx + 1)],
                    num_blocks=num_csp_blocks,
                    add_identity=False,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

        # Final 1x1 convs projecting every level to the common out_channels.
        self.out_convs = nn.ModuleList()
        for i in range(len(in_channels)):
            self.out_convs.append(
                ConvModule(
                    in_channels[i],
                    out_channels,
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

    def forward(self, inputs):
        """
        Args:
            inputs (tuple[Tensor]): input features.

        Returns:
            tuple[Tensor]: YOLOXPAFPN features.
        """
        assert (len(inputs) == len(self.in_channels))

        # Top-down path: walk from the coarsest level down, prepending each
        # fused result so inner_outs stays ordered fine-to-coarse.
        # Module index (len-1-idx) maps level idx to construction order.
        inner_outs = [inputs[(- 1)]]
        for idx in range((len(self.in_channels) - 1), 0, (- 1)):
            feat_heigh = inner_outs[0]
            feat_low = inputs[(idx - 1)]
            feat_heigh = self.reduce_layers[(
                (len(self.in_channels) - 1) - idx)](feat_heigh)
            inner_outs[0] = feat_heigh
            upsample_feat = self.upsample(feat_heigh)
            inner_out = self.top_down_blocks[(
                (len(self.in_channels) - 1) - idx)](
                    torch.cat([upsample_feat, feat_low], 1))
            inner_outs.insert(0, inner_out)

        # Bottom-up path: downsample the previous output and fuse with the
        # next top-down feature.
        outs = [inner_outs[0]]
        for idx in range((len(self.in_channels) - 1)):
            feat_low = outs[(- 1)]
            feat_height = inner_outs[(idx + 1)]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](
                torch.cat([downsample_feat, feat_height], 1))
            outs.append(out)

        # Project every level to out_channels.
        for (idx, conv) in enumerate(self.out_convs):
            outs[idx] = conv(outs[idx])
        return tuple(outs)
@PLUGIN_LAYERS.register_module()
class DropBlock(nn.Module):
    """Randomly drop some regions of feature maps.

    Please refer to the method proposed in `DropBlock
    <https://arxiv.org/abs/1810.12890>`_ for details.

    Args:
        drop_prob (float): The probability of dropping each block.
        block_size (int): The size of dropped blocks.
        warmup_iters (int): The drop probability will linearly increase
            from `0` to `drop_prob` during the first `warmup_iters` iterations.
            Default: 2000.
    """

    def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
        super(DropBlock, self).__init__()
        # Odd block size keeps the dropped square centered on its seed pixel.
        assert ((block_size % 2) == 1)
        assert (0 < drop_prob <= 1)
        assert (warmup_iters >= 0)
        self.drop_prob = drop_prob
        self.block_size = block_size
        self.warmup_iters = warmup_iters
        # Counts forward passes in training mode, driving the linear warmup.
        self.iter_cnt = 0

    def forward(self, x):
        """
        Args:
            x (Tensor): Input feature map on which some areas will be randomly
                dropped.

        Returns:
            Tensor: The tensor after DropBlock layer.
        """
        # DropBlock is a regularizer: identity at inference time.
        if (not self.training):
            return x
        self.iter_cnt += 1
        (N, C, H, W) = list(x.shape)
        gamma = self._compute_gamma((H, W))
        # Sample Bernoulli seeds only where a full block fits inside the map,
        # then grow each seed into a block_size x block_size square with a
        # stride-1 max pool; invert to obtain the keep-mask.
        mask_shape = (N, C, ((H - self.block_size) + 1),
                      ((W - self.block_size) + 1))
        mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
        mask = F.pad(mask, ([(self.block_size // 2)] * 4), value=0)
        mask = F.max_pool2d(
            input=mask,
            stride=(1, 1),
            kernel_size=(self.block_size, self.block_size),
            padding=(self.block_size // 2))
        mask = (1 - mask)
        # Rescale so the expected activation magnitude is preserved.
        # NOTE(review): `eps` is a module-level name defined outside this
        # block — presumably a small constant guarding against division by
        # zero when the whole mask is dropped; confirm at file top.
        x = (((x * mask) * mask.numel()) / (eps + mask.sum()))
        return x

    def _compute_gamma(self, feat_size):
        """Compute the value of gamma according to paper. gamma is the
        parameter of bernoulli distribution, which controls the number of
        features to drop.

        gamma = (drop_prob * fm_area) / (drop_area * keep_area)

        Args:
            feat_size (tuple[int, int]): The height and width of feature map.

        Returns:
            float: The value of gamma.
        """
        gamma = ((self.drop_prob * feat_size[0]) * feat_size[1])
        gamma /= (((feat_size[0] - self.block_size) + 1) *
                  ((feat_size[1] - self.block_size) + 1))
        gamma /= (self.block_size ** 2)
        # Linearly ramp the effective drop rate over the warmup iterations.
        factor = (1.0 if (self.iter_cnt > self.warmup_iters) else
                  (self.iter_cnt / self.warmup_iters))
        return (gamma * factor)

    def extra_repr(self):
        return f'drop_prob={self.drop_prob}, block_size={self.block_size}, warmup_iters={self.warmup_iters}'
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
    """Base class for RoIHeads.

    Subclasses must implement the three ``init_*`` hooks and
    ``forward_train``; test-time entry points may also be overridden.
    """

    def __init__(self,
                 bbox_roi_extractor=None,
                 bbox_head=None,
                 mask_roi_extractor=None,
                 mask_head=None,
                 shared_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(BaseRoIHead, self).__init__(init_cfg)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        if (shared_head is not None):
            # Propagate the pretrained setting into the shared head config
            # before building it.
            shared_head.pretrained = pretrained
            self.shared_head = build_shared_head(shared_head)
        # Heads are only built when configured; the with_* properties below
        # probe for their presence.
        if (bbox_head is not None):
            self.init_bbox_head(bbox_roi_extractor, bbox_head)
        if (mask_head is not None):
            self.init_mask_head(mask_roi_extractor, mask_head)
        self.init_assigner_sampler()

    @property
    def with_bbox(self):
        """bool: whether the RoI head contains a `bbox_head`"""
        return (hasattr(self, 'bbox_head') and (self.bbox_head is not None))

    @property
    def with_mask(self):
        """bool: whether the RoI head contains a `mask_head`"""
        return (hasattr(self, 'mask_head') and (self.mask_head is not None))

    @property
    def with_shared_head(self):
        """bool: whether the RoI head contains a `shared_head`"""
        return (hasattr(self, 'shared_head') and
                (self.shared_head is not None))

    @abstractmethod
    def init_bbox_head(self):
        """Initialize ``bbox_head``"""
        pass

    @abstractmethod
    def init_mask_head(self):
        """Initialize ``mask_head``"""
        pass

    @abstractmethod
    def init_assigner_sampler(self):
        """Initialize assigner and sampler."""
        pass

    @abstractmethod
    def forward_train(self,
                     x,
                     img_meta,
                     proposal_list,
                     gt_bboxes,
                     gt_labels,
                     gt_bboxes_ignore=None,
                     gt_masks=None,
                     **kwargs):
        """Forward function during training."""

    async def async_simple_test(self,
                                x,
                                proposal_list,
                                img_metas,
                                proposals=None,
                                rescale=False,
                                **kwargs):
        """Asynchronized test function."""
        raise NotImplementedError

    def simple_test(self,
                    x,
                    proposal_list,
                    img_meta,
                    proposals=None,
                    rescale=False,
                    **kwargs):
        """Test without augmentation."""

    def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
@HEADS.register_module()
class ConvFCBBoxHead(BBoxHead):
    """More general bbox head, with shared conv and fc layers and two optional
    separated branches.

    .. code-block:: none

        /-> cls convs -> cls fcs -> cls
        shared convs -> shared fcs
        \\-> reg convs -> reg fcs -> reg
    """

    def __init__(self,
                 num_shared_convs=0,
                 num_shared_fcs=0,
                 num_cls_convs=0,
                 num_cls_fcs=0,
                 num_reg_convs=0,
                 num_reg_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 *args,
                 **kwargs):
        super(ConvFCBBoxHead, self).__init__(
            *args, init_cfg=init_cfg, **kwargs)
        # At least one layer somewhere, and per-branch convs are incompatible
        # with shared fcs (convs need spatial maps, fcs flatten them).
        assert ((((((num_shared_convs + num_shared_fcs) + num_cls_convs) +
                   num_cls_fcs) + num_reg_convs) + num_reg_fcs) > 0)
        if ((num_cls_convs > 0) or (num_reg_convs > 0)):
            assert (num_shared_fcs == 0)
        if (not self.with_cls):
            assert ((num_cls_convs == 0) and (num_cls_fcs == 0))
        if (not self.with_reg):
            assert ((num_reg_convs == 0) and (num_reg_fcs == 0))
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # add shared convs and fcs
        (self.shared_convs, self.shared_fcs,
         last_layer_dim) = self._add_conv_fc_branch(
             self.num_shared_convs, self.num_shared_fcs, self.in_channels,
             True)
        self.shared_out_channels = last_layer_dim

        # add cls specific branch
        (self.cls_convs, self.cls_fcs,
         self.cls_last_dim) = self._add_conv_fc_branch(
             self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)

        # add reg specific branch
        (self.reg_convs, self.reg_fcs,
         self.reg_last_dim) = self._add_conv_fc_branch(
             self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)

        # When no fc follows at all, the branch output is still a spatial
        # map, so the final linear layers see channels * roi area.
        if ((self.num_shared_fcs == 0) and (not self.with_avg_pool)):
            if (self.num_cls_fcs == 0):
                self.cls_last_dim *= self.roi_feat_area
            if (self.num_reg_fcs == 0):
                self.reg_last_dim *= self.roi_feat_area

        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels are changed
        if self.with_cls:
            if self.custom_cls_channels:
                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
            else:
                # +1 for the background class.
                cls_channels = (self.num_classes + 1)
            self.fc_cls = build_linear_layer(
                self.cls_predictor_cfg,
                in_features=self.cls_last_dim,
                out_features=cls_channels)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else
                           (4 * self.num_classes))
            self.fc_reg = build_linear_layer(
                self.reg_predictor_cfg,
                in_features=self.reg_last_dim,
                out_features=out_dim_reg)

        if (init_cfg is None):
            # Fc layers get Xavier init on top of whatever the parent set up.
            self.init_cfg += [
                dict(
                    type='Xavier',
                    distribution='uniform',
                    override=[
                        dict(name='shared_fcs'),
                        dict(name='cls_fcs'),
                        dict(name='reg_fcs')
                    ])
            ]

    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch.

        convs -> avg pool (optional) -> fcs

        Returns:
            tuple: (branch convs, branch fcs, output dim of the last layer).
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if (num_branch_convs > 0):
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim if (i == 0) else
                                    self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if (num_branch_fcs > 0):
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if ((is_shared or (self.num_shared_fcs == 0)) and
                    (not self.with_avg_pool)):
                # Entering fcs from a spatial map: flatten to channels * area.
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim if (i == 0) else
                                  self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return (branch_convs, branch_fcs, last_layer_dim)

    def forward(self, x):
        # shared part
        if (self.num_shared_convs > 0):
            for conv in self.shared_convs:
                x = conv(x)
        if (self.num_shared_fcs > 0):
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.flatten(1)
            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # separate branches
        x_cls = x
        x_reg = x

        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if (x_cls.dim() > 2):
            # Still spatial: pool (optional) and flatten before the fcs.
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))

        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if (x_reg.dim() > 2):
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))

        cls_score = (self.fc_cls(x_cls) if self.with_cls else None)
        bbox_pred = (self.fc_reg(x_reg) if self.with_reg else None)
        return (cls_score, bbox_pred)
@HEADS.register_module() class Shared2FCBBoxHead(ConvFCBBoxHead): def __init__(self, fc_out_channels=1024, *args, **kwargs): super(Shared2FCBBoxHead, self).__init__(*args, num_shared_convs=0, num_shared_fcs=2, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, **kwargs)
@HEADS.register_module() class Shared4Conv1FCBBoxHead(ConvFCBBoxHead): def __init__(self, fc_out_channels=1024, *args, **kwargs): super(Shared4Conv1FCBBoxHead, self).__init__(*args, num_shared_convs=4, num_shared_fcs=1, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, **kwargs)
class BasicResBlock(BaseModule): 'Basic residual block.\n\n This block is a little different from the block in the ResNet backbone.\n The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.\n\n Args:\n in_channels (int): Channels of the input feature map.\n out_channels (int): Channels of the output feature map.\n conv_cfg (dict): The config dict for convolution layers.\n norm_cfg (dict): The config dict for normalization layers.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None\n ' def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN'), init_cfg=None): super(BasicResBlock, self).__init__(init_cfg) self.conv1 = ConvModule(in_channels, in_channels, kernel_size=3, padding=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg) self.conv2 = ConvModule(in_channels, out_channels, kernel_size=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) self.conv_identity = ConvModule(in_channels, out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) self.relu = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) identity = self.conv_identity(identity) out = (x + identity) out = self.relu(out) return out
@HEADS.register_module()
class DoubleConvFCBBoxHead(BBoxHead):
    """Bbox head used in Double-Head R-CNN

    .. code-block:: none

                                          /-> cls
                      /-> shared convs ->
                                          \\-> reg
        roi features
                                          /-> cls
                      \\-> shared fc    ->
                                          \\-> reg
    """

    def __init__(self,
                 num_convs=0,
                 num_fcs=0,
                 conv_out_channels=1024,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 init_cfg=dict(
                     type='Normal',
                     override=[
                         dict(type='Normal', name='fc_cls', std=0.01),
                         dict(type='Normal', name='fc_reg', std=0.001),
                         dict(
                             type='Xavier',
                             name='fc_branch',
                             distribution='uniform')
                     ]),
                 **kwargs):
        kwargs.setdefault('with_avg_pool', True)
        super(DoubleConvFCBBoxHead, self).__init__(init_cfg=init_cfg, **kwargs)
        assert self.with_avg_pool
        assert (num_convs > 0)
        assert (num_fcs > 0)
        self.num_convs = num_convs
        self.num_fcs = num_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # Conv branch (regression): res block to lift channels, then
        # bottlenecks. Fc branch (classification): stack of linear layers.
        self.res_block = BasicResBlock(self.in_channels,
                                       self.conv_out_channels)
        self.conv_branch = self._add_conv_branch()
        self.fc_branch = self._add_fc_branch()

        out_dim_reg = (4 if self.reg_class_agnostic else
                       (4 * self.num_classes))
        self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
        # +1 output for the background class.
        self.fc_cls = nn.Linear(self.fc_out_channels, (self.num_classes + 1))
        self.relu = nn.ReLU(inplace=True)

    def _add_conv_branch(self):
        """Add the fc branch which consists of a sequential of conv layers."""
        branch_convs = ModuleList()
        for i in range(self.num_convs):
            branch_convs.append(
                Bottleneck(
                    inplanes=self.conv_out_channels,
                    planes=(self.conv_out_channels // 4),
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        return branch_convs

    def _add_fc_branch(self):
        """Add the fc branch which consists of a sequential of fc layers."""
        branch_fcs = ModuleList()
        for i in range(self.num_fcs):
            # First fc consumes the flattened RoI feature map.
            fc_in_channels = ((self.in_channels * self.roi_feat_area)
                              if (i == 0) else self.fc_out_channels)
            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
        return branch_fcs

    def forward(self, x_cls, x_reg):
        # conv head: regression branch operates on its own RoI features.
        x_conv = self.res_block(x_reg)

        for conv in self.conv_branch:
            x_conv = conv(x_conv)

        if self.with_avg_pool:
            x_conv = self.avg_pool(x_conv)

        x_conv = x_conv.view(x_conv.size(0), (- 1))
        bbox_pred = self.fc_reg(x_conv)

        # fc head: classification branch flattens and runs through the fcs.
        x_fc = x_cls.view(x_cls.size(0), (- 1))
        for fc in self.fc_branch:
            x_fc = self.relu(fc(x_fc))

        cls_score = self.fc_cls(x_fc)

        return (cls_score, bbox_pred)
@HEADS.register_module() class SCNetBBoxHead(ConvFCBBoxHead): 'BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us\n to get intermediate shared feature.\n ' def _forward_shared(self, x): 'Forward function for shared part.' if (self.num_shared_convs > 0): for conv in self.shared_convs: x = conv(x) if (self.num_shared_fcs > 0): if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) return x def _forward_cls_reg(self, x): 'Forward function for classification and regression parts.' x_cls = x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if (x_cls.dim() > 2): if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if (x_reg.dim() > 2): if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = (self.fc_cls(x_cls) if self.with_cls else None) bbox_pred = (self.fc_reg(x_reg) if self.with_reg else None) return (cls_score, bbox_pred) def forward(self, x, return_shared_feat=False): 'Forward function.\n\n Args:\n x (Tensor): input features\n return_shared_feat (bool): If True, return cls-reg-shared feature.\n\n Return:\n out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``,\n if ``return_shared_feat`` is True, append ``x_shared`` to the\n returned tuple.\n ' x_shared = self._forward_shared(x) out = self._forward_cls_reg(x_shared) if return_shared_feat: out += (x_shared,) return out
@HEADS.register_module() class DoubleHeadRoIHead(StandardRoIHead): 'RoI head for Double Head RCNN.\n\n https://arxiv.org/abs/1904.06493\n ' def __init__(self, reg_roi_scale_factor, **kwargs): super(DoubleHeadRoIHead, self).__init__(**kwargs) self.reg_roi_scale_factor = reg_roi_scale_factor def _bbox_forward(self, x, rois): 'Box head forward function used in both training and testing time.' bbox_cls_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) (cls_score, bbox_pred) = self.bbox_head(bbox_cls_feats, bbox_reg_feats) bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_cls_feats) return bbox_results
@HEADS.register_module()
class CoarseMaskHead(FCNMaskHead):
    """Coarse mask head used in PointRend.

    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsample it.

    Args:
        num_convs (int): Number of conv layers in the head. Default: 0.
        num_fcs (int): Number of fc layers in the head. Default: 2.
        fc_out_channels (int): Number of output channels of fc layer.
            Default: 1024.
        downsample_factor (int): The factor that feature map is downsampled by.
            Default: 2.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_convs=0,
                 num_fcs=2,
                 fc_out_channels=1024,
                 downsample_factor=2,
                 init_cfg=dict(
                     type='Xavier',
                     override=[
                         dict(name='fcs'),
                         dict(type='Constant', val=0.001, name='fc_logits')
                     ]),
                 *arg,
                 **kwarg):
        # Disable the parent's upsample layer and defer weight init to the
        # explicit ``init_cfg`` stored below.
        super(CoarseMaskHead, self).__init__(
            *arg,
            num_convs=num_convs,
            upsample_cfg=dict(type=None),
            init_cfg=None,
            **kwarg)
        self.init_cfg = init_cfg
        self.num_fcs = num_fcs
        assert (self.num_fcs > 0)
        self.fc_out_channels = fc_out_channels
        self.downsample_factor = downsample_factor
        assert (self.downsample_factor >= 1)
        # The parent's conv_logits is replaced by a Linear head below.
        delattr(self, 'conv_logits')
        if (downsample_factor > 1):
            # If there are convs, downsampling consumes their output channels;
            # otherwise it consumes the raw input channels.
            downsample_in_channels = (
                self.conv_out_channels
                if (self.num_convs > 0) else self.in_channels)
            self.downsample_conv = ConvModule(
                downsample_in_channels,
                self.conv_out_channels,
                kernel_size=downsample_factor,
                stride=downsample_factor,
                padding=0,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
        else:
            self.downsample_conv = None
        # Spatial size of the (possibly downsampled) feature map fed to fcs.
        self.output_size = ((self.roi_feat_size[0] // downsample_factor),
                            (self.roi_feat_size[1] // downsample_factor))
        self.output_area = (self.output_size[0] * self.output_size[1])
        last_layer_dim = (self.conv_out_channels * self.output_area)
        self.fcs = ModuleList()
        for i in range(num_fcs):
            fc_in_channels = (
                last_layer_dim if (i == 0) else self.fc_out_channels)
            self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        # One logit per class per output pixel, reshaped in forward().
        output_channels = (self.num_classes * self.output_area)
        self.fc_logits = Linear(last_layer_dim, output_channels)

    def init_weights(self):
        # Skip FCNMaskHead's init and use the grandparent's, so the
        # ``init_cfg`` stored above drives initialization.
        super(FCNMaskHead, self).init_weights()

    @auto_fp16()
    def forward(self, x):
        """Predict coarse masks.

        Returns a tensor of shape (N, num_classes, *self.output_size).
        """
        for conv in self.convs:
            x = conv(x)
        if (self.downsample_conv is not None):
            x = self.downsample_conv(x)
        x = x.flatten(1)
        for fc in self.fcs:
            x = self.relu(fc(x))
        mask_pred = self.fc_logits(x).view(
            x.size(0), self.num_classes, *self.output_size)
        return mask_pred
@HEADS.register_module()
class DynamicMaskHead(FCNMaskHead):
    """Dynamic Mask Head for
    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_

    Args:
        num_convs (int): Number of convolution layer. Defaults to 4.
        roi_feat_size (int): The output size of RoI extractor. Defaults to 14.
        in_channels (int): Input feature channels. Defaults to 256.
        conv_kernel_size (int): Kernel size of convolution layers.
            Defaults to 3.
        conv_out_channels (int): Output channels of convolution layers.
            Defaults to 256.
        num_classes (int): Number of classes. Defaults to 80.
        class_agnostic (int): Whether generate class agnostic prediction.
            Defaults to False.
        upsample_cfg (dict): The config for upsample layer.
        conv_cfg (dict): The convolution layer config.
        norm_cfg (dict): The norm layer config.
        dynamic_conv_cfg (dict): The dynamic convolution layer config.
        loss_mask (dict): The config for mask loss.
    """

    def __init__(self,
                 num_convs=4,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 conv_out_channels=256,
                 num_classes=80,
                 class_agnostic=False,
                 upsample_cfg=dict(type='deconv', scale_factor=2),
                 conv_cfg=None,
                 norm_cfg=None,
                 dynamic_conv_cfg=dict(
                     type='DynamicConv',
                     in_channels=256,
                     feat_channels=64,
                     out_channels=256,
                     input_feat_shape=14,
                     with_proj=False,
                     act_cfg=dict(type='ReLU', inplace=True),
                     norm_cfg=dict(type='LN')),
                 loss_mask=dict(type='DiceLoss', loss_weight=8.0),
                 **kwargs):
        super(DynamicMaskHead, self).__init__(
            num_convs=num_convs,
            roi_feat_size=roi_feat_size,
            in_channels=in_channels,
            conv_kernel_size=conv_kernel_size,
            conv_out_channels=conv_out_channels,
            num_classes=num_classes,
            class_agnostic=class_agnostic,
            upsample_cfg=upsample_cfg,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            loss_mask=loss_mask,
            **kwargs)
        assert (class_agnostic is False), \
            'DynamicMaskHead only support class_agnostic=False'
        self.fp16_enabled = False
        # Dynamic convolution that conditions RoI features on proposal
        # features (one kernel per proposal).
        self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)

    def init_weights(self):
        """Use xavier initialization for all weight parameter and set
        classification head bias as a specific value when use focal loss."""
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)
        nn.init.constant_(self.conv_logits.bias, 0.0)

    @auto_fp16()
    def forward(self, roi_feat, proposal_feat):
        """Forward function of DynamicMaskHead.

        Args:
            roi_feat (Tensor): Roi-pooling features with shape
                (batch_size*num_proposals, feature_dimensions,
                pooling_h , pooling_w).
            proposal_feat (Tensor): Intermediate feature get from
                diihead in last stage, has shape
                (batch_size*num_proposals, feature_dimensions)

        Returns:
            mask_pred (Tensor): Predicted foreground masks with shape
                (batch_size*num_proposals, num_classes,
                pooling_h*2, pooling_w*2).
        """
        proposal_feat = proposal_feat.reshape((- 1), self.in_channels)
        proposal_feat_iic = self.instance_interactive_conv(
            proposal_feat, roi_feat)
        # Back from (N, HW, C) to the RoI feature layout (N, C, H, W).
        x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())
        for conv in self.convs:
            x = conv(x)
        if (self.upsample is not None):
            x = self.upsample(x)
            if (self.upsample_method == 'deconv'):
                x = self.relu(x)
        mask_pred = self.conv_logits(x)
        return mask_pred

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, mask_targets, labels):
        """Compute the mask loss, averaged across devices via reduce_mean."""
        num_pos = labels.new_ones(labels.size()).float().sum()
        avg_factor = torch.clamp(reduce_mean(num_pos), min=1.0).item()
        loss = dict()
        if (mask_pred.size(0) == 0):
            # No positives: keep the graph connected with a zero-valued sum.
            loss_mask = mask_pred.sum()
        else:
            # Select each proposal's predicted mask at its GT class channel.
            loss_mask = self.loss_mask(
                mask_pred[(torch.arange(num_pos).long(), labels,
                           ...)].sigmoid(),
                mask_targets,
                avg_factor=avg_factor)
        loss['loss_mask'] = loss_mask
        return loss

    def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
        """Build mask targets for the positive proposals of each image."""
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
                                   gt_masks, rcnn_train_cfg)
        return mask_targets
@HEADS.register_module()
class FeatureRelayHead(BaseModule):
    """Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        in_channels (int, optional): number of input channels. Default: 1024.
        out_conv_channels (int, optional): number of output channels before
            classification layer. Default: 256.
        roi_feat_size (int, optional): roi feat size at box head. Default: 7.
        scale_factor (int, optional): scale factor to match roi feat size
            at mask head. Default: 2.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels=1024,
                 out_conv_channels=256,
                 roi_feat_size=7,
                 scale_factor=2,
                 init_cfg=dict(type='Kaiming', layer='Linear')):
        super(FeatureRelayHead, self).__init__(init_cfg)
        assert isinstance(roi_feat_size, int)
        self.in_channels = in_channels
        self.out_conv_channels = out_conv_channels
        self.roi_feat_size = roi_feat_size
        # fc output is reshaped to (out_conv_channels, roi_feat_size,
        # roi_feat_size) in forward(), hence the squared size here.
        self.out_channels = ((roi_feat_size ** 2) * out_conv_channels)
        self.scale_factor = scale_factor
        self.fp16_enabled = False
        self.fc = nn.Linear(self.in_channels, self.out_channels)
        self.upsample = nn.Upsample(
            scale_factor=scale_factor, mode='bilinear', align_corners=True)

    @auto_fp16()
    def forward(self, x):
        """Forward function.

        Returns the upsampled spatial feature, or ``None`` when the batch
        is empty.
        """
        (N, in_C) = x.shape
        if (N > 0):
            out_C = self.out_conv_channels
            out_HW = self.roi_feat_size
            x = self.fc(x)
            x = x.reshape(N, out_C, out_HW, out_HW)
            x = self.upsample(x)
            return x
        # Empty batch: callers are expected to handle the ``None`` result.
        return None
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
    """Multi-level fused semantic segmentation head.

    .. code-block:: none

        in_1 -> 1x1 conv ---
                            |
        in_2 -> 1x1 conv -- |
                           ||
        in_3 -> 1x1 conv - ||
                          ||| /-> 1x1 conv (mask prediction)
        in_4 -> 1x1 conv -----> 3x3 convs (*4)
                            | \-> 1x1 conv (feature)
        in_5 -> 1x1 conv ---
    """

    def __init__(self,
                 num_ins,
                 fusion_level,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=183,
                 conv_cfg=None,
                 norm_cfg=None,
                 ignore_label=None,
                 loss_weight=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss',
                     ignore_index=255,
                     loss_weight=0.2),
                 init_cfg=dict(
                     type='Kaiming', override=dict(name='conv_logits'))):
        super(FusedSemanticHead, self).__init__(init_cfg)
        self.num_ins = num_ins
        self.fusion_level = fusion_level
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False

        # One 1x1 lateral conv per input level.
        self.lateral_convs = nn.ModuleList()
        for i in range(self.num_ins):
            self.lateral_convs.append(
                ConvModule(
                    self.in_channels,
                    self.in_channels,
                    1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    inplace=False))

        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = (self.in_channels if (i == 0) else conv_out_channels)
            self.convs.append(
                ConvModule(
                    in_channels,
                    conv_out_channels,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        # Two parallel 1x1 heads: semantic logits and an embedding feature.
        self.conv_embedding = ConvModule(
            conv_out_channels,
            conv_out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)

        # Deprecated overrides; ``is not None`` so 0/0.0 are honored too
        # (plain truthiness would silently drop them).
        if ignore_label is not None:
            loss_seg['ignore_index'] = ignore_label
        if loss_weight is not None:
            loss_seg['loss_weight'] = loss_weight
        if (ignore_label is not None) or (loss_weight is not None):
            warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
        self.criterion = build_loss(loss_seg)

    @auto_fp16()
    def forward(self, feats):
        """Fuse all levels onto the fusion level and predict logits/feature.

        Args:
            feats (list[Tensor]): multi-level feature maps.

        Returns:
            tuple(Tensor, Tensor): semantic logits and embedding feature.
        """
        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
        fused_size = tuple(x.shape[-2:])
        for i, feat in enumerate(feats):
            if i != self.fusion_level:
                # Resize every other level to the fusion level's resolution.
                feat = F.interpolate(
                    feat, size=fused_size, mode='bilinear', align_corners=True)
                x += self.lateral_convs[i](feat)

        for i in range(self.num_convs):
            x = self.convs[i](x)

        mask_pred = self.conv_logits(x)
        x = self.conv_embedding(x)
        return mask_pred, x

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, labels):
        """Cross-entropy loss against per-pixel semantic labels."""
        labels = labels.squeeze(1).long()
        loss_semantic_seg = self.criterion(mask_pred, labels)
        return loss_semantic_seg
@HEADS.register_module()
class GlobalContextHead(BaseModule):
    """Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        num_convs (int, optional): number of convolutional layer in GlbCtxHead.
            Default: 4.
        in_channels (int, optional): number of input channels. Default: 256.
        conv_out_channels (int, optional): number of output channels before
            classification layer. Default: 256.
        num_classes (int, optional): number of classes. Default: 80.
        loss_weight (float, optional): global context loss weight. Default: 1.
        conv_cfg (dict, optional): config to init conv layer. Default: None.
        norm_cfg (dict, optional): config to init norm layer. Default: None.
        conv_to_res (bool, optional): if True, 2 convs will be grouped into
            1 `SimplifiedBasicBlock` using a skip connection. Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=80,
                 loss_weight=1.0,
                 conv_cfg=None,
                 norm_cfg=None,
                 conv_to_res=False,
                 init_cfg=dict(
                     type='Normal', std=0.01, override=dict(name='fc'))):
        super(GlobalContextHead, self).__init__(init_cfg)
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.loss_weight = loss_weight
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.conv_to_res = conv_to_res
        self.fp16_enabled = False

        if self.conv_to_res:
            # Pair up convs into residual blocks (2 convs per block).
            num_res_blocks = (num_convs // 2)
            self.convs = ResLayer(
                SimplifiedBasicBlock,
                in_channels,
                self.conv_out_channels,
                num_res_blocks,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
            self.num_convs = num_res_blocks
        else:
            self.convs = nn.ModuleList()
            for i in range(self.num_convs):
                in_channels = (
                    self.in_channels if (i == 0) else conv_out_channels)
                self.convs.append(
                    ConvModule(
                        in_channels,
                        conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))

        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(conv_out_channels, num_classes)

        # Multi-label objective: one binary decision per class.
        self.criterion = nn.BCEWithLogitsLoss()

    @auto_fp16()
    def forward(self, feats):
        """Forward function.

        Uses only the coarsest feature level; returns the multi-class
        prediction and the pooled feature.
        """
        x = feats[(- 1)]
        for i in range(self.num_convs):
            x = self.convs[i](x)
        x = self.pool(x)

        # multi-class prediction
        mc_pred = x.reshape(x.size(0), (- 1))
        mc_pred = self.fc(mc_pred)

        return (mc_pred, x)

    @force_fp32(apply_to=('pred', ))
    def loss(self, pred, labels):
        """Loss function.

        Builds a binary presence target per image from the set of unique
        GT labels, then applies BCE-with-logits.
        """
        labels = [lbl.unique() for lbl in labels]
        targets = pred.new_zeros(pred.size())
        for (i, label) in enumerate(labels):
            targets[(i, label)] = 1.0
        loss = (self.loss_weight * self.criterion(pred, targets))
        return loss
@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
    """Mask head for HTC that can fuse a residual feature from the
    previous stage via a 1x1 conv."""

    def __init__(self, with_conv_res=True, *args, **kwargs):
        super(HTCMaskHead, self).__init__(*args, **kwargs)
        self.with_conv_res = with_conv_res
        if self.with_conv_res:
            self.conv_res = ConvModule(
                self.conv_out_channels,
                self.conv_out_channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)

    def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
        if res_feat is not None:
            assert self.with_conv_res
            # Fuse the previous stage's feature before the conv stack.
            x = x + self.conv_res(res_feat)
        for conv in self.convs:
            x = conv(x)
        res_feat = x
        outs = []
        if return_logits:
            up = self.upsample(x)
            if self.upsample_method == 'deconv':
                up = self.relu(up)
            outs.append(self.conv_logits(up))
        if return_feat:
            outs.append(res_feat)
        # Single output is unwrapped for convenience.
        return outs[0] if len(outs) == 1 else outs
@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
    """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetMaskHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if not conv_to_res:
            return
        # Residual blocks replace pairs of convs; only valid for 3x3 kernels.
        assert self.conv_kernel_size == 3
        self.num_res_blocks = self.num_convs // 2
        self.convs = ResLayer(
            SimplifiedBasicBlock,
            self.in_channels,
            self.conv_out_channels,
            self.num_res_blocks,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
@HEADS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
    """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetSemanticHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if not self.conv_to_res:
            return
        # Two convs collapse into one residual block.
        num_res_blocks = self.num_convs // 2
        self.convs = ResLayer(
            SimplifiedBasicBlock,
            self.in_channels,
            self.conv_out_channels,
            num_res_blocks,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
        self.num_convs = num_res_blocks
@HEADS.register_module()
class PISARoIHead(StandardRoIHead):
    """The RoI head for `Prime Sample Attention in Object Detection
    <https://arxiv.org/abs/1904.04821>`_."""

    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None):
        """Forward function for training.

        Args:
            x (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            proposals (list[Tensors]): List of region proposals.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : True segmentation masks for each box
                used if the architecture supports a segmentation task.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        if (self.with_bbox or self.with_mask):
            num_imgs = len(img_metas)
            if (gt_bboxes_ignore is None):
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            sampling_results = []
            neg_label_weights = []
            for i in range(num_imgs):
                assign_result = self.bbox_assigner.assign(
                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
                    gt_labels[i])
                sampling_result = self.bbox_sampler.sample(
                    assign_result,
                    proposal_list[i],
                    gt_bboxes[i],
                    gt_labels[i],
                    feats=[lvl_feat[i][None] for lvl_feat in x])
                # A PISA-style sampler may additionally return per-negative
                # label weights alongside the sampling result.
                neg_label_weight = None
                if isinstance(sampling_result, tuple):
                    (sampling_result, neg_label_weight) = sampling_result
                sampling_results.append(sampling_result)
                neg_label_weights.append(neg_label_weight)
        losses = dict()
        # bbox head forward and loss
        if self.with_bbox:
            bbox_results = self._bbox_forward_train(
                x,
                sampling_results,
                gt_bboxes,
                gt_labels,
                img_metas,
                neg_label_weights=neg_label_weights)
            losses.update(bbox_results['loss_bbox'])
        # mask head forward and loss
        if self.with_mask:
            mask_results = self._mask_forward_train(
                x, sampling_results, bbox_results['bbox_feats'], gt_masks,
                img_metas)
            losses.update(mask_results['loss_mask'])
        return losses

    def _bbox_forward(self, x, rois):
        """Box forward function used in both training and testing."""
        bbox_feats = self.bbox_roi_extractor(
            x[:self.bbox_roi_extractor.num_inputs], rois)
        if self.with_shared_head:
            bbox_feats = self.shared_head(bbox_feats)
        (cls_score, bbox_pred) = self.bbox_head(bbox_feats)
        bbox_results = dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        return bbox_results

    def _bbox_forward_train(self,
                            x,
                            sampling_results,
                            gt_bboxes,
                            gt_labels,
                            img_metas,
                            neg_label_weights=None):
        """Run forward function and calculate loss for box head in training."""
        rois = bbox2roi([res.bboxes for res in sampling_results])
        bbox_results = self._bbox_forward(x, rois)
        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                                  gt_labels, self.train_cfg)
        # neg_label_weights obtained by sampler is image-wise, mapping back to
        # the corresponding location in label weights
        if (neg_label_weights[0] is not None):
            label_weights = bbox_targets[1]
            cur_num_rois = 0
            for i in range(len(sampling_results)):
                num_pos = sampling_results[i].pos_inds.size(0)
                num_neg = sampling_results[i].neg_inds.size(0)
                # Targets are laid out [pos..., neg...] per image; overwrite
                # only the negative slice with the sampler's weights.
                label_weights[(cur_num_rois + num_pos):(
                    (cur_num_rois + num_pos) + num_neg)] = neg_label_weights[i]
                cur_num_rois += (num_pos + num_neg)
        cls_score = bbox_results['cls_score']
        bbox_pred = bbox_results['bbox_pred']

        # Apply ISR-P (importance-based sample reweighting, positives) if
        # configured; it rewrites the bbox targets in place.
        isr_cfg = self.train_cfg.get('isr', None)
        if (isr_cfg is not None):
            bbox_targets = isr_p(
                cls_score,
                bbox_pred,
                bbox_targets,
                rois,
                sampling_results,
                self.bbox_head.loss_cls,
                self.bbox_head.bbox_coder,
                **isr_cfg,
                num_class=self.bbox_head.num_classes)
        loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
                                        *bbox_targets)

        # Add CARL (classification-aware regression loss) if configured.
        carl_cfg = self.train_cfg.get('carl', None)
        if (carl_cfg is not None):
            loss_carl = carl_loss(
                cls_score,
                bbox_targets[0],
                bbox_pred,
                bbox_targets[2],
                self.bbox_head.loss_bbox,
                **carl_cfg,
                num_class=self.bbox_head.num_classes)
            loss_bbox.update(loss_carl)

        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
    """Shared head built from one ResNet stage.

    Instantiates stage ``stage`` of a ResNet of the given ``depth`` and runs
    it as a shared RoI head.
    """

    def __init__(self,
                 depth,
                 stage=3,
                 stride=2,
                 dilation=1,
                 style='pytorch',
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 with_cp=False,
                 dcn=None,
                 pretrained=None,
                 init_cfg=None):
        super(ResLayer, self).__init__(init_cfg)
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.stage = stage
        self.fp16_enabled = False
        # Look up the block type and per-stage block counts for this depth.
        (block, stage_blocks) = ResNet.arch_settings[depth]
        stage_block = stage_blocks[stage]
        # Channel widths follow ResNet's doubling scheme per stage.
        planes = (64 * (2 ** stage))
        inplanes = ((64 * (2 ** (stage - 1))) * block.expansion)

        res_layer = _ResLayer(
            block,
            inplanes,
            planes,
            stage_block,
            stride=stride,
            dilation=dilation,
            style=style,
            with_cp=with_cp,
            norm_cfg=self.norm_cfg,
            dcn=dcn)
        self.add_module(f'layer{(stage + 1)}', res_layer)

        # ``pretrained`` is deprecated in favor of ``init_cfg``; both set at
        # once is an error.
        assert (not (init_cfg and pretrained)), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is a deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                # Default init: Kaiming for convs, constant 1 for norms.
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

    @auto_fp16()
    def forward(self, x):
        """Run the wrapped ResNet stage."""
        res_layer = getattr(self, f'layer{(self.stage + 1)}')
        out = res_layer(x)
        return out

    def train(self, mode=True):
        """Switch mode; keep BatchNorm layers in eval when ``norm_eval``."""
        super(ResLayer, self).train(mode)
        if self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
    """PanopticFPNHead used in Panoptic FPN.

    In this head, the number of output channels is ``num_stuff_classes
    + 1``, including all stuff classes and one thing class. The stuff
    classes will be reset from ``0`` to ``num_stuff_classes - 1``, the
    thing classes will be merged to ``num_stuff_classes``-th channel.

    Arg:
        num_things_classes (int): Number of thing classes. Default: 80.
        num_stuff_classes (int): Number of stuff classes. Default: 53.
        num_classes (int): Number of classes, including all stuff
            classes and one thing class. This argument is deprecated,
            please use ``num_things_classes`` and ``num_stuff_classes``.
            The module will automatically infer the num_classes by
            ``num_stuff_classes + 1``.
        in_channels (int): Number of channels in the input feature
            map.
        inner_channels (int): Number of channels in inner features.
        start_level (int): The start level of the input features
            used in PanopticFPN.
        end_level (int): The end level of the used features, the
            ``end_level``-th layer will not be used.
        fg_range (tuple): Range of the foreground classes. Deprecated,
            please use ``num_things_classes`` directly.
        bg_range (tuple): Range of the background classes. Deprecated,
            please use ``num_stuff_classes`` and ``num_things_classes``
            directly.
        conv_cfg (dict): Dictionary to construct and config conv layer.
            Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Use ``GN`` by default.
        init_cfg (dict or list[dict], optional): Initialization config dict.
        loss_seg (dict): the loss of the semantic head.
    """

    def __init__(self,
                 num_things_classes=80,
                 num_stuff_classes=53,
                 num_classes=None,
                 in_channels=256,
                 inner_channels=128,
                 start_level=0,
                 end_level=4,
                 fg_range=None,
                 bg_range=None,
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 init_cfg=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss',
                     ignore_index=(- 1),
                     loss_weight=1.0)):
        if (num_classes is not None):
            warnings.warn(
                '`num_classes` is deprecated now, please set '
                '`num_stuff_classes` directly, the `num_classes` will be '
                'set to `num_stuff_classes + 1`')
            # Sanity check: deprecated value must agree with the new scheme.
            assert (num_classes == (num_stuff_classes + 1))
        super(PanopticFPNHead, self).__init__((num_stuff_classes + 1),
                                              init_cfg, loss_seg)
        self.num_things_classes = num_things_classes
        self.num_stuff_classes = num_stuff_classes
        if ((fg_range is not None) and (bg_range is not None)):
            # Deprecated range form: derive class counts from the ranges.
            self.fg_range = fg_range
            self.bg_range = bg_range
            self.num_things_classes = ((fg_range[1] - fg_range[0]) + 1)
            self.num_stuff_classes = ((bg_range[1] - bg_range[0]) + 1)
            warnings.warn(
                f'`fg_range` and `bg_range` are deprecated now, please use '
                f'`num_things_classes`={self.num_things_classes} and '
                f'`num_stuff_classes`={self.num_stuff_classes} instead.')
        self.start_level = start_level
        self.end_level = end_level
        self.num_stages = (end_level - start_level)
        self.inner_channels = inner_channels

        # Level ``i`` is upsampled ``i`` times so all levels reach the
        # start-level resolution before summation in forward().
        self.conv_upsample_layers = ModuleList()
        for i in range(start_level, end_level):
            self.conv_upsample_layers.append(
                ConvUpsample(
                    in_channels,
                    inner_channels,
                    num_layers=(i if (i > 0) else 1),
                    num_upsample=(i if (i > 0) else 0),
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg))
        self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1)

    def _set_things_to_void(self, gt_semantic_seg):
        """Merge thing classes to one class.

        In PanopticFPN, the background labels will be reset from `0` to
        `self.num_stuff_classes-1`, the foreground labels will be merged to
        `self.num_stuff_classes`-th channel.
        """
        gt_semantic_seg = gt_semantic_seg.int()
        fg_mask = (gt_semantic_seg < self.num_things_classes)
        bg_mask = ((gt_semantic_seg >= self.num_things_classes) *
                   (gt_semantic_seg <
                    (self.num_things_classes + self.num_stuff_classes)))

        new_gt_seg = torch.clone(gt_semantic_seg)
        # Shift stuff labels down to start at 0.
        new_gt_seg = torch.where(bg_mask,
                                 (gt_semantic_seg - self.num_things_classes),
                                 new_gt_seg)
        # Collapse every thing label into the single extra channel.
        new_gt_seg = torch.where(fg_mask,
                                 (fg_mask.int() * self.num_stuff_classes),
                                 new_gt_seg)
        return new_gt_seg

    def loss(self, seg_preds, gt_semantic_seg):
        """The loss of PanopticFPN head.

        Things classes will be merged to one class in PanopticFPN.
        """
        gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)
        return super().loss(seg_preds, gt_semantic_seg)

    def init_weights(self):
        super().init_weights()
        nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
        self.conv_logits.bias.data.zero_()

    def forward(self, x):
        # the number of subnet
        assert (self.num_stages <= len(x))

        feats = []
        for (i, layer) in enumerate(self.conv_upsample_layers):
            f = layer(x[(self.start_level + i)])
            feats.append(f)

        # Element-wise sum across levels, then 1x1 conv for logits.
        feats = torch.sum(torch.stack(feats, dim=0), dim=0)
        seg_preds = self.conv_logits(feats)
        out = dict(seg_preds=seg_preds, feats=feats)
        return out
class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
    """Base class for panoptic heads."""

    def __init__(self,
                 num_things_classes=80,
                 num_stuff_classes=53,
                 test_cfg=None,
                 loss_panoptic=None,
                 init_cfg=None,
                 **kwargs):
        super(BasePanopticFusionHead, self).__init__(init_cfg)
        self.num_things_classes = num_things_classes
        self.num_stuff_classes = num_stuff_classes
        self.num_classes = num_things_classes + num_stuff_classes
        self.test_cfg = test_cfg
        # Build the panoptic loss only when a config is supplied.
        self.loss_panoptic = build_loss(
            loss_panoptic) if loss_panoptic else None

    @property
    def with_loss(self):
        """bool: whether the panoptic head contains loss function."""
        return self.loss_panoptic is not None

    @abstractmethod
    def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
        """Forward function during training."""

    @abstractmethod
    def simple_test(self,
                    img_metas,
                    det_labels,
                    mask_preds,
                    seg_preds,
                    det_bboxes,
                    cfg=None,
                    **kwargs):
        """Test without augmentation."""
def adaptive_avg_pool2d(input, output_size):
    """Handle empty batch dimension to adaptive_avg_pool2d.

    Args:
        input (tensor): 4D tensor.
        output_size (int, tuple[int,int]): the target output size.
    """
    if input.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 9)):
        return F.adaptive_avg_pool2d(input, output_size)
    # Old torch cannot pool an empty tensor: fabricate a correctly-shaped
    # empty result instead of calling the op.
    size = [output_size, output_size] if isinstance(output_size,
                                                    int) else output_size
    return NewEmptyTensorOp.apply(input, [*input.shape[:2], *size])
class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
    """Handle empty batch dimension to AdaptiveAvgPool2d."""

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            size = self.output_size
            if isinstance(size, int):
                size = [size, size]
            else:
                # ``None`` entries mean "keep the input's dimension".
                size = [
                    d if s is None else s
                    for s, d in zip(size, x.size()[-2:])
                ]
            # Build a correctly-shaped empty output without calling the op.
            return NewEmptyTensorOp.apply(x, [*x.shape[:2], *size])
        return super().forward(x)
def build_transformer(cfg, default_args=None):
    """Builder for Transformer."""
    transformer = build_from_cfg(cfg, TRANSFORMER, default_args)
    return transformer
def build_linear_layer(cfg, *args, **kwargs):
    """Build linear layer.
    Args:
        cfg (None or dict): The linear layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate an linear layer.
        args (argument list): Arguments passed to the `__init__`
            method of the corresponding linear layer.
        kwargs (keyword arguments): Keyword arguments passed to the `__init__`
            method of the corresponding linear layer.
    Returns:
        nn.Module: Created linear layer.
    """
    # Guard-clause validation, then a single construction path.
    if cfg is None:
        cfg_ = dict(type='Linear')
    elif not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    elif 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    else:
        cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in LINEAR_LAYERS:
        raise KeyError(f'Unrecognized linear type {layer_type}')
    linear_layer = LINEAR_LAYERS.get(layer_type)
    return linear_layer(*args, **kwargs, **cfg_)
class ConvUpsample(BaseModule):
    """ConvUpsample performs 2x upsampling after Conv.

    There are several `ConvModule` layers. In the first few layers, upsampling
    will be applied after each layer of convolution. The number of upsampling
    must be no more than the number of ConvModule layers.

    Args:
        in_channels (int): Number of channels in the input feature map.
        inner_channels (int): Number of channels produced by the convolution.
        num_layers (int): Number of convolution layers.
        num_upsample (int | optional): Number of upsampling layer. Must be no
            more than num_layers. Upsampling will be applied after the first
            ``num_upsample`` layers of convolution. Default: ``num_layers``.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        init_cfg (dict): Config dict for initialization. Default: None.
        kwargs (key word augments): Other augments used in ConvModule.
    """

    def __init__(self,
                 in_channels,
                 inner_channels,
                 num_layers=1,
                 num_upsample=None,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 **kwargs):
        super(ConvUpsample, self).__init__(init_cfg)
        if num_upsample is None:
            num_upsample = num_layers
        assert num_upsample <= num_layers, \
            f'num_upsample({num_upsample})must be no more than ' \
            f'num_layers({num_layers})'
        self.num_layers = num_layers
        self.num_upsample = num_upsample
        self.conv = ModuleList()
        channels = in_channels
        for _ in range(num_layers):
            self.conv.append(
                ConvModule(
                    channels,
                    inner_channels,
                    3,
                    padding=1,
                    stride=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
            # After the first layer every conv runs at inner_channels.
            channels = inner_channels

    def forward(self, x):
        remaining = self.num_upsample
        for layer in self.conv:
            x = layer(x)
            # Upsample 2x after each of the first ``num_upsample`` convs.
            if remaining > 0:
                remaining -= 1
                x = F.interpolate(
                    x, scale_factor=2, mode='bilinear', align_corners=False)
        return x
class DarknetBottleneck(BaseModule):
    """The basic bottleneck block used in Darknet.

    Each ResBlock consists of two ConvModules and the input is added to the
    final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
    The first convLayer has filter size of 1x1 and the second one has the
    filter size of 3x3.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        expansion (int): The kernel size of the convolution. Default: 0.5
        add_identity (bool): Whether to add identity to the out.
            Default: True
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Default: False
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish').
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 expansion=0.5,
                 add_identity=True,
                 use_depthwise=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=None):
        super().__init__(init_cfg)
        hidden_channels = int(out_channels * expansion)
        if use_depthwise:
            conv = DepthwiseSeparableConvModule
        else:
            conv = ConvModule
        # 1x1 squeeze followed by 3x3 expand.
        self.conv1 = ConvModule(
            in_channels,
            hidden_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.conv2 = conv(
            hidden_channels,
            out_channels,
            3,
            stride=1,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # Residual add only when shapes line up.
        self.add_identity = add_identity and in_channels == out_channels

    def forward(self, x):
        out = self.conv2(self.conv1(x))
        return x + out if self.add_identity else out
class CSPLayer(BaseModule):
    """Cross Stage Partial Layer.

    The input is split into two 1x1-projected branches: a main branch that
    runs through ``num_blocks`` DarknetBottlenecks and a shortcut branch;
    the two are concatenated and fused by a final 1x1 conv.

    Args:
        in_channels (int): The input channels of the CSP layer.
        out_channels (int): The output channels of the CSP layer.
        expand_ratio (float): Ratio to adjust the number of channels of the
            hidden layer. Default: 0.5
        num_blocks (int): Number of blocks. Default: 1
        add_identity (bool): Whether to add identity in blocks.
            Default: True
        use_depthwise (bool): Whether to use depthwise separable convolution
            in blocks. Default: False
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN', momentum=0.03, eps=0.001)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish')
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 expand_ratio=0.5,
                 num_blocks=1,
                 add_identity=True,
                 use_depthwise=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=None):
        super().__init__(init_cfg)
        mid_channels = int(out_channels * expand_ratio)
        self.main_conv = ConvModule(
            in_channels,
            mid_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.short_conv = ConvModule(
            in_channels,
            mid_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.final_conv = ConvModule(
            2 * mid_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        bottlenecks = [
            DarknetBottleneck(
                mid_channels,
                mid_channels,
                1.0,
                add_identity,
                use_depthwise,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for _ in range(num_blocks)
        ]
        self.blocks = nn.Sequential(*bottlenecks)

    def forward(self, x):
        shortcut = self.short_conv(x)
        deep = self.blocks(self.main_conv(x))
        return self.final_conv(torch.cat((deep, shortcut), dim=1))
class InvertedResidual(BaseModule):
    """Inverted Residual Block.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
            Default: 3.
        stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
            se layer.
        with_expand_conv (bool): Use expand conv or not. If set False,
            mid_channels must be the same with in_channels.
            Default: True.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 kernel_size=3,
                 stride=1,
                 se_cfg=None,
                 with_expand_conv=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 with_cp=False,
                 init_cfg=None):
        super(InvertedResidual, self).__init__(init_cfg)
        # The residual shortcut is only valid when spatial size and channel
        # count are both preserved.
        self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
        assert stride in [1, 2], f'stride must in [1, 2]. But received {stride}.'
        self.with_cp = with_cp
        self.with_se = se_cfg is not None
        self.with_expand_conv = with_expand_conv

        if self.with_se:
            assert isinstance(se_cfg, dict)
        if not self.with_expand_conv:
            # Without the 1x1 expand conv, the depthwise conv consumes the
            # input directly, so the channel counts must agree.
            assert mid_channels == in_channels

        if self.with_expand_conv:
            # 1x1 pointwise conv expanding in_channels -> mid_channels.
            self.expand_conv = ConvModule(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        # Depthwise conv (groups == channels) carries the stride.
        self.depthwise_conv = ConvModule(
            in_channels=mid_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=mid_channels,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        if self.with_se:
            self.se = SELayer(**se_cfg)
        # Linear (no activation) 1x1 projection back to out_channels.
        self.linear_conv = ConvModule(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    def forward(self, x):

        def _inner_forward(x):
            out = x

            if self.with_expand_conv:
                out = self.expand_conv(out)

            out = self.depthwise_conv(out)

            if self.with_se:
                out = self.se(out)

            out = self.linear_conv(out)

            if self.with_res_shortcut:
                return x + out
            else:
                return out

        # Gradient checkpointing trades compute for memory during training;
        # only applicable when the input requires grad.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return out
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Round a channel number to the nearest value divisible by ``divisor``.

    Taken from the original tf repo; it ensures that all layers have a
    channel number divisible by ``divisor``:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa

    Args:
        value (int): The original channel number.
        divisor (int): The divisor to fully divide the channel number.
        min_value (int): The minimum value of the output channel.
            Default: None, means that the minimum value equal to the divisor.
        min_ratio (float): The minimum ratio of the rounded channel number to
            the original channel number. Default: 0.9.

    Returns:
        int: The modified output channel number.
    """
    if min_value is None:
        min_value = divisor
    # Round to the nearest multiple of divisor (ties round up).
    rounded = int(value + divisor / 2) // divisor * divisor
    new_value = max(min_value, rounded)
    # Never shrink the channel count by more than (1 - min_ratio).
    if new_value < min_ratio * value:
        new_value += divisor
    return new_value
@LINEAR_LAYERS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
    """Normalized Linear Layer.

    Weight rows and input features are divided by their (powered) L2 norms
    before the linear projection; the normalized input is scaled by a
    temperature factor.

    Args:
        tempearture (float, optional): Temperature term (the keyword name is
            misspelled upstream and kept for backward compatibility).
            Default to 20.
        power (int, optional): Power term. Default to 1.0.
        eps (float, optional): The minimal value of divisor to
            keep numerical stability. Default to 1e-6.
    """

    def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06, **kwargs):
        super(NormedLinear, self).__init__(*args, **kwargs)
        self.tempearture = tempearture
        self.power = power
        self.eps = eps
        self.init_weights()

    def init_weights(self):
        """Initialize with a small normal for weights and zeros for bias."""
        nn.init.normal_(self.weight, mean=0, std=0.01)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, x):
        # Normalize each weight row and each input feature vector.
        w_norm = self.weight.norm(dim=1, keepdim=True).pow(self.power)
        weight_ = self.weight / (w_norm + self.eps)
        x_norm = x.norm(dim=1, keepdim=True).pow(self.power)
        x_ = x / (x_norm + self.eps)
        x_ = x_ * self.tempearture
        return F.linear(x_, weight_, self.bias)
def _torch_version_ge(major, minor):
    """Return True if ``torch.__version__`` is at least ``major.minor``.

    A plain string comparison (``torch.__version__ >= '1.8'``) is wrong for
    two-digit minors, e.g. ``'1.10' < '1.8'`` lexicographically, so the
    leading numeric components are compared as integers instead.
    """
    try:
        parts = torch.__version__.split('+')[0].split('.')[:2]
        return (int(parts[0]), int(parts[1])) >= (major, minor)
    except (ValueError, IndexError):
        # Unparsable version string: fall back to the original comparison.
        return torch.__version__ >= f'{major}.{minor}'


@CONV_LAYERS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
    """Normalized Conv2d Layer.

    Weight and input are divided by their (powered) L2 norms before the
    convolution; the normalized input is scaled by a temperature factor.

    Args:
        tempearture (float, optional): Temperature term (the keyword name is
            misspelled upstream and kept for backward compatibility).
            Default to 20.
        power (int, optional): Power term. Default to 1.0.
        eps (float, optional): The minimal value of divisor to
            keep numerical stability. Default to 1e-6.
        norm_over_kernel (bool, optional): Normalize each output filter over
            its whole flattened kernel instead of only over dim 1.
            Default to False.
    """

    def __init__(self,
                 *args,
                 tempearture=20,
                 power=1.0,
                 eps=1e-06,
                 norm_over_kernel=False,
                 **kwargs):
        super(NormedConv2d, self).__init__(*args, **kwargs)
        self.tempearture = tempearture
        self.power = power
        self.norm_over_kernel = norm_over_kernel
        self.eps = eps

    def forward(self, x):
        if not self.norm_over_kernel:
            weight_ = self.weight / (
                self.weight.norm(dim=1, keepdim=True).pow(self.power) +
                self.eps)
        else:
            # Norm over the flattened (in_channels * kh * kw) kernel of each
            # output filter, then broadcast back over the spatial dims.
            weight_ = self.weight / (
                self.weight.view(self.weight.size(0), -1).norm(
                    dim=1, keepdim=True).pow(self.power)[..., None, None] +
                self.eps)
        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
        x_ = x_ * self.tempearture

        if hasattr(self, 'conv2d_forward'):
            # torch < 1.5 internal API.
            x_ = self.conv2d_forward(x_, weight_)
        elif _torch_version_ge(1, 8):
            # ``_conv_forward`` gained an explicit ``bias`` argument in 1.8.
            # NOTE: the original code compared version strings
            # lexicographically, which misclassifies '1.10'..'1.13' as < 1.8.
            x_ = self._conv_forward(x_, weight_, self.bias)
        else:
            x_ = self._conv_forward(x_, weight_)
        return x_
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
                           num_stuff):
    """Preprocess the ground truth for a image.

    Args:
        gt_labels (Tensor): Ground truth labels of each bbox,
            with shape (num_gts, ).
        gt_masks (BitmapMasks): Ground truth masks of each instances
            of a image, shape (num_gts, h, w).
        gt_semantic_seg (Tensor): Ground truth of semantic
            segmentation with the shape (1, h, w).
            [0, num_thing_class - 1] means things,
            [num_thing_class, num_class-1] means stuff,
            255 means VOID.
        num_things (int): Number of thing classes.
        num_stuff (int): Number of stuff classes.

    Returns:
        tuple: a tuple containing the following targets.

            - labels (Tensor): Ground truth class indices for a
              image, with shape (n, ), n is the sum of number
              of stuff type and number of instance in a image.
            - masks (Tensor): Ground truth mask for a image, with
              shape (n, h, w).
    """
    num_classes = num_things + num_stuff
    things_labels = gt_labels
    gt_semantic_seg = gt_semantic_seg.squeeze(0)

    # Pad instance masks to the semantic map size and convert to boolean
    # tensors on the same device as the labels.
    things_masks = gt_masks.pad(
        gt_semantic_seg.shape[-2:],
        pad_val=0).to_tensor(dtype=torch.bool, device=gt_labels.device)

    semantic_labels = torch.unique(
        gt_semantic_seg,
        sorted=False,
        return_inverse=False,
        return_counts=False)
    stuff_masks_list = []
    stuff_labels_list = []
    for label in semantic_labels:
        # Skip thing classes (handled via instance masks above) and any
        # label outside the valid class range (e.g. the 255 VOID label).
        if label < num_things or label >= num_classes:
            continue
        stuff_mask = gt_semantic_seg == label
        stuff_masks_list.append(stuff_mask)
        stuff_labels_list.append(label)

    if len(stuff_masks_list) > 0:
        stuff_masks = torch.stack(stuff_masks_list, dim=0)
        stuff_labels = torch.stack(stuff_labels_list, dim=0)
        labels = torch.cat([things_labels, stuff_labels], dim=0)
        masks = torch.cat([things_masks, stuff_masks], dim=0)
    else:
        labels = things_labels
        masks = things_masks

    masks = masks.long()
    return labels, masks
class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 downsample_first=True,
                 **kwargs):
        self.block = block

        downsample = None
        # A projection branch is needed whenever the residual identity would
        # not match the block output in stride or channel count.
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = []
            conv_stride = stride
            if avg_down:
                # ResNet-D style: average-pool for the spatial reduction,
                # keep the 1x1 projection conv at stride 1.
                conv_stride = 1
                downsample.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            downsample.extend([
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=conv_stride,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1]
            ])
            downsample = nn.Sequential(*downsample)

        layers = []
        if downsample_first:
            # ResNet style: the strided/downsampling block comes first.
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=stride,
                    downsample=downsample,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
            inplanes = planes * block.expansion
            for _ in range(1, num_blocks):
                layers.append(
                    block(
                        inplanes=inplanes,
                        planes=planes,
                        stride=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        **kwargs))
        else:
            # Hourglass style: the strided/downsampling block comes last.
            for _ in range(num_blocks - 1):
                layers.append(
                    block(
                        inplanes=inplanes,
                        planes=inplanes,
                        stride=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        **kwargs))
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=stride,
                    downsample=downsample,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
        super(ResLayer, self).__init__(*layers)
class SimplifiedBasicBlock(BaseModule):
    """Simplified version of original basic residual block. This is used in
    `SCNet <https://arxiv.org/abs/2012.10150>`_.

    - Norm layer is now optional
    - Last ReLU in forward function is removed
    """
    # Channel expansion factor of the block output (basic block: 1).
    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None,
                 init_fg=None):
        # NOTE(review): ``init_fg`` looks like a typo of ``init_cfg`` but is
        # part of the public signature; it is forwarded as the init config.
        super(SimplifiedBasicBlock, self).__init__(init_fg)
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        assert not with_cp, 'Not implemented yet.'
        self.with_norm = norm_cfg is not None
        # Convolutions carry a bias only when no norm layer follows them.
        with_bias = True if norm_cfg is None else False
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=with_bias)
        if self.with_norm:
            self.norm1_name, norm1 = build_norm_layer(
                norm_cfg, planes, postfix=1)
            self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=with_bias)
        if self.with_norm:
            self.norm2_name, norm2 = build_norm_layer(
                norm_cfg, planes, postfix=2)
            self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name) if self.with_norm else None

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name) if self.with_norm else None

    def forward(self, x):
        """Forward function."""
        identity = x

        out = self.conv1(x)
        if self.with_norm:
            out = self.norm1(out)
        out = self.relu(out)

        out = self.conv2(out)
        if self.with_norm:
            out = self.norm2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        # No trailing ReLU here by design (see class docstring).
        out += identity

        return out
class SELayer(BaseModule):
    """Squeeze-and-Excitation Module.

    Global-average-pools the input to per-channel statistics, squeezes to
    ``int(channels / ratio)`` channels, expands back, and rescales the input
    by the resulting per-channel attention.

    Args:
        channels (int): The input (and output) channels of the SE layer.
        ratio (int): Squeeze ratio in SELayer, the intermediate channel will
            be ``int(channels/ratio)``. Default: 16.
        conv_cfg (None or dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configurated
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configurated by the first dict and the
            second activation layer will be configurated by the second dict.
            Default: (dict(type='ReLU'), dict(type='Sigmoid'))
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 channels,
                 ratio=16,
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
                 init_cfg=None):
        super(SELayer, self).__init__(init_cfg)
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        squeezed_channels = int(channels / ratio)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=squeezed_channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        self.conv2 = ConvModule(
            in_channels=squeezed_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])

    def forward(self, x):
        # Squeeze -> excite -> rescale the input channel-wise.
        scale = self.conv2(self.conv1(self.global_avgpool(x)))
        return x * scale
class DyReLU(BaseModule):
    """Dynamic ReLU (DyReLU) module.

    See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.
    Current implementation is specialized for task-aware attention in DyHead.
    HSigmoid arguments in default act_cfg follow DyHead official code.
    https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py

    Args:
        channels (int): The input (and output) channels of DyReLU module.
        ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module,
            the intermediate channel will be ``int(channels/ratio)``.
            Default: 4.
        conv_cfg (None or dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configurated
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configurated by the first dict and the
            second activation layer will be configurated by the second dict.
            Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
            divisor=6.0))
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 channels,
                 ratio=4,
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'),
                          dict(type='HSigmoid', bias=3.0, divisor=6.0)),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.channels = channels
        # Four coefficient maps (a1, b1, a2, b2) are produced per channel.
        self.expansion = 4
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=int(channels / ratio),
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        self.conv2 = ConvModule(
            in_channels=int(channels / ratio),
            out_channels=channels * self.expansion,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])

    def forward(self, x):
        """Forward function."""
        coeffs = self.global_avgpool(x)
        coeffs = self.conv1(coeffs)
        # conv2's HSigmoid output lies in [0, 1]; shift to [-0.5, 0.5].
        coeffs = self.conv2(coeffs) - 0.5
        # Split the channels*4 coefficient maps into the two slope/intercept
        # pairs of the piecewise-linear activation.
        a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1)
        a1 = a1 * 2.0 + 1.0  # slope 1 in [0, 2], centered at 1.0
        a2 = a2 * 2.0  # slope 2 in [-1, 1], centered at 0.0
        # Per-channel max of the two learned linear pieces.
        out = torch.max(x * a1 + b1, x * a2 + b2)
        return out
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: Base environment info extended with the MMDetection version
        tagged with the short hash of the current git commit.
    """
    env_info = collect_base_env()
    env_info['MMDetection'] = f'{mmdet.__version__}+{get_git_hash()[:7]}'
    return env_info
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get the root 'mmdet' logger.

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to logging.INFO.

    Returns:
        :obj:`logging.Logger`: The obtained logger
    """
    return get_logger(name='mmdet', log_file=log_file, log_level=log_level)
def find_latest_checkpoint(path, suffix='pth'):
    """Find the latest checkpoint from the working directory.

    Checkpoints are expected to be named either ``latest.<suffix>`` (which
    always wins) or ``<prefix>_<number>.<suffix>`` (e.g. ``epoch_12.pth``),
    in which case the one with the largest number is returned. Files whose
    trailing component is not a number (e.g. ``best_mAP.pth``) are skipped
    instead of crashing with ``ValueError`` as the original implementation
    did.

    Args:
        path (str): The path to find checkpoints.
        suffix (str): File extension. Defaults to pth.

    Returns:
        str | None: File path of the latest checkpoint, or None if the path
        does not exist or contains no parseable checkpoints.

    References:
        .. [1] https://github.com/microsoft/SoftTeacher
                  /blob/main/ssod/utils/patch.py
    """
    if not osp.exists(path):
        warnings.warn('The path of checkpoints does not exist.')
        return None
    if osp.exists(osp.join(path, f'latest.{suffix}')):
        return osp.join(path, f'latest.{suffix}')

    checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
    if len(checkpoints) == 0:
        warnings.warn('There are no checkpoints in the path.')
        return None
    latest = -1
    latest_path = None
    for checkpoint in checkpoints:
        try:
            count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
        except ValueError:
            # Not of the form ``<prefix>_<number>.<suffix>``; ignore it.
            continue
        if count > latest:
            latest = count
            latest_path = checkpoint
    return latest_path
def setup_multi_processes(cfg):
    """Setup multi-processing environment variables."""
    # Set the multi-process start method (default `fork`); unavailable on
    # Windows, where this step is skipped.
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is different from the previous setting `{current_method}`.It will be force set to `{mp_start_method}`. You can change this behavior by changing `mp_start_method` in your config.'
            )
        mp.set_start_method(mp_start_method, force=True)

    # Limit OpenCV's internal threading (0 disables it) so dataloader
    # workers do not oversubscribe the CPU.
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)

    # Cap OMP threads when multiple dataloader workers are used, unless the
    # user already configured the variable explicitly.
    if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(
            f'Setting OMP_NUM_THREADS environment variable for each process to be {omp_num_threads} in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.'
        )
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)

    # Same policy for MKL threads.
    if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(
            f'Setting MKL_NUM_THREADS environment variable for each process to be {mkl_num_threads} in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.'
        )
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
class NiceRepr:
    """Inherit from this class and define ``__nice__`` to "nicely" print your
    objects.

    Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
    If the inheriting class has a ``__len__``, method then the default
    ``__nice__`` method will return its length.

    Example:
        >>> class Foo(NiceRepr):
        ...    def __nice__(self):
        ...        return 'info'
        >>> foo = Foo()
        >>> assert str(foo) == '<Foo(info)>'
        >>> assert repr(foo).startswith('<Foo(info) at ')

    Example:
        >>> class Bar(NiceRepr):
        ...    pass
        >>> bar = Bar()
        >>> import pytest
        >>> with pytest.warns(None) as record:
        >>>     assert 'object at' in str(bar)
        >>>     assert 'object at' in repr(bar)

    Example:
        >>> class Baz(NiceRepr):
        ...    def __len__(self):
        ...        return 5
        >>> baz = Baz()
        >>> assert str(baz) == '<Baz(5)>'
    """

    def __nice__(self):
        """str: a "nice" summary string describing this module"""
        if not hasattr(self, '__len__'):
            raise NotImplementedError(
                f'Define the __nice__ method for {self.__class__!r}')
        # Fall back to the length when the subclass only defines __len__.
        return str(len(self))

    def __repr__(self):
        """str: the string of the module"""
        try:
            classname = self.__class__.__name__
            nice = self.__nice__()
        except NotImplementedError as ex:
            # Degrade gracefully to the default repr, but surface the
            # missing __nice__ as a RuntimeWarning.
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
        return f'<{classname}({nice}) at {hex(id(self))}>'

    def __str__(self):
        """str: the string of the module"""
        try:
            classname = self.__class__.__name__
            nice = self.__nice__()
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
        return f'<{classname}({nice})>'
def ensure_rng(rng=None):
    """Coerces input into a random number generator.

    If the input is None, then a global random state is returned.

    If the input is a numeric value, then that is used as a seed to construct
    a random state. Otherwise the input is returned as-is.

    Adapted from [1]_.

    Args:
        rng (int | numpy.random.RandomState | None):
            if None, then defaults to the global rng. Otherwise this can be an
            integer or a RandomState class
    Returns:
        (numpy.random.RandomState) : rng -
            a numpy random number generator

    References:
        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
    """
    if rng is None:
        # numpy's module-level global random state.
        return np.random.mtrand._rand
    if isinstance(rng, int):
        return np.random.RandomState(rng)
    return rng
def parse_version_info(version_str):
    """Parse a dotted version string into a comparable tuple.

    Numeric components become ints; release-candidate components such as
    ``'0rc1'`` contribute their numeric part followed by an ``'rcN'`` string
    (e.g. ``'1.2.0rc1'`` -> ``(1, 2, 0, 'rc1')``). Components that are
    neither digits nor contain ``'rc'`` are dropped.
    """
    version_info = []
    for part in version_str.split('.'):
        if part.isdigit():
            version_info.append(int(part))
        elif 'rc' in part:
            pieces = part.split('rc')
            version_info.append(int(pieces[0]))
            version_info.append(f'rc{pieces[1]}')
    return tuple(version_info)
@DETECTORS.register_module()
class DDQRCNN(TwoStageDetector):
    """Two-stage detector whose RPN head additionally returns distinct
    queries (``proposals`` and ``object_feats``) that are consumed by the
    RoI head, together with per-image ``imgs_whwh`` normalization factors.

    The RPN head receives feature levels ``x[1:]`` while the RoI head
    receives all levels ``x``.
    """

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """Compute RPN and RoI losses for a batch of images."""
        # Record the (padded) network input shape for the heads.
        batch_input_shape = tuple(img[0].size()[-2:])
        for img_meta in img_metas:
            img_meta['batch_input_shape'] = batch_input_shape
        losses = dict()
        x = self.extract_feat(img)
        # RPN head skips the first feature level; RoI head uses all levels.
        rpn_x = x[1:]
        roi_x = x
        rpn_losses, imgs_whwh, distinc_query_dict = \
            self.rpn_head.forward_train(
                rpn_x,
                img_metas,
                gt_bboxes,
                gt_labels=gt_labels,
                gt_bboxes_ignore=gt_bboxes_ignore,
                **kwargs)
        # Distinct queries produced by the RPN feed the RoI head (any
        # externally supplied ``proposals`` argument is overridden).
        proposals = distinc_query_dict['proposals']
        object_feats = distinc_query_dict['object_feats']
        # Prefix RPN losses so they stay distinguishable in the log.
        for k, v in rpn_losses.items():
            losses[f'rpn_{k}'] = v
        roi_losses = self.roi_head.forward_train(
            roi_x,
            proposals,
            object_feats,
            img_metas,
            gt_bboxes,
            gt_labels,
            gt_bboxes_ignore=gt_bboxes_ignore,
            gt_masks=gt_masks,
            imgs_whwh=imgs_whwh)
        losses.update(roi_losses)
        return losses

    def simple_test(self, img, img_metas, rescale=True):
        """Test without augmentation."""
        batch_input_shape = tuple(img[0].size()[-2:])
        for img_meta in img_metas:
            img_meta['batch_input_shape'] = batch_input_shape
        x = self.extract_feat(img)
        rpn_x = x[1:]
        roi_x = x
        rpn_losses, imgs_whwh, distinc_query_dict = \
            self.rpn_head.simple_test_rpn(rpn_x, img_metas)
        proposals = distinc_query_dict['proposals']
        object_feats = distinc_query_dict['object_feats']
        results = self.roi_head.simple_test(
            roi_x,
            proposals,
            object_feats,
            img_metas,
            imgs_whwh=imgs_whwh,
            rescale=rescale)
        return results
def padding_to(inputs, max=300):
    """Zero-pad ``inputs`` along its first dimension up to length ``max``.

    Args:
        inputs (Tensor): Tensor to pad; trailing dimensions are preserved.
        max (int | None): Target length of dim 0. ``None`` disables padding
            and returns ``inputs`` unchanged. (The name shadows the builtin
            but is kept for backward compatibility.)

    Returns:
        Tensor: Tensor of length ``max`` along dim 0.
    """
    if max is None:
        return inputs
    pad_size = max - len(inputs)
    tail_shape = tuple(inputs.size()[1:]) if inputs.dim() > 1 else ()
    zeros = inputs.new_zeros((pad_size, *tail_shape), dtype=inputs.dtype)
    return torch.cat([inputs, zeros], dim=0)
def align_tensor(inputs, max_len=None):
    """Stack a list of variable-length tensors, zero-padding each to
    ``max_len`` (the longest input length when not given).

    Args:
        inputs (list[Tensor]): Tensors to align along dim 0.
        max_len (int, optional): Common target length; defaults to the
            maximum length among ``inputs``.

    Returns:
        Tensor: Stacked tensor of shape (len(inputs), max_len, ...).
    """
    if max_len is None:
        max_len = max(len(item) for item in inputs)
    padded = [padding_to(item, max_len) for item in inputs]
    return torch.stack(padded)
def parse_args():
    """Parse command-line arguments for testing/evaluating a detector."""
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--ceph',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increasethe inference speed')
    parser.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use (only applicable to non-distributed testing)')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It isuseful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function (deprecate), change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # torch.distributed.launch passes --local_rank; mirror it into the
    # environment for downstream consumers.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is the deprecated alias of --eval-options; they are mutually
    # exclusive.
    if args.options and args.eval_options:
        raise ValueError('--options and --eval-options cannot be both specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def replace_ceph_backend(cfg):
    """Rewrite the config so images are loaded through the ceph (petrel)
    backend.

    A ``file_client_args`` snippet mapping local ``data/<dataset>/`` roots to
    ``s3://openmmlab/datasets/detection/<dataset>/`` is injected right after
    every ``LoadImageFromFile`` occurrence in the config's pretty text, and
    the config is re-parsed from the modified text.

    Args:
        cfg (Config): Config whose ``pretty_text`` mentions a supported
            dataset root (``cityscapes`` or ``coco``).

    Returns:
        Config: A new config parsed from the rewritten text.

    Raises:
        NotImplementedError: If neither supported dataset is referenced.
    """
    cfg_pretty_text = cfg.pretty_text

    replace_strs = \
        "file_client_args = dict(\n" \
        "    backend='petrel',\n" \
        "    path_mapping=dict({\n" \
        "        '.data/INPLACEHOLD/': 's3://openmmlab/datasets/detection/INPLACEHOLD/',\n" \
        "        'data/INPLACEHOLD/': 's3://openmmlab/datasets/detection/INPLACEHOLD/'\n" \
        "    }))\n"

    if 'cityscapes' in cfg_pretty_text:
        replace_strs = replace_strs.replace('INPLACEHOLD', 'cityscapes')
    elif 'coco' in cfg_pretty_text:
        replace_strs = replace_strs.replace('INPLACEHOLD', 'coco')
    else:
        # BUG FIX: the original code executed ``NotImplemented(...)`` which
        # raises ``TypeError: 'NotImplementedType' object is not callable``;
        # raise the intended exception explicitly instead.
        raise NotImplementedError('Does not support global replacement')

    # Collapse the snippet to a single expression (whitespace is cosmetic)
    # and splice it in after each LoadImageFromFile type string.
    replace_strs = replace_strs.replace(' ', '').replace('\n', '')
    replace_strs = "LoadImageFromFile'," + replace_strs
    cfg_pretty_text = cfg_pretty_text.replace("LoadImageFromFile'",
                                              replace_strs)
    cfg = cfg.fromstring(cfg_pretty_text, file_format='.py')
    return cfg
def main():
    """Entry point of the test script: build the dataset and model from the
    config, run single- or multi-GPU inference, then optionally save,
    format, show, or evaluate the results."""
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, (
            'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # Configure multiprocessing / thread-count env vars before dataloaders
    # are created.
    setup_multi_processes(cfg)

    if args.ceph:
        cfg = replace_ceph_backend(cfg)

    # cudnn benchmark speeds up fixed-size inputs.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # Disable pretrained weight loading: the checkpoint is loaded
    # explicitly below.
    if 'pretrained' in cfg.model:
        cfg.model.pretrained = None
    elif 'init_cfg' in cfg.model.backbone:
        cfg.model.backbone.init_cfg = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Batched inference requires DefaultFormatBundle-style tensors.
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    if args.gpu_ids is not None:
        # Non-distributed testing is single-GPU only; keep the first id.
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. Because we only support single GPU mode in non-distributed testing. Use the first GPU in `gpu_ids` now.')
    else:
        cfg.gpu_ids = [args.gpu_id]

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    rank, _ = get_dist_info()
    # Only rank 0 creates the work dir and the timestamped eval json path.
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')

    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # Build the model without training config and load the checkpoint.
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # Prefer class names stored in the checkpoint; fall back to the dataset.
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'''
writing results to {args.out}''')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # Strip evaluation-hook-only keys that dataset.evaluate() does
            # not accept.
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule', 'dynamic_intervals'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            # NOTE(review): json_file is only bound above when work_dir was
            # set and rank == 0; this guard mirrors that condition.
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)
def parse_args():
    """Parse command-line arguments for detector training.

    Returns:
        argparse.Namespace: Parsed arguments. ``cfg_options`` is populated
        from the deprecated ``--options`` flag when only the latter is given.

    Raises:
        ValueError: If both ``--options`` and ``--cfg-options`` are passed.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # Bug fix: the two help strings below were copy-pasted from --no-validate;
    # they now describe what the flags actually do (see main()).
    parser.add_argument(
        '--ceph',
        action='store_true',
        help='whether to replace the data file backend with the Ceph '
        '(petrel) backend in the config')
    parser.add_argument(
        '--pss',
        action='store_true',
        help='whether to freeze all model parameters except those whose '
        'name contains "pss"')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    # Bug fix: --port help previously said 'random seed' (copy-paste error).
    parser.add_argument(
        '--port',
        type=int,
        default=20001,
        help='port used to set up distributed training')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file (deprecate), change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Make sure torch.distributed launchers that rely on the env var work
    # even when only --local_rank was supplied on the command line.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.cfg_options:
        raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def get_device(): 'Returns an available device, cpu, cuda or mlu.' is_device_available = {'cuda': torch.cuda.is_available()} device_list = [k for (k, v) in is_device_available.items() if v] return (device_list[0] if (len(device_list) == 1) else 'cpu')
def replace_ceph_backend(cfg): cfg_pretty_text = cfg.pretty_text replace_strs = "file_client_args = dict(\n backend='petrel',\n path_mapping=dict({\n '.data/INPLACEHOLD/': 's3://openmmlab/datasets/detection/INPLACEHOLD/',\n 'data/INPLACEHOLD/': 's3://openmmlab/datasets/detection/INPLACEHOLD/'\n }))\n " if ('cityscapes' in cfg_pretty_text): replace_strs = replace_strs.replace('INPLACEHOLD', 'cityscapes') elif ('coco' in cfg_pretty_text): replace_strs = replace_strs.replace('INPLACEHOLD', 'coco') else: NotImplemented('Does not support global replacement') replace_strs = replace_strs.replace(' ', '').replace('\n', '') replace_strs = ("LoadImageFromFile'," + replace_strs) cfg_pretty_text = cfg_pretty_text.replace("LoadImageFromFile'", replace_strs) cfg = cfg.fromstring(cfg_pretty_text, file_format='.py') return cfg
def main():
    """Entry point: parse CLI args, build config/model/datasets, launch training."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # Optionally rewrite the file backend so data is read from Ceph/petrel.
    if args.ceph:
        cfg = replace_ceph_backend(cfg)
    if (args.cfg_options is not None):
        # NOTE(review): eval() on CLI-provided override values is unsafe if
        # the command line can come from an untrusted source — consider
        # ast.literal_eval or mmcv's own parsing instead.
        for (k, v) in args.cfg_options.items():
            if v.startswith('dict'):
                args.cfg_options[k] = eval(v)
        cfg.merge_from_dict(args.cfg_options)
    # Import any modules the config requests (e.g. custom plugins).
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # cudnn benchmark speeds up fixed-shape workloads.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir priority: CLI > config file > derived from config filename.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    elif (cfg.get('work_dir', None) is None):
        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
    cfg.device = get_device()
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # In distributed mode gpu_ids is re-derived from the world size.
        (_, world_size) = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # Persist the resolved config alongside logs for reproducibility.
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # meta carries environment/config/seed info into checkpoints and logs.
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'''Config: {cfg.pretty_text}''')
    # Seed everything (optionally deterministically) and record the seed.
    seed = init_random_seed(args.seed)
    logger.info(f'Set random seed to {seed}, deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    # With --pss, freeze every parameter whose name does not contain 'pss'.
    if args.pss:
        for (n, v) in model.named_parameters():
            if ('pss' not in n):
                v.requires_grad = False
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow (e.g. [('train', 1), ('val', 1)]) also needs the
    # val split, run through the *training* pipeline.
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    # Stamp checkpoints with the mmdet version/git hash and class names.
    if (cfg.checkpoint_config is not None):
        cfg.checkpoint_config.meta = dict(mmdet_version=(__version__ + get_git_hash()[:7]), CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
def _activation_summary(x): 'Helper to create summaries for activations.\n\n Creates a summary that provides a histogram of activations.\n Creates a summary that measures the sparsity of activations.\n\n Args:\n x: Tensor\n Returns:\n nothing\n ' tensor_name = re.sub(('%s_[0-9]*/' % TOWER_NAME), '', x.op.name) tf.summary.histogram((tensor_name + '/activations'), x) tf.summary.scalar((tensor_name + '/sparsity'), tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer): 'Helper to create a Variable stored on CPU memory.\n\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n\n Returns:\n Variable Tensor\n ' with tf.device('/cpu:0'): dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32) var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) return var
def _variable_with_weight_decay(name, shape, stddev, wd): 'Helper to create an initialized Variable with weight decay.\n\n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n\n Returns:\n Variable Tensor\n ' dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32) var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if (wd is not None): weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var
def distorted_inputs(): 'Construct distorted input for CIFAR training using the Reader ops.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n ' if (not FLAGS.data_dir): raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') (images, labels) = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return (images, labels)
def inputs(eval_data): 'Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n ' if (not FLAGS.data_dir): raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') (images, labels) = cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return (images, labels)
def inference(images):
    """Build the CIFAR-10 inference graph and return the logits.

    Architecture: conv1 -> pool1 -> norm1 -> conv2 -> norm2 -> pool2 ->
    local3 (FC+ReLU) -> local4 (FC+ReLU) -> softmax_linear.

    Args:
        images: Images returned from distorted_inputs() or inputs().

    Returns:
        Logits (unnormalized — softmax is applied inside the loss op).
    """
    # conv1: 5x5 conv, 3 -> 64 channels, stride 1, no weight decay.
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=0.05, wd=0.0)
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv1)
    # pool1: 3x3 max pool with stride 2, then local response normalization.
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75, name='norm1')
    # conv2: 5x5 conv, 64 -> 64; biases start at 0.1 (unlike conv1's 0.0).
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=0.05, wd=0.0)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv2)
    # NOTE: in this block norm comes BEFORE pool (opposite order to block 1).
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75, name='norm2')
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    # local3: flatten conv features, then fully connected 384 units with
    # L2 weight decay (wd=0.004).
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, [FLAGS.batch_size, (- 1)])
        dim = reshape.get_shape()[1].value
        weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu((tf.matmul(reshape, weights) + biases), name=scope.name)
        _activation_summary(local3)
    # local4: fully connected 192 units, also with weight decay.
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu((tf.matmul(local3, weights) + biases), name=scope.name)
        _activation_summary(local4)
    # softmax_linear: final linear layer producing one logit per class;
    # no softmax here (the loss op applies it for numerical stability).
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=(1 / 192.0), wd=0.0)
        biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
        _activation_summary(softmax_linear)
    return softmax_linear
def loss(logits, labels): 'Add L2Loss to all the trainable variables.\n\n Add summary for "Loss" and "Loss/avg".\n Args:\n logits: Logits from inference().\n labels: Labels from distorted_inputs or inputs(). 1-D tensor\n of shape [batch_size]\n\n Returns:\n Loss tensor of type float.\n ' labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) return tf.add_n(tf.get_collection('losses'), name='total_loss')