code stringlengths 17 6.64M |
|---|
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero((labels >= 1)).squeeze()
if (inds.numel() > 0):
bin_labels[(inds, (labels[inds] - 1))] = 1
if (label_weights is None):
bin_label_weights = None
else:
bin_label_weights = label_weights.view((- 1), 1).expand(label_weights.size(0), label_channels)
return (bin_labels, bin_label_weights)
|
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    """Binary cross entropy with logits.

    Hard index labels are expanded to one-hot when their rank differs from
    `pred`; the optional element-wise `weight` is applied inside the BCE
    call and the result is reduced afterwards.
    """
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))
    if weight is not None:
        weight = weight.float()
    raw = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight, reduction='none')
    return weight_reduce_loss(raw, reduction=reduction, avg_factor=avg_factor)
|
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
assert ((reduction == 'mean') and (avg_factor is None))
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[(inds, label)].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]
|
@LOSSES.register_module
class CrossEntropyLoss(nn.Module):
    """Cross entropy loss with softmax, sigmoid and mask variants.

    Args:
        use_sigmoid (bool): Use binary cross entropy instead of softmax CE.
        use_mask (bool): Use per-RoI mask cross entropy.
        reduction (str): 'none', 'mean' or 'sum'.
        loss_weight (float): Scale applied to the final loss.
    """

    def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
                 loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        # sigmoid and mask modes are mutually exclusive
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self, cls_score, label, weight=None, avg_factor=None,
                reduction_override=None, **kwargs):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        return self.loss_weight * self.cls_criterion(
            cls_score, label, weight, reduction=reduction,
            avg_factor=avg_factor, **kwargs)
|
def py_sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25,
                          reduction='mean', avg_factor=None):
    """Pure-PyTorch sigmoid focal loss (Lin et al., arXiv:1708.02002)."""
    prob = pred.sigmoid()
    target = target.type_as(pred)
    # pt is the probability mass placed on the wrong class
    pt = (1 - prob) * target + prob * (1 - target)
    modulator = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
    raw = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * modulator
    return weight_reduce_loss(raw, weight, reduction, avg_factor)
|
def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25,
                       reduction='mean', avg_factor=None):
    """CUDA sigmoid focal loss wrapper; reduces with optional per-row weights."""
    raw = _sigmoid_focal_loss(pred, target, gamma, alpha)
    if weight is not None:
        # broadcast the per-sample weight over the class dimension
        weight = weight.view(-1, 1)
    return weight_reduce_loss(raw, weight, reduction, avg_factor)
|
@LOSSES.register_module
class FocalLoss(nn.Module):
    """Focal loss (arXiv:1708.02002); only the sigmoid form is implemented.

    Args:
        use_sigmoid (bool): Must be True.
        gamma (float): Focusing parameter.
        alpha (float): Class-balancing parameter.
        reduction (str): 'none', 'mean' or 'sum'.
        loss_weight (float): Scale applied to the final loss.
    """

    def __init__(self, use_sigmoid=True, gamma=2.0, alpha=0.25,
                 reduction='mean', loss_weight=1.0):
        super(FocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
                reduction_override=None):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        if not self.use_sigmoid:
            raise NotImplementedError
        return self.loss_weight * sigmoid_focal_loss(
            pred, target, weight, gamma=self.gamma, alpha=self.alpha,
            reduction=reduction, avg_factor=avg_factor)
|
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero((labels >= 1)).squeeze()
if (inds.numel() > 0):
bin_labels[(inds, (labels[inds] - 1))] = 1
bin_label_weights = label_weights.view((- 1), 1).expand(label_weights.size(0), label_channels)
return (bin_labels, bin_label_weights)
|
@LOSSES.register_module
class GHMC(nn.Module):
    """GHM Classification Loss.

    Details of the theorem can be viewed in the paper
    "Gradient Harmonized Single-stage Detector".
    https://arxiv.org/abs/1811.05181

    Args:
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        use_sigmoid (bool): Can only be true for BCE based loss now.
        loss_weight (float): The weight of the total GHM-C loss.
    """

    def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
        super(GHMC, self).__init__()
        self.bins = bins
        self.momentum = momentum
        # Evenly spaced bin edges over the gradient norm g in [0, 1];
        # registered as a buffer so they move with the module across devices.
        edges = (torch.arange((bins + 1)).float() / bins)
        self.register_buffer('edges', edges)
        # Nudge the last edge above 1.0 so g == 1 falls inside the last bin.
        self.edges[(- 1)] += 1e-06
        if (momentum > 0):
            # Exponential moving average of per-bin sample counts.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.use_sigmoid = use_sigmoid
        if (not self.use_sigmoid):
            # Only the sigmoid/BCE formulation is implemented.
            raise NotImplementedError
        self.loss_weight = loss_weight

    def forward(self, pred, target, label_weight, *args, **kwargs):
        """Calculate the GHM-C loss.

        Args:
            pred (float tensor of size [batch_num, class_num]):
                The direct prediction of classification fc layer.
            target (float tensor of size [batch_num, class_num]):
                Binary class target for each sample.
            label_weight (float tensor of size [batch_num, class_num]):
                the value is 1 if the sample is valid and 0 if ignored.
        Returns:
            The gradient harmonized loss.
        """
        # Expand hard index labels to one-hot when ranks do not match.
        if (pred.dim() != target.dim()):
            (target, label_weight) = _expand_binary_labels(target, label_weight, pred.size((- 1)))
        (target, label_weight) = (target.float(), label_weight.float())
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(pred)
        # Gradient length for BCE: |sigmoid(pred) - target| (detached).
        g = torch.abs((pred.sigmoid().detach() - target))
        valid = (label_weight > 0)
        tot = max(valid.float().sum().item(), 1.0)
        # n counts the bins that actually contain valid samples.
        n = 0
        for i in range(self.bins):
            inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
            num_in_bin = inds.sum().item()
            if (num_in_bin > 0):
                if (mmt > 0):
                    # EMA of the bin population; sample weight is inversely
                    # proportional to the (smoothed) gradient density.
                    self.acc_sum[i] = ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin))
                    weights[inds] = (tot / self.acc_sum[i])
                else:
                    weights[inds] = (tot / num_in_bin)
                n += 1
        if (n > 0):
            # Normalize by the number of non-empty bins.
            weights = (weights / n)
        loss = (F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot)
        return (loss * self.loss_weight)
|
@LOSSES.register_module
class GHMR(nn.Module):
    """GHM Regression Loss.

    Details of the theorem can be viewed in the paper
    "Gradient Harmonized Single-stage Detector"
    https://arxiv.org/abs/1811.05181

    Args:
        mu (float): The parameter for the Authentic Smooth L1 loss.
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        loss_weight (float): The weight of the total GHM-R loss.
    """

    def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
        super(GHMR, self).__init__()
        self.mu = mu
        self.bins = bins
        self.register_buffer('edges', torch.arange(bins + 1).float() / bins)
        # push the last edge far out so every gradient norm lands in a bin
        self.edges[-1] = 1000.0
        self.momentum = momentum
        if momentum > 0:
            # EMA of per-bin sample counts
            self.register_buffer('acc_sum', torch.zeros(bins))
        self.loss_weight = loss_weight

    def forward(self, pred, target, label_weight, avg_factor=None):
        """Calculate the GHM-R loss.

        Args:
            pred (float tensor of size [batch_num, 4 (* class_num)]):
                The prediction of box regression layer.
            target (float tensor of size [batch_num, 4 (* class_num)]):
                The target regression values with the same size of pred.
            label_weight (float tensor of size [batch_num, 4 (* class_num)]):
                The weight of each sample, 0 if ignored.
        Returns:
            The gradient harmonized loss.
        """
        mu, edges, mmt = self.mu, self.edges, self.momentum
        diff = pred - target
        # ASL1: smooth L1 whose gradient is diff / sqrt(diff^2 + mu^2)
        loss = torch.sqrt(diff * diff + mu * mu) - mu
        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
        weights = torch.zeros_like(g)
        valid = label_weight > 0
        tot = max(label_weight.float().sum().item(), 1.0)
        nonempty_bins = 0
        for i in range(self.bins):
            in_bin = (g >= edges[i]) & (g < edges[i + 1]) & valid
            bin_count = in_bin.sum().item()
            if bin_count > 0:
                nonempty_bins += 1
                if mmt > 0:
                    self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt) * bin_count
                    weights[in_bin] = tot / self.acc_sum[i]
                else:
                    weights[in_bin] = tot / bin_count
        if nonempty_bins > 0:
            weights = weights / nonempty_bins
        loss = (loss * weights).sum() / tot
        return loss * self.loss_weight
|
@weighted_loss
def mse_loss(pred, target):
    """Element-wise mean squared error; weighting and reduction are
    handled by the `weighted_loss` decorator."""
    return F.mse_loss(pred, target, reduction='none')
|
@LOSSES.register_module
class MSELoss(nn.Module):
    """Mean squared error loss.

    Args:
        reduction (str): 'none', 'mean' or 'sum'.
        loss_weight (float): Scale applied to the final loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
                reduction_override=None):
        """Compute the weighted MSE loss.

        `reduction_override` is added for consistency with the other loss
        modules in this file (CrossEntropyLoss, SmoothL1Loss); its default
        of None preserves the previous behavior.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        return self.loss_weight * mse_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
|
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Element-wise smooth L1 loss: quadratic below `beta`, linear above."""
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    abs_diff = torch.abs(pred - target)
    return torch.where(abs_diff < beta,
                       0.5 * abs_diff * abs_diff / beta,
                       abs_diff - 0.5 * beta)
|
@LOSSES.register_module
class SmoothL1Loss(nn.Module):
    """Smooth L1 (Huber-style) regression loss.

    Args:
        beta (float): Transition point between the L2 and L1 regimes.
        reduction (str): 'none', 'mean' or 'sum'.
        loss_weight (float): Scale applied to the final loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
                reduction_override=None, **kwargs):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        return self.loss_weight * smooth_l1_loss(
            pred, target, weight, beta=self.beta, reduction=reduction,
            avg_factor=avg_factor, **kwargs)
|
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # F._Reduction.get_enum maps: none -> 0, mean -> 1, sum -> 2
    enum = F._Reduction.get_enum(reduction)
    if enum == 0:
        return loss
    if enum == 1:
        return loss.mean()
    if enum == 2:
        return loss.sum()
|
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weights to a loss, then reduce it.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor, optional): Element-wise weights.
        reduction (str): 'none', 'mean' or 'sum'.
        avg_factor (float, optional): Custom denominator for 'mean'.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    # avg_factor combined with 'sum' is ambiguous, so it is rejected
    raise ValueError('avg_factor can not be used with reduction="sum"')
|
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    The wrapped function must compute an element-wise loss with the
    signature `loss_func(pred, target, **kwargs)`. The decorator adds
    `weight`, `reduction` and `avg_factor` keyword arguments, applying
    optional element-wise weights and the requested reduction via
    `weight_reduce_loss`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """
    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
                avg_factor=None, **kwargs):
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)
    return wrapper
|
@HEADS.register_module
class FusedSemanticHead(nn.Module):
    """Multi-level fused semantic segmentation head.

    in_1 -> 1x1 conv ---
                        |
    in_2 -> 1x1 conv -- |
                       ||
    in_3 -> 1x1 conv - ||
                      ||| /-> 1x1 conv (mask prediction)
    in_4 -> 1x1 conv -----> 3x3 convs (*4)
                        | \-> 1x1 conv (feature)
    in_5 -> 1x1 conv ---
    """

    def __init__(self, num_ins, fusion_level, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=183, ignore_label=255, loss_weight=0.2, conv_cfg=None, norm_cfg=None):
        super(FusedSemanticHead, self).__init__()
        self.num_ins = num_ins
        # Index of the input level whose resolution is used for fusion.
        self.fusion_level = fusion_level
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        # Label value excluded from the cross-entropy loss.
        self.ignore_label = ignore_label
        self.loss_weight = loss_weight
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        # One 1x1 lateral conv per input level.
        self.lateral_convs = nn.ModuleList()
        for i in range(self.num_ins):
            self.lateral_convs.append(ConvModule(self.in_channels, self.in_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False))
        # Stack of 3x3 convs applied to the fused feature.
        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = (self.in_channels if (i == 0) else conv_out_channels)
            self.convs.append(ConvModule(in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        # Two output branches: an embedding feature and per-pixel class logits.
        self.conv_embedding = ConvModule(conv_out_channels, conv_out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
        self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)

    def init_weights(self):
        """Kaiming-initialize the logits conv."""
        kaiming_init(self.conv_logits)

    @auto_fp16()
    def forward(self, feats):
        """Fuse all levels at the fusion level's resolution and predict.

        Args:
            feats (sequence of Tensor): multi-level feature maps.

        Returns:
            tuple: (semantic mask logits, embedding feature map).
        """
        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
        fused_size = tuple(x.shape[(- 2):])
        for (i, feat) in enumerate(feats):
            if (i != self.fusion_level):
                # Resize every other level to the fusion resolution and add.
                feat = F.interpolate(feat, size=fused_size, mode='bilinear', align_corners=True)
                x += self.lateral_convs[i](feat)
        for i in range(self.num_convs):
            x = self.convs[i](x)
        mask_pred = self.conv_logits(x)
        x = self.conv_embedding(x)
        return (mask_pred, x)

    @force_fp32(apply_to=('mask_pred',))
    def loss(self, mask_pred, labels):
        """Cross-entropy loss on the semantic logits, scaled by loss_weight."""
        labels = labels.squeeze(1).long()
        loss_semantic_seg = self.criterion(mask_pred, labels)
        loss_semantic_seg *= self.loss_weight
        return loss_semantic_seg
|
@HEADS.register_module
class HTCMaskHead(FCNMaskHead):
    """Mask head for HTC that can fuse a residual feature from the previous
    stage through an optional 1x1 conv before the conv stack."""

    def __init__(self, with_conv_res=True, *args, **kwargs):
        super(HTCMaskHead, self).__init__(*args, **kwargs)
        self.with_conv_res = with_conv_res
        if self.with_conv_res:
            self.conv_res = ConvModule(
                self.conv_out_channels, self.conv_out_channels, 1,
                conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)

    def init_weights(self):
        super(HTCMaskHead, self).init_weights()
        if self.with_conv_res:
            self.conv_res.init_weights()

    def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
        if res_feat is not None:
            # fuse the previous stage's feature before the conv stack
            assert self.with_conv_res
            x = x + self.conv_res(res_feat)
        for conv in self.convs:
            x = conv(x)
        res_feat = x
        outs = []
        if return_logits:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
            outs.append(self.conv_logits(x))
        if return_feat:
            outs.append(res_feat)
        return outs if len(outs) > 1 else outs[0]
|
@NECKS.register_module
class BFP(nn.Module):
    """BFP (Balanced Feature Pyrmamids)

    BFP takes multi-level features as inputs and gather them into a single one,
    then refine the gathered feature and scatter the refined results to
    multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
    https://arxiv.org/pdf/1904.02701.pdf for details.

    Args:
        in_channels (int): Number of input channels (feature maps of all levels
            should have the same channels).
        num_levels (int): Number of input feature levels.
        conv_cfg (dict): The config dict for convolution layers.
        norm_cfg (dict): The config dict for normalization layers.
        refine_level (int): Index of integration and refine level of BSF in
            multi-level features from bottom to top.
        refine_type (str): Type of the refine op, currently support
            [None, 'conv', 'non_local'].
    """

    def __init__(self, in_channels, num_levels, refine_level=2,
                 refine_type=None, conv_cfg=None, norm_cfg=None):
        super(BFP, self).__init__()
        assert refine_type in [None, 'conv', 'non_local']
        self.in_channels = in_channels
        self.num_levels = num_levels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.refine_level = refine_level
        self.refine_type = refine_type
        assert 0 <= self.refine_level < self.num_levels
        if self.refine_type == 'conv':
            self.refine = ConvModule(
                self.in_channels, self.in_channels, 3, padding=1,
                conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        elif self.refine_type == 'non_local':
            self.refine = NonLocal2D(
                self.in_channels, reduction=1, use_scale=False,
                conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    def forward(self, inputs):
        assert len(inputs) == self.num_levels
        # Step 1: gather — resize every level to the refine level's size.
        gather_size = inputs[self.refine_level].size()[2:]
        gathered = []
        for level, feat in enumerate(inputs):
            if level < self.refine_level:
                resized = F.adaptive_max_pool2d(feat, output_size=gather_size)
            else:
                resized = F.interpolate(feat, size=gather_size, mode='nearest')
            gathered.append(resized)
        bsf = sum(gathered) / len(gathered)
        # Step 2: refine the balanced semantic feature.
        if self.refine_type is not None:
            bsf = self.refine(bsf)
        # Step 3: scatter the refined feature back as per-level residuals.
        outs = []
        for level, feat in enumerate(inputs):
            out_size = feat.size()[2:]
            if level < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + feat)
        return tuple(outs)
|
@NECKS.register_module
class FPN(nn.Module):
    """Feature Pyramid Network.

    This is an implementation of - Feature Pyramid Networks for Object
    Detection (https://arxiv.org/abs/1612.03144)

    Args:
        in_channels (List[int]): number of input channels per scale
        out_channels (int): number of output channels (used at each scale)
        num_outs (int): number of output scales
        start_level (int): index of the first input scale to use as an
            output scale
        end_level (int, default=-1): index of the last input scale to use
            as an output scale

    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = FPN(in_channels, 11, len(in_channels)).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print('outputs[{}].shape = {!r}'.format(i, outputs[i].shape))
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None):
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        if (end_level == (- 1)):
            # Use every backbone level from start_level upward.
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # An explicit end_level disallows extra levels beyond it.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        self.extra_convs_on_inputs = extra_convs_on_inputs
        # Per-level 1x1 lateral convs and post-fusion 3x3 output convs.
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=(norm_cfg if (not self.no_norm_on_lateral) else None), act_cfg=act_cfg, inplace=False)
            fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # Extra output levels beyond the backbone (e.g. P6/P7 in RetinaNet).
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                if ((i == 0) and self.extra_convs_on_inputs):
                    # The first extra conv may consume the raw backbone feature.
                    in_channels = self.in_channels[(self.backbone_end_level - 1)]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
                self.fpn_convs.append(extra_fpn_conv)

    def init_weights(self):
        """Xavier-initialize all conv layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    @auto_fp16()
    def forward(self, inputs):
        assert (len(inputs) == len(self.in_channels))
        # Build laterals from the selected backbone levels.
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        # Top-down pathway: upsample each level and add into the finer one.
        used_backbone_levels = len(laterals)
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            prev_shape = laterals[(i - 1)].shape[2:]
            laterals[(i - 1)] += F.interpolate(laterals[i], size=prev_shape, mode='nearest')
        # Per-level 3x3 output convs.
        outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # Extra levels via stride-2 max pooling.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # Extra levels via stride-2 convs.
                if self.extra_convs_on_inputs:
                    orig = inputs[(self.backbone_end_level - 1)]
                    outs.append(self.fpn_convs[used_backbone_levels](orig))
                else:
                    outs.append(self.fpn_convs[used_backbone_levels](outs[(- 1)]))
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
|
@NECKS.register_module
class HRFPN(nn.Module):
    """HRFPN (High Resolution Feature Pyrmamids)

    arXiv: https://arxiv.org/abs/1904.04514

    Args:
        in_channels (list): number of channels for each branch.
        out_channels (int): output channels of feature pyramids.
        num_outs (int): number of output stages.
        pooling_type (str): pooling for generating feature pyramids
            from {MAX, AVG}.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        stride (int): stride of 3x3 convolutional layers
    """

    def __init__(self, in_channels, out_channels, num_outs=5,
                 pooling_type='AVG', conv_cfg=None, norm_cfg=None,
                 with_cp=False, stride=1):
        super(HRFPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # 1x1 conv fusing the concatenation of all upsampled branches.
        self.reduction_conv = ConvModule(
            sum(in_channels), out_channels, kernel_size=1,
            conv_cfg=self.conv_cfg, act_cfg=None)
        self.fpn_convs = nn.ModuleList()
        for _ in range(self.num_outs):
            self.fpn_convs.append(ConvModule(
                out_channels, out_channels, kernel_size=3, padding=1,
                stride=stride, conv_cfg=self.conv_cfg, act_cfg=None))
        self.pooling = F.max_pool2d if pooling_type == 'MAX' else F.avg_pool2d

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                caffe2_xavier_init(m)

    def forward(self, inputs):
        assert len(inputs) == self.num_ins
        # Upsample every branch to the highest resolution and concatenate.
        upsampled = [inputs[0]]
        for i in range(1, self.num_ins):
            upsampled.append(F.interpolate(
                inputs[i], scale_factor=(2 ** i), mode='bilinear'))
        fused = torch.cat(upsampled, dim=1)
        if fused.requires_grad and self.with_cp:
            fused = checkpoint(self.reduction_conv, fused)
        else:
            fused = self.reduction_conv(fused)
        # Build the pyramid by progressively pooling the fused map.
        levels = [fused]
        for i in range(1, self.num_outs):
            levels.append(self.pooling(
                fused, kernel_size=(2 ** i), stride=(2 ** i)))
        outputs = []
        for i in range(self.num_outs):
            if levels[i].requires_grad and self.with_cp:
                outputs.append(checkpoint(self.fpn_convs[i], levels[i]))
            else:
                outputs.append(self.fpn_convs[i](levels[i]))
        return tuple(outputs)
|
class MergingCell(nn.Module):
    """Base NAS-FPN cell: resize two inputs to a common size, merge them with
    a subclass-defined binary op, then optionally apply a 3x3 conv."""

    def __init__(self, channels=256, with_conv=True, norm_cfg=None):
        super(MergingCell, self).__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv_out = ConvModule(
                channels, channels, 3, padding=1, norm_cfg=norm_cfg,
                order=('act', 'conv', 'norm'))

    def _binary_op(self, x1, x2):
        raise NotImplementedError

    def _resize(self, x, size):
        """Match spatial size: nearest upsample or max-pool downsample."""
        current = x.shape[-2:]
        if current == size:
            return x
        if current < size:
            return F.interpolate(x, size=size, mode='nearest')
        # downsampling requires the spatial dims to divide evenly
        assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0
        stride = x.shape[-1] // size[-1]
        return F.max_pool2d(x, kernel_size=stride, stride=stride)

    def forward(self, x1, x2, out_size):
        assert x1.shape[:2] == x2.shape[:2]
        assert len(out_size) == 2
        merged = self._binary_op(self._resize(x1, out_size),
                                 self._resize(x2, out_size))
        if self.with_conv:
            merged = self.conv_out(merged)
        return merged
|
class SumCell(MergingCell):
    """Merging cell that combines its two inputs by element-wise addition."""

    def _binary_op(self, x1, x2):
        return x1 + x2
|
class GPCell(MergingCell):
    """Merging cell using global-pooling attention: x2's pooled activation
    gates x1 before the two are summed."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))

    def _binary_op(self, x1, x2):
        # sigmoid of x2's global average acts as a channel-wise gate on x1
        gate = self.global_pool(x2).sigmoid()
        return x2 + gate * x1
|
@NECKS.register_module
class NASFPN(nn.Module):
    """NAS-FPN.

    NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object
    Detection. (https://arxiv.org/abs/1904.07392)
    """

    def __init__(self, in_channels, out_channels, num_outs, stack_times, start_level=0, end_level=(- 1), add_extra_convs=False, norm_cfg=None):
        super(NASFPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        # Number of times the searched merging-cell stage is repeated.
        self.stack_times = stack_times
        self.norm_cfg = norm_cfg
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # An explicit end_level disallows extra levels beyond it.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # 1x1 convs aligning each backbone level to out_channels.
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(in_channels[i], out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.lateral_convs.append(l_conv)
        # Extra pyramid levels built by 1x1 conv followed by 2x2 max pooling.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_conv = ConvModule(out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.extra_downsamples.append(nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
        # Each stage repeats the fixed cell wiring found by the architecture
        # search; key format is '<op>_<input levels>_<output level>'.
        self.fpn_stages = nn.ModuleList()
        for _ in range(self.stack_times):
            stage = nn.ModuleDict()
            # gp(p6, p4) -> p4_1
            stage['gp_64_4'] = GPCell(out_channels, norm_cfg=norm_cfg)
            # sum(p4_1, p4) -> p4_2
            stage['sum_44_4'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # sum(p4_2, p3) -> new p3
            stage['sum_43_3'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # sum(p3, p4_2) -> new p4
            stage['sum_34_4'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # gp(p4, p3) -> p5 candidate (no conv)
            stage['gp_43_5'] = GPCell(with_conv=False)
            # sum(p5, p5 candidate) -> new p5
            stage['sum_55_5'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # gp(p5, p4_2) -> p7 candidate (no conv)
            stage['gp_54_7'] = GPCell(with_conv=False)
            # sum(p7, p7 candidate) -> new p7
            stage['sum_77_7'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # gp(p7, p5) -> new p6
            stage['gp_75_6'] = GPCell(out_channels, norm_cfg=norm_cfg)
            self.fpn_stages.append(stage)

    def init_weights(self):
        """Caffe2-style Xavier init for all conv layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                caffe2_xavier_init(m)

    def forward(self, inputs):
        # Lateral 1x1 convs, then extra downsampled levels -> (p3..p7).
        feats = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[(- 1)]))
        (p3, p4, p5, p6, p7) = feats
        for stage in self.fpn_stages:
            # The merge order below is the searched wiring; p4_2 is an
            # intermediate that is reused by several later cells.
            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[(- 2):])
            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[(- 2):])
            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[(- 2):])
            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[(- 2):])
            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[(- 2):])
            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[(- 2):])
            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[(- 2):])
            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[(- 2):])
            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[(- 2):])
        return (p3, p4, p5, p6, p7)
|
@SHARED_HEADS.register_module
class ResLayer(nn.Module):
    """A single ResNet stage wrapped as a shared RoI head."""

    def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch',
                 norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True,
                 with_cp=False, dcn=None):
        super(ResLayer, self).__init__()
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.stage = stage
        self.fp16_enabled = False
        block, stage_blocks = ResNet.arch_settings[depth]
        stage_block = stage_blocks[stage]
        # Standard ResNet widths: 64 * 2^stage planes for this stage.
        planes = 64 * (2 ** stage)
        inplanes = 64 * (2 ** (stage - 1)) * block.expansion
        res_layer = make_res_layer(
            block, inplanes, planes, stage_block, stride=stride,
            dilation=dilation, style=style, with_cp=with_cp,
            norm_cfg=self.norm_cfg, dcn=dcn)
        self.add_module('layer{}'.format(stage + 1), res_layer)

    def init_weights(self, pretrained=None):
        """Load a checkpoint when `pretrained` is a path; otherwise apply
        Kaiming (conv) / constant (BN) initialization."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    @auto_fp16()
    def forward(self, x):
        res_layer = getattr(self, 'layer{}'.format(self.stage + 1))
        return res_layer(x)

    def train(self, mode=True):
        super(ResLayer, self).train(mode)
        if self.norm_eval:
            # keep BN statistics frozen while training
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
|
def bias_init_with_prob(prior_prob):
    """Return the bias b such that sigmoid(b) equals `prior_prob`.

    Typically used to initialize a classification layer's bias so the
    initial predicted probability matches a given prior.
    """
    return float(-np.log((1 - prior_prob) / prior_prob))
|
def build_activation_layer(cfg):
    """Build activation layer.

    Args:
        cfg (dict): cfg should contain:
            type (str): Identify activation layer type.
            layer args: args needed to instantiate a activation layer.

    Returns:
        layer (nn.Module): Created activation layer
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    args = cfg.copy()
    layer_type = args.pop('type')
    if layer_type not in activation_cfg:
        raise KeyError('Unrecognized activation type {}'.format(layer_type))
    activation = activation_cfg[layer_type]
    if activation is None:
        # registered but not yet implemented
        raise NotImplementedError
    return activation(**args)
|
class _AffineGridGenerator(Function):
    """autograd Function wrapping the custom CUDA affine grid generator."""

    @staticmethod
    def forward(ctx, theta, size, align_corners):
        ctx.save_for_backward(theta)
        ctx.size = size
        ctx.align_corners = align_corners
        func = affine_grid_cuda.affine_grid_generator_forward
        output = func(theta, size, align_corners)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        # Bug fix: ctx.saved_tensors is a tuple; the original bound the whole
        # tuple to `theta` and passed it to the CUDA op, which expects the
        # theta tensor itself.
        (theta,) = ctx.saved_tensors
        size = ctx.size
        align_corners = ctx.align_corners
        func = affine_grid_cuda.affine_grid_generator_backward
        grad_input = func(grad_output, theta, size, align_corners)
        # One gradient per forward input; size and align_corners get None.
        return (grad_input, None, None)
|
def affine_grid(theta, size, align_corners=False):
    """Version-compatible wrapper for `F.affine_grid` with `align_corners`.

    For torch >= 1.3 this forwards directly to `F.affine_grid`. On older
    versions, align_corners=True matches the legacy `F.affine_grid`
    behavior, while align_corners=False validates the inputs and falls
    back to a custom CUDA grid generator.

    Args:
        theta (Tensor): batch of affine matrices, (N, 2, 3) for 2D or
            (N, 3, 4) for 3D transforms.
        size (tuple): target output size, 4D (N, C, H, W) or 5D
            (N, C, D, H, W).
        align_corners (bool): grid corner alignment convention.

    Returns:
        Tensor: the sampling grid.
    """
    # Bug fix: the original compared version strings lexicographically, so
    # e.g. '1.10' < '1.3' and newer releases would take the legacy path.
    # Compare the numeric (major, minor) prefix instead.
    version = []
    for part in torch.__version__.split('.')[:2]:
        digits = ''
        for ch in part:
            if not ch.isdigit():
                break
            digits += ch
        version.append(int(digits or 0))
    if tuple(version) >= (1, 3):
        return F.affine_grid(theta, size, align_corners)
    elif align_corners:
        # legacy F.affine_grid behaved as align_corners=True
        return F.affine_grid(theta, size)
    else:
        if not theta.is_floating_point():
            raise ValueError('Expected theta to have floating point type, but got {}'.format(theta.dtype))
        if len(size) == 4:
            if theta.dim() != 3 or theta.size(-2) != 2 or theta.size(-1) != 3:
                raise ValueError('Expected a batch of 2D affine matrices of shape Nx2x3 for size {}. Got {}.'.format(size, theta.shape))
        elif len(size) == 5:
            if theta.dim() != 3 or theta.size(-2) != 3 or theta.size(-1) != 4:
                raise ValueError('Expected a batch of 3D affine matrices of shape Nx3x4 for size {}. Got {}.'.format(size, theta.shape))
        else:
            raise NotImplementedError('affine_grid only supports 4D and 5D sizes, for 2D and 3D affine transforms, respectively. Got size {}.'.format(size))
        if min(size) <= 0:
            raise ValueError('Expected non-zero, positive output size. Got {}'.format(size))
        return _AffineGridGenerator.apply(theta, size, align_corners)
|
class CARAFENaiveFunction(Function):
    """Autograd wrapper for the naive (single-kernel) CARAFE CUDA op.

    Upsamples ``features`` by ``scale_factor`` using the per-location
    reassembly ``masks``. CUDA-only: CPU inputs raise
    ``NotImplementedError``.
    """
    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        assert (scale_factor >= 1)
        # masks carry kernel_size^2 weights per group, at upsampled resolution.
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor))
        assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # kernel_size must be a positive odd number.
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor), (w * scale_factor)))
        if features.is_cuda:
            # The kernel writes into the preallocated output tensor.
            carafe_naive_cuda.forward(features, masks, kernel_size, group_size, scale_factor, output)
        else:
            raise NotImplementedError
        if (features.requires_grad or masks.requires_grad):
            ctx.save_for_backward(features, masks)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        (features, masks) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        grad_input = torch.zeros_like(features)
        grad_masks = torch.zeros_like(masks)
        # Backward kernel fills grad_input / grad_masks in place.
        carafe_naive_cuda.backward(grad_output.contiguous(), features, masks, kernel_size, group_size, scale_factor, grad_input, grad_masks)
        return (grad_input, grad_masks, None, None, None)
|
class CARAFENaive(Module):
    """Naive CARAFE upsampler: reassembles ``features`` with ``masks``."""

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFENaive, self).__init__()
        for arg in (kernel_size, group_size, scale_factor):
            assert isinstance(arg, int)
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        return CARAFENaiveFunction.apply(
            features, masks, self.kernel_size, self.group_size,
            self.scale_factor)
|
class CARAFEFunction(Function):
    """Autograd wrapper for the optimized CARAFE CUDA op.

    Upsamples ``features`` by ``scale_factor`` using the per-location
    reassembly ``masks``. CUDA-only: CPU inputs raise
    ``NotImplementedError``.
    """
    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        assert (scale_factor >= 1)
        # masks carry kernel_size^2 weights per group, at upsampled resolution.
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor))
        assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # kernel_size must be a positive odd number.
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor), (w * scale_factor)))
        # Scratch buffers filled by the CUDA kernel (presumably rearranged
        # layouts -- see the CUDA op); rfeatures is reused in backward.
        routput = features.new_zeros(output.size(), requires_grad=False)
        rfeatures = features.new_zeros(features.size(), requires_grad=False)
        rmasks = masks.new_zeros(masks.size(), requires_grad=False)
        if features.is_cuda:
            carafe_cuda.forward(features, rfeatures, masks, rmasks, kernel_size, group_size, scale_factor, routput, output)
        else:
            raise NotImplementedError
        if (features.requires_grad or masks.requires_grad):
            ctx.save_for_backward(features, masks, rfeatures)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        (features, masks, rfeatures) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        # Scratch buffers for the backward kernel; grad_input/grad_masks are
        # the actual returned gradients.
        rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input = torch.zeros_like(features, requires_grad=False)
        rgrad_masks = torch.zeros_like(masks, requires_grad=False)
        grad_input = torch.zeros_like(features, requires_grad=False)
        grad_masks = torch.zeros_like(masks, requires_grad=False)
        carafe_cuda.backward(grad_output.contiguous(), rfeatures, masks, kernel_size, group_size, scale_factor, rgrad_output, rgrad_input_hs, rgrad_input, rgrad_masks, grad_input, grad_masks)
        return (grad_input, grad_masks, None, None, None, None)
|
class CARAFE(Module):
    """CARAFE: Content-Aware ReAssembly of FEatures.

    Please refer to https://arxiv.org/abs/1905.02188 for more details.

    Args:
        kernel_size (int): reassemble kernel size
        group_size (int): reassemble group size
        scale_factor (int): upsample ratio

    Returns:
        upsampled feature map
    """

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFE, self).__init__()
        for arg in (kernel_size, group_size, scale_factor):
            assert isinstance(arg, int)
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        return CARAFEFunction.apply(
            features, masks, self.kernel_size, self.group_size,
            self.scale_factor)
|
class CARAFEPack(nn.Module):
    """A unified CARAFE upsampler package: 1) channel compressor,
    2) content encoder, 3) CARAFE op.

    Official implementation of the ICCV 2019 paper
    "CARAFE: Content-Aware ReAssembly of FEatures"
    (https://arxiv.org/abs/1905.02188).

    Args:
        channels (int): input feature channels
        scale_factor (int): upsample ratio
        up_kernel (int): kernel size of CARAFE op
        up_group (int): group size of CARAFE op
        encoder_kernel (int): kernel size of content encoder
        encoder_dilation (int): dilation of content encoder
        compressed_channels (int): output channels of channels compressor

    Returns:
        upsampled feature map
    """

    def __init__(self, channels, scale_factor, up_kernel=5, up_group=1,
                 encoder_kernel=3, encoder_dilation=1, compressed_channels=64):
        super(CARAFEPack, self).__init__()
        self.channels = channels
        self.scale_factor = scale_factor
        self.up_kernel = up_kernel
        self.up_group = up_group
        self.encoder_kernel = encoder_kernel
        self.encoder_dilation = encoder_dilation
        self.compressed_channels = compressed_channels
        self.channel_compressor = nn.Conv2d(channels, self.compressed_channels, 1)
        # One mask logit per (kernel tap, group, sub-pixel position).
        encoder_out = (self.up_kernel * self.up_kernel * self.up_group
                       * self.scale_factor * self.scale_factor)
        encoder_pad = int((self.encoder_kernel - 1) * self.encoder_dilation / 2)
        self.content_encoder = nn.Conv2d(
            self.compressed_channels,
            encoder_out,
            self.encoder_kernel,
            padding=encoder_pad,
            dilation=self.encoder_dilation,
            groups=1)
        self.init_weights()

    def init_weights(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                xavier_init(module, distribution='uniform')
        # The content encoder is re-initialized with a small normal std.
        normal_init(self.content_encoder, std=0.001)

    def kernel_normalizer(self, mask):
        """Softmax-normalize the predicted reassembly kernels."""
        mask = F.pixel_shuffle(mask, self.scale_factor)
        n, mask_c, h, w = mask.size()
        channel_groups = int(mask_c / (self.up_kernel * self.up_kernel))
        # Softmax over each up_kernel^2-sized kernel.
        normalized = F.softmax(mask.view(n, channel_groups, -1, h, w), dim=2)
        return normalized.view(n, mask_c, h, w).contiguous()

    def feature_reassemble(self, x, mask):
        return carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)

    def forward(self, x):
        compressed = self.channel_compressor(x)
        mask = self.kernel_normalizer(self.content_encoder(compressed))
        return self.feature_reassemble(x, mask)
|
def last_zero_init(m):
    """Zero-initialize ``m``, or its final layer when it is a Sequential."""
    target = m[-1] if isinstance(m, nn.Sequential) else m
    constant_init(target, val=0)
|
class ContextBlock(nn.Module):
    """Global Context (GC) block.

    Pools a single global context vector from the feature map (attention
    or average pooling) and fuses it back into every spatial position via
    channel-wise addition and/or multiplication.

    Args:
        inplanes (int): input feature channels.
        ratio (float): bottleneck ratio; hidden planes = int(inplanes * ratio).
        pooling_type (str): 'att' (attention pooling) or 'avg'
            (global average pooling).
        fusion_types (tuple[str]): non-empty subset of
            ('channel_add', 'channel_mul').
    """
    def __init__(self, inplanes, ratio, pooling_type='att', fusion_types=('channel_add',)):
        super(ContextBlock, self).__init__()
        assert (pooling_type in ['avg', 'att'])
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([(f in valid_fusion_types) for f in fusion_types])
        assert (len(fusion_types) > 0), 'at least one fusion should be used'
        self.inplanes = inplanes
        self.ratio = ratio
        self.planes = int((inplanes * ratio))
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if (pooling_type == 'att'):
            # 1x1 conv producing a single spatial attention map.
            self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        if ('channel_add' in fusion_types):
            # Bottleneck transform: 1x1 conv -> LayerNorm -> ReLU -> 1x1 conv.
            self.channel_add_conv = nn.Sequential(nn.Conv2d(self.inplanes, self.planes, kernel_size=1), nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
        else:
            self.channel_add_conv = None
        if ('channel_mul' in fusion_types):
            self.channel_mul_conv = nn.Sequential(nn.Conv2d(self.inplanes, self.planes, kernel_size=1), nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
        else:
            self.channel_mul_conv = None
        self.reset_parameters()
    def reset_parameters(self):
        if (self.pooling_type == 'att'):
            kaiming_init(self.conv_mask, mode='fan_in')
            self.conv_mask.inited = True
        # Zero-init the last layer of each fusion branch (see last_zero_init).
        if (self.channel_add_conv is not None):
            last_zero_init(self.channel_add_conv)
        if (self.channel_mul_conv is not None):
            last_zero_init(self.channel_mul_conv)
    def spatial_pool(self, x):
        """Pool a (batch, channel, 1, 1) global context vector from x."""
        (batch, channel, height, width) = x.size()
        if (self.pooling_type == 'att'):
            input_x = x
            # Flatten spatial dims: (batch, 1, channel, height * width).
            input_x = input_x.view(batch, channel, (height * width))
            input_x = input_x.unsqueeze(1)
            # Attention logits over spatial positions -> softmax weights.
            context_mask = self.conv_mask(x)
            context_mask = context_mask.view(batch, 1, (height * width))
            context_mask = self.softmax(context_mask)
            context_mask = context_mask.unsqueeze((- 1))
            # Attention-weighted sum over all spatial positions.
            context = torch.matmul(input_x, context_mask)
            context = context.view(batch, channel, 1, 1)
        else:
            context = self.avg_pool(x)
        return context
    def forward(self, x):
        context = self.spatial_pool(x)
        out = x
        if (self.channel_mul_conv is not None):
            # Channel-wise gating in (0, 1) via sigmoid.
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            out = (out * channel_mul_term)
        if (self.channel_add_conv is not None):
            # Broadcast-add the transformed context to every position.
            channel_add_term = self.channel_add_conv(context)
            out = (out + channel_add_term)
        return out
|
def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): cfg should contain:
            type (str): identify conv layer type.
            layer args: args needed to instantiate a conv layer.
            ``None`` falls back to a plain ``Conv`` layer.
        *args: positional args forwarded to the layer constructor.
        **kwargs: keyword args forwarded to the layer constructor.

    Returns:
        layer (nn.Module): created conv layer

    Raises:
        KeyError: if ``cfg['type']`` is not a registered conv type.
    """
    if cfg is None:
        cfg_ = dict(type='Conv')
    else:
        assert isinstance(cfg, dict) and ('type' in cfg)
        cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in conv_cfg:
        # Bug fix: the message previously said 'norm type' (copy-paste from
        # build_norm_layer).
        raise KeyError('Unrecognized conv type {}'.format(layer_type))
    conv_layer = conv_cfg[layer_type]
    # Remaining cfg entries act as extra constructor kwargs.
    layer = conv_layer(*args, **kwargs, **cfg_)
    return layer
|
class ConvModule(nn.Module):
    """A conv block that contains conv/norm/activation layers.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias will be set as True if norm_cfg is None,
            otherwise False.
        conv_cfg (dict): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        inplace (bool): Whether to use inplace mode for activation.
        order (tuple[str]): The order of conv/norm/activation layers. It is
            a sequence of "conv", "norm" and "act". Examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), inplace=True, order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert ((conv_cfg is None) or isinstance(conv_cfg, dict))
        assert ((norm_cfg is None) or isinstance(norm_cfg, dict))
        assert ((act_cfg is None) or isinstance(act_cfg, dict))
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.order = order
        assert (isinstance(self.order, tuple) and (len(self.order) == 3))
        assert (set(order) == set(['conv', 'norm', 'act']))
        self.with_norm = (norm_cfg is not None)
        self.with_activation = (act_cfg is not None)
        # 'auto' bias: a norm layer makes the conv bias redundant, so
        # disable it whenever a norm is configured.
        if (bias == 'auto'):
            bias = (False if self.with_norm else True)
        self.with_bias = bias
        if (self.with_norm and self.with_bias):
            warnings.warn('ConvModule has norm and bias at the same time')
        self.conv = build_conv_layer(conv_cfg, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Mirror the wrapped conv's hyper-parameters for outside access.
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = self.conv.padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        if self.with_norm:
            # A norm placed after the conv sees out_channels; placed before,
            # it sees in_channels.
            if (order.index('norm') > order.index('conv')):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            (self.norm_name, norm) = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
        if self.with_activation:
            act_cfg_ = act_cfg.copy()
            act_cfg_.setdefault('inplace', inplace)
            self.activate = build_activation_layer(act_cfg_)
        self.init_weights()
    @property
    def norm(self):
        # The norm layer is registered under a generated name; resolve it.
        return getattr(self, self.norm_name)
    def init_weights(self):
        # Match Kaiming init to the configured activation's nonlinearity.
        if (self.with_activation and (self.act_cfg['type'] == 'LeakyReLU')):
            nonlinearity = 'leaky_relu'
        else:
            nonlinearity = 'relu'
        kaiming_init(self.conv, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)
    def forward(self, x, activate=True, norm=True):
        """Apply the layers in ``self.order``; ``activate``/``norm`` can
        skip the activation/norm step for this call."""
        for layer in self.order:
            if (layer == 'conv'):
                x = self.conv(x)
            elif ((layer == 'norm') and norm and self.with_norm):
                x = self.norm(x)
            elif ((layer == 'act') and activate and self.with_activation):
                x = self.activate(x)
        return x
|
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-05):
    """2D convolution with Weight Standardization.

    Standardizes each output-channel filter to zero mean and (roughly)
    unit std before applying a regular ``F.conv2d``.
    """
    out_channels = weight.size(0)
    flat = weight.reshape(out_channels, -1)
    mu = flat.mean(dim=1, keepdim=True).reshape(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1, keepdim=True).reshape(out_channels, 1, 1, 1)
    # eps guards against division by zero for constant filters.
    standardized = (weight - mu) / (sigma + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation, groups)
|
class ConvWS2d(nn.Conv2d):
    """``nn.Conv2d`` variant that standardizes its weight on every forward
    pass (Weight Standardization); see ``conv_ws_2d``."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
        super(ConvWS2d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Numerical-stability constant for the std normalization.
        self.eps = eps

    def forward(self, x):
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
|
class DeformRoIPoolingFunction(Function):
    """Autograd wrapper for the deformable (PS) RoI pooling CUDA op.

    CUDA-only: CPU inputs raise ``NotImplementedError``.
    """
    @staticmethod
    def forward(ctx, data, rois, offset, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0):
        (out_h, out_w) = _pair(out_size)
        assert (isinstance(out_h, int) and isinstance(out_w, int))
        # The CUDA op only supports square outputs.
        assert (out_h == out_w)
        out_size = out_h
        ctx.spatial_scale = spatial_scale
        ctx.out_size = out_size
        ctx.out_channels = out_channels
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        # part_size defaults to the output size.
        ctx.part_size = (out_size if (part_size is None) else part_size)
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std
        assert (0.0 <= ctx.trans_std <= 1.0)
        if (not data.is_cuda):
            raise NotImplementedError
        n = rois.shape[0]
        output = data.new_empty(n, out_channels, out_size, out_size)
        output_count = data.new_empty(n, out_channels, out_size, out_size)
        deform_pool_cuda.deform_psroi_pooling_cuda_forward(data, rois, offset, output, output_count, ctx.no_trans, ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part, ctx.trans_std)
        if (data.requires_grad or rois.requires_grad or offset.requires_grad):
            ctx.save_for_backward(data, rois, offset)
        # output_count is needed by backward but is not an input tensor, so
        # it is stashed as a ctx attribute rather than via save_for_backward.
        ctx.output_count = output_count
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if (not grad_output.is_cuda):
            raise NotImplementedError
        (data, rois, offset) = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        # RoIs are treated as non-differentiable.
        grad_rois = None
        grad_offset = torch.zeros_like(offset)
        deform_pool_cuda.deform_psroi_pooling_cuda_backward(grad_output, data, rois, offset, output_count, grad_input, grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part, ctx.trans_std)
        return (grad_input, grad_rois, grad_offset, None, None, None, None, None, None, None, None)
|
class DeformRoIPooling(nn.Module):
    """Base module for deformable RoI pooling.

    When ``no_trans`` is True, pooling runs without position offsets;
    subclasses add learned offset (and mask) predictors.
    """

    def __init__(self, spatial_scale, out_size, out_channels, no_trans,
                 group_size=1, part_size=None, sample_per_part=4,
                 trans_std=0.0):
        super(DeformRoIPooling, self).__init__()
        self.spatial_scale = spatial_scale
        self.out_size = _pair(out_size)
        self.out_channels = out_channels
        self.no_trans = no_trans
        self.group_size = group_size
        # Fall back to the (un-paired) output size when no part size given.
        self.part_size = out_size if part_size is None else part_size
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std

    def forward(self, data, rois, offset):
        if self.no_trans:
            # The op still expects an offset tensor; pass an empty one.
            offset = data.new_empty(0)
        return deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                  self.out_size, self.out_channels,
                                  self.no_trans, self.group_size,
                                  self.part_size, self.sample_per_part,
                                  self.trans_std)
|
class DeformRoIPoolingPack(DeformRoIPooling):
    """Deformable RoI pooling with a built-in fc head that predicts the
    per-bin offsets from an initial, offset-free pooling pass.
    """
    def __init__(self, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0, num_offset_fcs=3, deform_fc_channels=1024):
        super(DeformRoIPoolingPack, self).__init__(spatial_scale, out_size, out_channels, no_trans, group_size, part_size, sample_per_part, trans_std)
        self.num_offset_fcs = num_offset_fcs
        self.deform_fc_channels = deform_fc_channels
        if (not no_trans):
            # MLP: (h * w * C) -> deform_fc_channels ... -> (h * w * 2).
            seq = []
            ic = ((self.out_size[0] * self.out_size[1]) * self.out_channels)
            for i in range(self.num_offset_fcs):
                if (i < (self.num_offset_fcs - 1)):
                    oc = self.deform_fc_channels
                else:
                    # Final layer emits an (x, y) offset per output bin.
                    oc = ((self.out_size[0] * self.out_size[1]) * 2)
                seq.append(nn.Linear(ic, oc))
                ic = oc
                if (i < (self.num_offset_fcs - 1)):
                    seq.append(nn.ReLU(inplace=True))
            self.offset_fc = nn.Sequential(*seq)
            # Zero-init the last fc so training starts with zero offsets.
            self.offset_fc[(- 1)].weight.data.zero_()
            self.offset_fc[(- 1)].bias.data.zero_()
    def forward(self, data, rois):
        assert (data.size(1) == self.out_channels)
        n = rois.shape[0]
        if (n == 0):
            # No RoIs: return an empty, correctly-shaped result.
            return data.new_empty(n, self.out_channels, self.out_size[0], self.out_size[1])
        if self.no_trans:
            offset = data.new_empty(0)
            return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
        else:
            # First pass without offsets to get features for the predictor.
            offset = data.new_empty(0)
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, True, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, (- 1)))
            offset = offset.view(n, 2, self.out_size[0], self.out_size[1])
            # Second pass with the predicted offsets.
            return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
|
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
    """Modulated deformable RoI pooling: like ``DeformRoIPoolingPack`` but
    with an additional fc head predicting a per-bin sigmoid mask that
    modulates the pooled output.
    """
    def __init__(self, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0, num_offset_fcs=3, num_mask_fcs=2, deform_fc_channels=1024):
        super(ModulatedDeformRoIPoolingPack, self).__init__(spatial_scale, out_size, out_channels, no_trans, group_size, part_size, sample_per_part, trans_std)
        self.num_offset_fcs = num_offset_fcs
        self.num_mask_fcs = num_mask_fcs
        self.deform_fc_channels = deform_fc_channels
        if (not no_trans):
            # Offset head: (h * w * C) -> ... -> (h * w * 2).
            offset_fc_seq = []
            ic = ((self.out_size[0] * self.out_size[1]) * self.out_channels)
            for i in range(self.num_offset_fcs):
                if (i < (self.num_offset_fcs - 1)):
                    oc = self.deform_fc_channels
                else:
                    # Final layer emits an (x, y) offset per output bin.
                    oc = ((self.out_size[0] * self.out_size[1]) * 2)
                offset_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if (i < (self.num_offset_fcs - 1)):
                    offset_fc_seq.append(nn.ReLU(inplace=True))
            self.offset_fc = nn.Sequential(*offset_fc_seq)
            # Zero-init the last fc so training starts with zero offsets.
            self.offset_fc[(- 1)].weight.data.zero_()
            self.offset_fc[(- 1)].bias.data.zero_()
            # Mask head: (h * w * C) -> ... -> (h * w), ending in a Sigmoid.
            mask_fc_seq = []
            ic = ((self.out_size[0] * self.out_size[1]) * self.out_channels)
            for i in range(self.num_mask_fcs):
                if (i < (self.num_mask_fcs - 1)):
                    oc = self.deform_fc_channels
                else:
                    oc = (self.out_size[0] * self.out_size[1])
                mask_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if (i < (self.num_mask_fcs - 1)):
                    mask_fc_seq.append(nn.ReLU(inplace=True))
                else:
                    mask_fc_seq.append(nn.Sigmoid())
            self.mask_fc = nn.Sequential(*mask_fc_seq)
            # Index -1 is the Sigmoid, so the last Linear is at -2;
            # zero-init gives initial masks of sigmoid(0) = 0.5.
            self.mask_fc[(- 2)].weight.data.zero_()
            self.mask_fc[(- 2)].bias.data.zero_()
    def forward(self, data, rois):
        assert (data.size(1) == self.out_channels)
        n = rois.shape[0]
        if (n == 0):
            # No RoIs: return an empty, correctly-shaped result.
            return data.new_empty(n, self.out_channels, self.out_size[0], self.out_size[1])
        if self.no_trans:
            offset = data.new_empty(0)
            return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
        else:
            # First pass without offsets to get features for the predictors.
            offset = data.new_empty(0)
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, True, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, (- 1)))
            offset = offset.view(n, 2, self.out_size[0], self.out_size[1])
            mask = self.mask_fc(x.view(n, (- 1)))
            mask = mask.view(n, 1, self.out_size[0], self.out_size[1])
            # Second pass with predicted offsets, modulated by the mask.
            return (deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std) * mask)
|
class _GridSampler(Function):
    """Autograd Function for the bundled grid-sampler op.

    Dispatches to the 2D or 3D, CUDA or CPU kernel based on the input's
    device and dimensionality. ``mode_enum``/``padding_mode_enum`` are the
    integer codes produced by the ``grid_sample`` wrapper.
    """
    @staticmethod
    def forward(ctx, input, grid, mode_enum, padding_mode_enum, align_corners):
        ctx.save_for_backward(input, grid)
        ctx.mode_enum = mode_enum
        ctx.padding_mode_enum = padding_mode_enum
        ctx.align_corners = align_corners
        # 4D input -> 2D sampler; otherwise the 3D sampler.
        if input.is_cuda:
            if (input.dim() == 4):
                func = grid_sampler_cuda.grid_sampler_2d_forward_cuda
            else:
                func = grid_sampler_cuda.grid_sampler_3d_forward_cuda
        elif (input.dim() == 4):
            func = grid_sampler_cuda.grid_sampler_2d_forward_cpu
        else:
            func = grid_sampler_cuda.grid_sampler_3d_forward_cpu
        output = func(input, grid, mode_enum, padding_mode_enum, align_corners)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, grid) = ctx.saved_tensors
        mode_enum = ctx.mode_enum
        padding_mode_enum = ctx.padding_mode_enum
        align_corners = ctx.align_corners
        # Same dispatch as forward, for the backward kernels.
        if input.is_cuda:
            if (input.dim() == 4):
                func = grid_sampler_cuda.grid_sampler_2d_backward_cuda
            else:
                func = grid_sampler_cuda.grid_sampler_3d_backward_cuda
        elif (input.dim() == 4):
            func = grid_sampler_cuda.grid_sampler_2d_backward_cpu
        else:
            func = grid_sampler_cuda.grid_sampler_3d_backward_cpu
        (grad_input, grad_grid) = func(grad_output, input, grid, mode_enum, padding_mode_enum, align_corners)
        return (grad_input, grad_grid, None, None, None)
|
def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corners=False):
    """Sample ``input`` at the normalized locations given by ``grid``.

    Uses ``F.grid_sample`` when the installed torch already supports the
    ``align_corners`` argument (>= 1.3); otherwise validates the inputs and
    falls back to the bundled ``_GridSampler`` op.

    Args:
        input (Tensor): 4D (N, C, H, W) or 5D (N, C, D, H, W) tensor.
        grid (Tensor): sampling grid whose last dim is 2 (4D) or 3 (5D).
        mode (str): 'bilinear' or 'nearest'.
        padding_mode (str): 'zeros', 'border' or 'reflection'.
        align_corners (bool): coordinate-mapping convention; see the torch
            documentation for details.

    Returns:
        Tensor: the sampled output.
    """
    # Bug fix: compare versions numerically -- the original lexicographic
    # string comparison classified e.g. '1.10' as older than '1.3'.
    parts = torch.__version__.split('+')[0].split('.')[:2]
    version = tuple(int(''.join(c for c in p if c.isdigit()) or 0) for p in parts)
    if version >= (1, 3):
        return F.grid_sample(input, grid, mode, padding_mode, align_corners)
    elif align_corners:
        # Pre-1.3 torch always behaved as align_corners=True.
        return F.grid_sample(input, grid, mode, padding_mode)
    else:
        assert mode in ['bilinear', 'nearest'], 'expected mode to be bilinear or nearest, but got: {}'.format(mode)
        assert padding_mode in ['zeros', 'border', 'reflection'], 'expected padding_mode to be zeros, border, or reflection, but got: {}'.format(padding_mode)
        # Integer-encode mode/padding for the C++/CUDA op.
        if mode == 'bilinear':
            mode_enum = 0
        else:
            mode_enum = 1
        if padding_mode == 'zeros':
            padding_mode_enum = 0
        elif padding_mode == 'border':
            padding_mode_enum = 1
        else:
            padding_mode_enum = 2
        assert input.device == grid.device, 'expected input and grid to be on same device, but input is on {} and grid is on {}'.format(input.device, grid.device)
        assert input.dtype == grid.dtype, 'expected input and grid to have the same dtype, but input has {} and grid has {}'.format(input.dtype, grid.dtype)
        assert input.dim() == 4 or input.dim() == 5, 'expected 4D or 5D input and grid with same number of dimensionsbut got input with sizes {} and grid with sizes {}'.format(input.size(), grid.size())
        assert input.size(0) == grid.size(0), 'expected input and grid to have the same batch size, but got input with sizes {} and grid with sizes {}'.format(input.size(), grid.size())
        # Bug fix: the original message had a stray '{}' and dropped
        # grid.size() from the formatted output.
        assert grid.size(-1) == (input.dim() - 2), 'expected grid to have size {} in last dimension, but got grid with sizes {}'.format(input.dim() - 2, grid.size())
        for i in range(2, input.dim()):
            # Bug fix: tensors expose .size(); .sizes() is the C++ API and
            # would raise AttributeError here.
            assert input.size(i) > 0, 'expected input to have non-empty spatial dimensions, but input has sizes {} with dimension {} being empty'.format(input.size(), i)
        return _GridSampler.apply(input, grid, mode_enum, padding_mode_enum, align_corners)
|
class NonLocal2D(nn.Module):
    """Non-local module.

    See https://arxiv.org/abs/1711.07971 for details.

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio.
        use_scale (bool): Whether to scale pairwise_weight by
            1/inter_channels.
        conv_cfg (dict): The config dict for convolution layers.
            (only applicable to conv_out)
        norm_cfg (dict): The config dict for normalization layers.
            (only applicable to conv_out)
        mode (str): Options are `embedded_gaussian` and `dot_product`.
    """
    def __init__(self, in_channels, reduction=2, use_scale=True, conv_cfg=None, norm_cfg=None, mode='embedded_gaussian'):
        super(NonLocal2D, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        self.inter_channels = (in_channels // reduction)
        self.mode = mode
        assert (mode in ['embedded_gaussian', 'dot_product'])
        # g/theta/phi: 1x1 conv projections into the reduced channel space.
        self.g = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
        self.theta = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
        self.phi = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
        self.conv_out = ConvModule(self.inter_channels, self.in_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
        self.init_weights()
    def init_weights(self, std=0.01, zeros_init=True):
        for m in [self.g, self.theta, self.phi]:
            normal_init(m.conv, std=std)
        # Zero-init of conv_out makes the residual branch start at zero.
        if zeros_init:
            constant_init(self.conv_out.conv, 0)
        else:
            normal_init(self.conv_out.conv, std=std)
    def embedded_gaussian(self, theta_x, phi_x):
        # Pairwise affinity followed by softmax normalization over keys.
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # Scale by sqrt of the channel dim, as in dot-product attention.
            pairwise_weight /= (theta_x.shape[(- 1)] ** 0.5)
        pairwise_weight = pairwise_weight.softmax(dim=(- 1))
        return pairwise_weight
    def dot_product(self, theta_x, phi_x):
        # Plain dot-product affinity, normalized by the number of positions.
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight /= pairwise_weight.shape[(- 1)]
        return pairwise_weight
    def forward(self, x):
        (n, _, h, w) = x.shape
        # Flatten spatial dims: (n, c', h*w) then transpose to (n, h*w, c').
        g_x = self.g(x).view(n, self.inter_channels, (- 1))
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(n, self.inter_channels, (- 1))
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(n, self.inter_channels, (- 1))
        pairwise_func = getattr(self, self.mode)
        # (n, h*w, h*w) affinity between all spatial positions.
        pairwise_weight = pairwise_func(theta_x, phi_x)
        y = torch.matmul(pairwise_weight, g_x)
        y = y.permute(0, 2, 1).reshape(n, self.inter_channels, h, w)
        # Residual connection.
        output = (x + self.conv_out(y))
        return output
|
def build_norm_layer(cfg, num_features, postfix=''):
    """Build a normalization layer from a config dict.

    Args:
        cfg (dict): must contain a ``type`` key naming the norm layer;
            the optional ``requires_grad`` key controls gradient updates,
            and any remaining keys are forwarded as layer arguments.
        num_features (int): number of input channels.
        postfix (int, str): appended to the layer-type abbreviation to
            form the returned name.

    Returns:
        tuple[str, nn.Module]: the layer name (abbreviation + postfix)
        and the created norm layer.
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in norm_cfg:
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    abbr, norm_layer = norm_cfg[layer_type]
    if norm_layer is None:
        raise NotImplementedError
    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)
    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-05)
    if layer_type == 'GN':
        # GroupNorm takes channels as a keyword and needs num_groups.
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)
    else:
        layer = norm_layer(num_features, **cfg_)
        if layer_type == 'SyncBN':
            layer._specify_ddp_gpu_num(1)
    for param in layer.parameters():
        param.requires_grad = requires_grad
    return (name, layer)
|
class RoIAlignFunction(Function):
    """Autograd wrapper for the RoIAlign CUDA op.

    Two kernel versions exist: v1 is the legacy (un-aligned)
    implementation, v2 the aligned one; ``aligned`` selects between them.
    CUDA-only: CPU inputs raise ``NotImplementedError``.
    """
    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0, aligned=True):
        (out_h, out_w) = _pair(out_size)
        assert (isinstance(out_h, int) and isinstance(out_w, int))
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        ctx.aligned = aligned
        if features.is_cuda:
            if (not aligned):
                # v1 writes into a preallocated output tensor.
                (batch_size, num_channels, data_height, data_width) = features.size()
                num_rois = rois.size(0)
                output = features.new_zeros(num_rois, num_channels, out_h, out_w)
                roi_align_cuda.forward_v1(features, rois, out_h, out_w, spatial_scale, sample_num, output)
            else:
                # v2 allocates and returns the output itself.
                output = roi_align_cuda.forward_v2(features, rois, spatial_scale, out_h, out_w, sample_num, aligned)
        else:
            raise NotImplementedError
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        sample_num = ctx.sample_num
        rois = ctx.saved_tensors[0]
        aligned = ctx.aligned
        assert ((feature_size is not None) and grad_output.is_cuda)
        (batch_size, num_channels, data_height, data_width) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        # RoIs are treated as non-differentiable.
        grad_input = grad_rois = None
        if (not aligned):
            if ctx.needs_input_grad[0]:
                grad_input = rois.new_zeros(batch_size, num_channels, data_height, data_width)
                roi_align_cuda.backward_v1(grad_output.contiguous(), rois, out_h, out_w, spatial_scale, sample_num, grad_input)
        else:
            grad_input = roi_align_cuda.backward_v2(grad_output, rois, spatial_scale, out_h, out_w, batch_size, num_channels, data_height, data_width, sample_num, aligned)
        return (grad_input, grad_rois, None, None, None, None)
|
class RoIAlign(nn.Module):
def __init__(self, out_size, spatial_scale, sample_num=0, use_torchvision=False, aligned=False):
"\n Args:\n out_size (tuple): h, w\n spatial_scale (float): scale the input boxes by this number\n sample_num (int): number of inputs samples to take for each\n output sample. 2 to take samples densely for current models.\n use_torchvision (bool): whether to use roi_align from torchvision\n aligned (bool): if False, use the legacy implementation in\n MMDetection. If True, align the results more perfectly.\n\n Note:\n The implementation of RoIAlign when aligned=True is modified from\n https://github.com/facebookresearch/detectron2/\n\n The meaning of aligned=True:\n\n Given a continuous coordinate c, its two neighboring pixel\n indices (in our pixel model) are computed by floor(c - 0.5) and\n ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete\n indices [0] and [1] (which are sampled from the underlying signal\n at continuous coordinates 0.5 and 1.5). But the original roi_align\n (aligned=False) does not subtract the 0.5 when computing\n neighboring pixel indices and therefore it uses pixels with a\n slightly incorrect alignment (relative to our pixel model) when\n performing bilinear interpolation.\n\n With `aligned=True`,\n we first appropriately scale the ROI and then shift it by -0.5\n prior to calling roi_align. This produces the correct neighbors;\n\n The difference does not make a difference to the model's\n performance if ROIAlign is used together with conv layers.\n "
super(RoIAlign, self).__init__()
self.out_size = _pair(out_size)
self.spatial_scale = float(spatial_scale)
self.aligned = aligned
self.sample_num = int(sample_num)
self.use_torchvision = use_torchvision
assert (not (use_torchvision and aligned)), 'Torchvision does not support aligned RoIAlgin'
def forward(self, features, rois):
    """RoI-align ``features`` over ``rois``.

    Args:
        features: NCHW feature map.
        rois: Bx5 boxes; column 0 indexes into N, columns 1-4 are xyxy.
    """
    assert rois.dim() == 2 and rois.size(1) == 5
    if self.use_torchvision:
        # deferred import so torchvision stays an optional dependency
        from torchvision.ops import roi_align as tv_roi_align
        return tv_roi_align(features, rois, self.out_size, self.spatial_scale, self.sample_num)
    return roi_align(features, rois, self.out_size, self.spatial_scale, self.sample_num, self.aligned)
def __repr__(self):
    """Readable summary of the configured RoIAlign parameters."""
    return '{}(out_size={}, spatial_scale={}, sample_num={}, use_torchvision={}, aligned={})'.format(
        self.__class__.__name__, self.out_size, self.spatial_scale,
        self.sample_num, self.use_torchvision, self.aligned)
|
class RoIPoolFunction(Function):
    """Autograd function wrapping the CUDA RoI max-pooling kernels."""

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale):
        assert features.is_cuda
        out_h, out_w = _pair(out_size)
        assert isinstance(out_h, int) and isinstance(out_w, int)
        ctx.save_for_backward(rois)
        num_channels = features.size(1)
        num_rois = rois.size(0)
        # output holds pooled maxima; argmax records the flat index of each
        # max element so backward can route gradients to the right inputs
        pooled_shape = (num_rois, num_channels, out_h, out_w)
        output = features.new_zeros(pooled_shape)
        argmax = features.new_zeros(pooled_shape, dtype=torch.int)
        roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale, output, argmax)
        ctx.spatial_scale = spatial_scale
        ctx.feature_size = features.size()
        ctx.argmax = argmax
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        rois, = ctx.saved_tensors
        assert ctx.feature_size is not None
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.new_zeros(ctx.feature_size)
            roi_pool_cuda.backward(grad_output.contiguous(), rois, ctx.argmax,
                                   ctx.spatial_scale, grad_input)
        # one gradient per forward input: features, rois, out_size, spatial_scale
        return (grad_input, grad_rois, None, None)
|
class RoIPool(nn.Module):
    """RoI max-pooling layer; dispatches to torchvision or the local CUDA op."""

    def __init__(self, out_size, spatial_scale, use_torchvision=False):
        super(RoIPool, self).__init__()
        self.out_size = _pair(out_size)
        self.spatial_scale = float(spatial_scale)
        self.use_torchvision = use_torchvision

    def forward(self, features, rois):
        if not self.use_torchvision:
            return roi_pool(features, rois, self.out_size, self.spatial_scale)
        # deferred import keeps torchvision optional
        from torchvision.ops import roi_pool as tv_roi_pool
        return tv_roi_pool(features, rois, self.out_size, self.spatial_scale)

    def __repr__(self):
        return '{}(out_size={}, spatial_scale={}, use_torchvision={})'.format(
            self.__class__.__name__, self.out_size,
            self.spatial_scale, self.use_torchvision)
|
class Scale(nn.Module):
    """A learnable scale parameter: multiplies its input by a single scalar."""

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        # scalar multiplier, optimized during training
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return self.scale * x
|
class SigmoidFocalLossFunction(Function):
    """Autograd wrapper around the CUDA sigmoid focal loss kernels."""

    @staticmethod
    def forward(ctx, input, target, gamma=2.0, alpha=0.25):
        # stash tensors and hyper-parameters for the backward pass
        ctx.save_for_backward(input, target)
        num_classes = input.shape[1]
        ctx.num_classes = num_classes
        ctx.gamma = gamma
        ctx.alpha = alpha
        loss = sigmoid_focal_loss_cuda.forward(input, target, num_classes, gamma, alpha)
        return loss

    @staticmethod
    @once_differentiable
    def backward(ctx, d_loss):
        (input, target) = ctx.saved_tensors
        num_classes = ctx.num_classes
        gamma = ctx.gamma
        alpha = ctx.alpha
        d_loss = d_loss.contiguous()
        d_input = sigmoid_focal_loss_cuda.backward(input, target, d_loss, num_classes, gamma, alpha)
        # BUGFIX: autograd requires exactly one gradient per forward input
        # (input, target, gamma, alpha) -> 4 values; returning 5 raises
        # "returned an incorrect number of gradients" at backward time.
        return (d_input, None, None, None)
|
class SigmoidFocalLoss(nn.Module):
    """Sigmoid focal loss (CUDA-only), summed over all elements."""

    def __init__(self, gamma, alpha):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, logits, targets):
        # the underlying kernel only has a CUDA implementation
        assert logits.is_cuda
        return sigmoid_focal_loss(logits, targets, self.gamma, self.alpha).sum()

    def __repr__(self):
        return '{}(gamma={}, alpha={})'.format(self.__class__.__name__, self.gamma, self.alpha)
|
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    A Conv2d expands the channel dimension by ``scale_factor ** 2`` and
    ``F.pixel_shuffle`` then rearranges those channels into spatial
    resolution.

    Args:
        in_channels (int): Number of input channels
        out_channels (int): Number of output channels
        scale_factor (int): Upsample ratio
        upsample_kernel (int): Kernel size of Conv layer to expand the channels

    Returns:
        upsampled feature map
    """

    def __init__(self, in_channels, out_channels, scale_factor, upsample_kernel):
        super(PixelShufflePack, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # 'same' padding for odd kernels; channel count grows by scale_factor**2
        self.upsample_conv = nn.Conv2d(
            self.in_channels,
            self.out_channels * scale_factor * scale_factor,
            self.upsample_kernel,
            padding=(self.upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x):
        # expand channels, then trade channels for spatial resolution
        return F.pixel_shuffle(self.upsample_conv(x), self.scale_factor)
|
def build_upsample_layer(cfg):
    """Build an upsample layer from a config dict.

    Args:
        cfg (dict): must contain ``type`` (key into ``upsample_cfg``);
            remaining items are forwarded to the layer constructor.

    Returns:
        nn.Module: the created upsample layer.

    Raises:
        KeyError: if ``type`` is not a registered upsample type.
        NotImplementedError: if the registered entry is None.
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    kwargs = cfg.copy()
    layer_type = kwargs.pop('type')
    if layer_type not in upsample_cfg:
        raise KeyError('Unrecognized upsample type {}'.format(layer_type))
    upsample = upsample_cfg[layer_type]
    if upsample is None:
        raise NotImplementedError
    return upsample(**kwargs)
|
def collect_env():
    """Collect information about the running environment.

    Gathers platform, Python, CUDA/GPU, compiler and library versions into
    an ordered mapping suitable for logging at startup.

    Returns:
        dict: environment name -> value.

    Note:
        Shells out to ``nvcc`` and ``gcc`` with ``shell=True`` pipelines
        (``tail``/``head``), so this assumes a Unix-like environment.
    """
    env_info = {}
    env_info['sys.platform'] = sys.platform
    env_info['Python'] = sys.version.replace('\n', '')
    cuda_available = torch.cuda.is_available()
    env_info['CUDA available'] = cuda_available
    if cuda_available:
        from torch.utils.cpp_extension import CUDA_HOME
        env_info['CUDA_HOME'] = CUDA_HOME
        if ((CUDA_HOME is not None) and osp.isdir(CUDA_HOME)):
            try:
                # report the last line of `nvcc -V` (the release string)
                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                nvcc = subprocess.check_output('"{}" -V | tail -n1'.format(nvcc), shell=True)
                nvcc = nvcc.decode('utf-8').strip()
            except subprocess.SubprocessError:
                nvcc = 'Not Available'
            env_info['NVCC'] = nvcc
        # group device indices by GPU model so identical cards share one line
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for (name, devids) in devices.items():
            env_info[('GPU ' + ','.join(devids))] = name
    # NOTE(review): gcc query is not wrapped in try/except — raises if gcc
    # is missing; confirm that is acceptable for all deployment targets.
    gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
    gcc = gcc.decode('utf-8').strip()
    env_info['GCC'] = gcc
    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = torch.__config__.show()
    env_info['TorchVision'] = torchvision.__version__
    env_info['OpenCV'] = cv2.__version__
    env_info['MMCV'] = mmcv.__version__
    env_info['MMDetection'] = mmdet.__version__
    from mmdet.ops import get_compiler_version, get_compiling_cuda_version
    env_info['MMDetection Compiler'] = get_compiler_version()
    env_info['MMDetection CUDA Compiler'] = get_compiling_cuda_version()
    return env_info
|
def get_model_complexity_info(model, input_res, print_per_layer_stat=True, as_strings=True, input_constructor=None, ost=sys.stdout):
    """Compute average flops and parameter count of ``model``.

    Args:
        model (nn.Module): model to analyse (hooked in place, then unhooked).
        input_res (tuple): input resolution, at least 2 dims (no batch dim).
        print_per_layer_stat (bool): print a per-layer flops breakdown.
        as_strings (bool): return human-readable strings instead of numbers.
        input_constructor (callable | None): builds the kwargs for a forward
            pass from ``input_res``; if None a single empty batch is used.
        ost: stream for the per-layer printout.

    Returns:
        tuple: (flops, params) — strings if ``as_strings`` else numbers.
    """
    assert type(input_res) is tuple
    assert len(input_res) >= 2
    flops_model = add_flops_counting_methods(model)
    flops_model.eval().start_flops_count()
    if input_constructor:
        _ = flops_model(**input_constructor(input_res))
    else:
        # dummy batch of size 1, matching the model's dtype/device
        ref_param = next(flops_model.parameters())
        batch = torch.ones(()).new_empty((1, *input_res), dtype=ref_param.dtype, device=ref_param.device)
        flops_model(batch)
    if print_per_layer_stat:
        print_model_with_flops(flops_model, ost=ost)
    flops_count = flops_model.compute_average_flops_cost()
    params_count = get_model_parameters_number(flops_model)
    flops_model.stop_flops_count()
    if as_strings:
        return (flops_to_string(flops_count), params_to_string(params_count))
    return (flops_count, params_count)
|
def flops_to_string(flops, units='GMac', precision=2):
    """Format a flops count as a human-readable string.

    With ``units=None`` the largest fitting unit is chosen automatically;
    otherwise the requested unit ('GMac'/'MMac'/'KMac') is used, and any
    unrecognised unit falls back to raw ' Mac'.
    """
    scales = (('GMac', 10 ** 9), ('MMac', 10 ** 6), ('KMac', 10 ** 3))
    if units is None:
        # auto-select: first unit whose magnitude the value reaches
        for unit, divisor in scales:
            if flops // divisor > 0:
                return '{} {}'.format(round(flops / float(divisor), precision), unit)
        return '{} Mac'.format(flops)
    for unit, divisor in scales:
        if units == unit:
            return '{} {}'.format(round(flops / float(divisor), precision), units)
    return '{} Mac'.format(flops)
|
def params_to_string(params_num):
    """Format a parameter count as a short string.

    >>> params_to_string(1e9)
    '1000.0 M'
    >>> params_to_string(2e5)
    '200.0 k'
    >>> params_to_string(3e-9)
    '3e-09'
    """
    if params_num // (10 ** 6) > 0:
        return '{} M'.format(round(params_num / (10 ** 6), 2))
    if params_num // (10 ** 3):
        return '{} k'.format(round(params_num / (10 ** 3), 2))
    return str(params_num)
|
def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
    """Print ``model`` with per-module flops appended to each repr line.

    Temporarily monkey-patches every submodule's ``extra_repr`` (binding the
    helpers below as instance methods via ``__get__``) so that the standard
    ``print(model)`` output carries flops statistics, then restores the
    original methods afterwards.
    """
    total_flops = model.compute_average_flops_cost()
    def accumulate_flops(self):
        # leaf modules report their own counter; containers sum their children
        if is_supported_instance(self):
            return (self.__flops__ / model.__batch_counter__)
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum
    def flops_repr(self):
        # absolute flops, share of the total, then the original extra_repr
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} MACs'.format((accumulated_flops_cost / total_flops)), self.original_extra_repr()])
    def add_extra_repr(m):
        # bind helpers as instance methods; keep the pristine extra_repr so
        # del_extra_repr can restore it
        m.accumulate_flops = accumulate_flops.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if (m.extra_repr != flops_extra_repr):
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert (m.extra_repr != m.original_extra_repr)
    def del_extra_repr(m):
        # undo the patching and drop the helper attributes
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops
    model.apply(add_extra_repr)
    print(model, file=ost)
    model.apply(del_extra_repr)
|
def get_model_parameters_number(model):
    """Return the number of trainable (requires_grad) parameters of ``model``."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
def add_flops_counting_methods(net_main_module):
    """Bind the flops-counting API onto ``net_main_module`` and reset counters.

    After this call the module exposes ``start_flops_count``,
    ``stop_flops_count``, ``reset_flops_count`` and
    ``compute_average_flops_cost`` as bound methods.
    """
    for func in (start_flops_count, stop_flops_count, reset_flops_count,
                 compute_average_flops_cost):
        # bind each free function as an instance method
        setattr(net_main_module, func.__name__, func.__get__(net_main_module))
    net_main_module.reset_flops_count()
    # clear any stale conv masks left over from a previous run
    net_main_module.apply(add_flops_mask_variable_or_reset)
    return net_main_module
|
def compute_average_flops_cost(self):
    """A method that becomes available after add_flops_counting_methods().

    Returns the current mean flops consumption per image.
    """
    total = sum(module.__flops__ for module in self.modules()
                if is_supported_instance(module))
    return total / self.__batch_counter__
|
def start_flops_count(self):
    """A method that becomes available after add_flops_counting_methods().

    Activates computation of mean flops per image; call before running
    the network.
    """
    add_batch_counter_hook_function(self)
    self.apply(add_flops_counter_hook_function)
|
def stop_flops_count(self):
    """A method that becomes available after add_flops_counting_methods().

    Pauses computation of mean flops per image by detaching all hooks.
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
|
def reset_flops_count(self):
    """A method that becomes available after add_flops_counting_methods().

    Resets all statistics computed so far.
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
|
def add_flops_mask(module, mask):
    """Attach ``mask`` as ``__mask__`` to every Conv2d inside ``module``."""
    def _set_mask(m):
        if isinstance(m, torch.nn.Conv2d):
            m.__mask__ = mask
    module.apply(_set_mask)
|
def remove_flops_mask(module):
    """Reset ``__mask__`` to None on all supported submodules of ``module``."""
    module.apply(add_flops_mask_variable_or_reset)
|
def is_supported_instance(module):
    """Return True if ``module``'s type matches any entry in hook_mapping."""
    return any(issubclass(type(module), mod) for mod in hook_mapping)
|
def empty_flops_counter_hook(module, input, output):
    """Forward hook for modules that contribute zero flops.

    Touches ``__flops__`` without changing it so the attribute is always
    exercised for supported modules.
    """
    module.__flops__ += 0
|
def upsample_flops_counter_hook(module, input, output):
    """Count one flop per element of ``output[0]`` for upsample layers."""
    first = output[0]
    count = first.shape[0]
    for dim in first.shape[1:]:
        count *= dim
    module.__flops__ += int(count)
|
def relu_flops_counter_hook(module, input, output):
    """Count one flop per output element for activation layers."""
    module.__flops__ += int(output.numel())
|
def linear_flops_counter_hook(module, input, output):
    """Count MACs of a Linear layer: batch * in_features * out_features."""
    # assumes a 2-D (batch, features) input — TODO confirm for higher-rank inputs
    inp = input[0]
    batch = inp.shape[0]
    module.__flops__ += int(batch * inp.shape[1] * output.shape[1])
|
def pool_flops_counter_hook(module, input, output):
    """Count one flop per input element for pooling layers."""
    module.__flops__ += int(np.prod(input[0].shape))
|
def bn_flops_counter_hook(module, input, output):
    """Count batch-norm flops: one op per element, doubled when affine."""
    elems = np.prod(input[0].shape)
    module.__flops__ += int(elems * 2 if module.affine else elems)
|
def gn_flops_counter_hook(module, input, output):
    """Count group-norm flops: 3 per element (mean/var/normalize), +1 if affine."""
    elems = np.prod(input[0].shape)
    flops = 3 * elems
    if module.affine:
        flops += elems
    module.__flops__ += int(flops)
|
def deconv_flops_counter_hook(conv_module, input, output):
    """Forward hook accumulating MACs for ConvTranspose2d into ``__flops__``.

    For a transposed convolution every *input* position is multiplied by the
    full kernel, so the per-position cost scales with the number of input
    pixels; the bias is added once per *output* pixel.
    """
    input = input[0]
    batch_size = input.shape[0]
    (input_height, input_width) = input.shape[2:]
    (kernel_height, kernel_width) = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups
    filters_per_channel = out_channels // groups
    conv_per_position_flops = kernel_height * kernel_width * in_channels * filters_per_channel
    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        (output_height, output_width) = output.shape[2:]
        # BUGFIX: bias is applied once per output pixel, i.e.
        # out_channels * batch * H_out * W_out (was H_out * H_out).
        bias_flops = out_channels * batch_size * output_height * output_width
    conv_module.__flops__ += int(overall_conv_flops + bias_flops)
|
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook accumulating MACs for Conv2d into ``__flops__``.

    Per-position cost is kernel_area * in_channels * (out_channels/groups);
    the number of active positions is batch * H_out * W_out, optionally
    restricted by a spatial ``__mask__``.
    """
    input = input[0]
    batch_size = input.shape[0]
    output_dims = list(output.shape[2:])
    kernel_dims = list(conv_module.kernel_size)
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups
    filters_per_channel = out_channels // groups
    conv_per_position_flops = np.prod(kernel_dims) * in_channels * filters_per_channel
    active_elements_count = batch_size * np.prod(output_dims)
    # ROBUSTNESS FIX: __mask__ only exists after add_flops_mask_variable_or_reset
    # has run; default to None so the hook also works when registered stand-alone
    # (previously raised AttributeError).
    mask = getattr(conv_module, '__mask__', None)
    if mask is not None:
        (output_height, output_width) = output.shape[2:]
        flops_mask = mask.expand(batch_size, 1, output_height, output_width)
        active_elements_count = flops_mask.sum()
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        bias_flops = out_channels * active_elements_count
    conv_module.__flops__ += int(overall_conv_flops + bias_flops)
|
def batch_counter_hook(module, input, output):
    """Forward hook accumulating the number of processed samples."""
    if len(input) > 0:
        # first positional input carries the batch dimension
        batch_size = len(input[0])
    else:
        print('Warning! No positional inputs found for a module, assuming batch size is 1.')
        batch_size = 1
    module.__batch_counter__ += batch_size
|
def add_batch_counter_variables_or_reset(module):
    """Initialize (or reset to zero) the per-module sample counter."""
    module.__batch_counter__ = 0
|
def add_batch_counter_hook_function(module):
    """Register batch_counter_hook once; idempotent via the stored handle."""
    if not hasattr(module, '__batch_counter_handle__'):
        module.__batch_counter_handle__ = module.register_forward_hook(batch_counter_hook)
|
def remove_batch_counter_hook_function(module):
    """Detach and forget the batch counter hook, if one was installed."""
    handle = getattr(module, '__batch_counter_handle__', None)
    if handle is not None:
        handle.remove()
        del module.__batch_counter_handle__
|
def add_flops_counter_variable_or_reset(module):
    """Zero the flops counter on modules we know how to count."""
    if is_supported_instance(module):
        module.__flops__ = 0
|
def add_flops_counter_hook_function(module):
    """Attach the matching flops-counting forward hook to ``module`` once."""
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        # hook already installed; keep it
        return
    for supported_type, counter_hook in hook_mapping.items():
        if issubclass(type(module), supported_type):
            handle = module.register_forward_hook(counter_hook)
            break
    module.__flops_handle__ = handle
|
def remove_flops_counter_hook_function(module):
    """Detach and forget the flops-counting hook, if one was installed."""
    if is_supported_instance(module) and hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
|
def add_flops_mask_variable_or_reset(module):
    """Reset the spatial flops mask to None on supported modules."""
    if is_supported_instance(module):
        module.__mask__ = None
|
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get the top-level package logger (e.g. "mmdet"), initializing it once.

    A StreamHandler is added via ``basicConfig`` on first use; if
    ``log_file`` is given, a FileHandler is added too.

    Args:
        log_file (str | None): if set, also log to this file.
        log_level (int): level for rank 0; other ranks are set to ERROR
            and stay mostly silent.

    Returns:
        logging.Logger: the root logger.
    """
    logger = logging.getLogger(__name__.split('.')[0])
    if logger.hasHandlers():
        # already initialized by a previous call
        return logger
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=fmt, level=log_level)
    rank, _ = get_dist_info()
    if rank != 0:
        # non-master ranks only report errors
        logger.setLevel('ERROR')
    elif log_file is not None:
        file_handler = logging.FileHandler(log_file, 'w')
        file_handler.setFormatter(logging.Formatter(fmt))
        file_handler.setLevel(log_level)
        logger.addHandler(file_handler)
    return logger
|
def print_log(msg, logger=None, level=logging.INFO):
    """Print a log message.

    Args:
        msg (str): the message to be logged.
        logger (logging.Logger | str | None): destination —
            a Logger object, "root" (the root logger), "silent"
            (discard), or None (plain ``print``).
        level (int): logging level, used for Logger/"root" destinations.

    Raises:
        TypeError: if ``logger`` is none of the accepted values.
    """
    if logger is None:
        print(msg)
    elif isinstance(logger, logging.Logger):
        logger.log(level, msg)
    elif logger == 'root':
        _logger = get_root_logger()
        _logger.log(level, msg)
    elif logger != 'silent':
        raise TypeError('logger should be either a logging.Logger object, "root", "silent" or None, but got {}'.format(logger))
|
class Registry(object):
    """A name -> class mapping used to build components from config dicts."""

    def __init__(self, name):
        self._name = name
        self._module_dict = dict()

    def __repr__(self):
        return '{}(name={}, items={})'.format(
            self.__class__.__name__, self._name, list(self._module_dict.keys()))

    @property
    def name(self):
        return self._name

    @property
    def module_dict(self):
        return self._module_dict

    def get(self, key):
        """Return the class registered under ``key``, or None."""
        return self._module_dict.get(key, None)

    def _register_module(self, module_class, force=False):
        """Register one class under its own name.

        Raises:
            TypeError: if ``module_class`` is not a class.
            KeyError: if the name is taken and ``force`` is False.
        """
        if not inspect.isclass(module_class):
            raise TypeError('module must be a class, but got {}'.format(type(module_class)))
        module_name = module_class.__name__
        if (not force) and (module_name in self._module_dict):
            raise KeyError('{} is already registered in {}'.format(module_name, self.name))
        self._module_dict[module_name] = module_class

    def register_module(self, cls=None, force=False):
        """Usable as a plain call, a decorator, or a parametrized decorator."""
        if cls is None:
            # called as @register_module(force=...): return a decorator
            return partial(self.register_module, force=force)
        self._register_module(cls, force=force)
        return cls
|
def build_from_cfg(cfg, registry, default_args=None):
    """Build a module from a config dict.

    Args:
        cfg (dict): config dict; must contain the key "type", whose value
            is either a registered name (str) or a class.
        registry (:obj:`Registry`): the registry to look the name up in.
        default_args (dict, optional): defaults merged into ``cfg`` (without
            overriding keys already present).

    Returns:
        obj: the constructed object.

    Raises:
        KeyError: if the named type is not in ``registry``.
        TypeError: if "type" is neither a string nor a class.
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    assert isinstance(default_args, dict) or (default_args is None)
    args = cfg.copy()
    obj_type = args.pop('type')
    # IDIOM: builtin isinstance replaces mmcv.is_str (identical semantics,
    # drops a needless third-party call)
    if isinstance(obj_type, str):
        obj_cls = registry.get(obj_type)
        if obj_cls is None:
            raise KeyError('{} is not in the {} registry'.format(obj_type, registry.name))
    elif inspect.isclass(obj_type):
        obj_cls = obj_type
    else:
        raise TypeError('type must be a str or valid type, but got {}'.format(type(obj_type)))
    if default_args is not None:
        for (name, value) in default_args.items():
            args.setdefault(name, value)
    return obj_cls(**args)
|
class NiceRepr(object):
    """Mixin defining ``__str__``/``__repr__`` in terms of ``__nice__``.

    Subclasses should override ``__nice__``; if the subclass defines
    ``__len__``, the default ``__nice__`` reports the length instead.

    Example:
        >>> class Foo(NiceRepr):
        ...     def __nice__(self):
        ...         return 'info'
        >>> foo = Foo()
        >>> assert str(foo) == '<Foo(info)>'
        >>> assert repr(foo).startswith('<Foo(info) at ')

    Example:
        >>> class Baz(NiceRepr):
        ...     def __len__(self):
        ...         return 5
        >>> baz = Baz()
        >>> assert str(baz) == '<Baz(5)>'
    """

    def __nice__(self):
        if hasattr(self, '__len__'):
            return str(len(self))
        raise NotImplementedError('Define the __nice__ method for {!r}'.format(self.__class__))

    def __repr__(self):
        try:
            return '<{0}({1}) at {2}>'.format(self.__class__.__name__, self.__nice__(), hex(id(self)))
        except NotImplementedError as ex:
            # warn instead of crashing while printing; fall back to default repr
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)

    def __str__(self):
        try:
            return '<{0}({1})>'.format(self.__class__.__name__, self.__nice__())
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
|
def readme():
    """Return the contents of README.md (UTF-8) from the current directory."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
|
def get_git_hash():
    """Return the current git commit sha, or 'unknown' if git is unavailable."""
    def _minimal_ext_cmd(cmd):
        # run `cmd` in a minimal, de-localized environment
        env = {}
        for key in ['SYSTEMROOT', 'PATH', 'HOME']:
            value = os.environ.get(key)
            if value is not None:
                env[key] = value
        # LANGUAGE/LANG/LC_ALL=C keeps the tool output un-localized
        env.update(LANGUAGE='C', LANG='C', LC_ALL='C')
        return subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]

    try:
        sha = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        # git binary not found
        sha = 'unknown'
    return sha
|
def get_hash():
    """Best-effort version hash: short git sha, packaged suffix, or 'unknown'."""
    if os.path.exists('.git'):
        # working from a checkout: use the live git state
        return get_git_hash()[:7]
    if os.path.exists(version_file):
        try:
            from mmdet.version import __version__
            # the part after '+' is the sha recorded at build time
            return __version__.split('+')[-1]
        except ImportError:
            raise ImportError('Unable to get git version')
    return 'unknown'
|
def write_version_py():
    """Generate the package version file embedding the full version string."""
    content = "# GENERATED VERSION FILE\n# TIME: {}\n\n__version__ = '{}'\nshort_version = '{}'\n"
    full_version = SHORT_VERSION + '+' + get_hash()
    with open(version_file, 'w') as f:
        f.write(content.format(time.asctime(), full_version, SHORT_VERSION))
|
def get_version():
    # Execute the generated version file and read back __version__.
    # NOTE: relies on the CPython behavior that exec() without explicit
    # namespaces populates the dict later returned by locals() — fragile,
    # but a long-standing setup.py idiom; do not refactor casually.
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.