# NOTE: removed a dataset-export artifact ("code stringlengths 17 6.64M" /
# "|---|") that was not valid Python and carried no information.
class MergingCell(nn.Module):
    """Base cell that merges two feature maps of possibly different sizes.

    Both inputs are first resized to ``out_size`` (nearest-neighbour
    upsampling, or max-pool downsampling when the target evenly divides the
    current resolution), fused via ``_binary_op`` and optionally refined by
    a 3x3 conv applied in (act, conv, norm) order.
    """

    def __init__(self, channels=256, with_conv=True, norm_cfg=None):
        super(MergingCell, self).__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv_out = ConvModule(
                channels,
                channels,
                3,
                padding=1,
                norm_cfg=norm_cfg,
                order=('act', 'conv', 'norm'))

    def _binary_op(self, x1, x2):
        # Subclasses define how the two (already resized) inputs are fused.
        raise NotImplementedError

    def _resize(self, x, size):
        """Resize ``x`` so its spatial dims equal ``size`` (H, W)."""
        spatial = x.shape[-2:]
        if spatial == size:
            return x
        if spatial < size:
            # Smaller than the target: upsample with nearest neighbour.
            return F.interpolate(x, size=size, mode='nearest')
        # Larger than the target: the target must evenly divide the input.
        assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0
        kernel_size = x.shape[-1] // size[-1]
        return F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)

    def forward(self, x1, x2, out_size):
        assert x1.shape[:2] == x2.shape[:2]
        assert len(out_size) == 2
        x1 = self._resize(x1, out_size)
        x2 = self._resize(x2, out_size)
        merged = self._binary_op(x1, x2)
        if self.with_conv:
            merged = self.conv_out(merged)
        return merged
|
class SumCell(MergingCell):
    """Merging cell that fuses its two inputs by element-wise addition."""

    def _binary_op(self, x1, x2):
        return x1 + x2
|
class GPCell(MergingCell):
    """Merging cell with a global-pooling attention gate.

    ``x2`` passes through unchanged while ``x1`` is re-weighted by a
    sigmoid gate computed from the globally average-pooled ``x2``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))

    def _binary_op(self, x1, x2):
        gate = self.global_pool(x2).sigmoid()
        return x2 + gate * x1
|
@NECKS.register_module
class NASFPN(nn.Module):
    """NAS-FPN.

    NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object
    Detection. (https://arxiv.org/abs/1904.07392)

    Args:
        in_channels (list[int]): Channels of each input (backbone) level.
        out_channels (int): Channels of every internal and output map.
        num_outs (int): Number of output pyramid levels (5: p3..p7).
        stack_times (int): How many times the NAS-discovered merging stage
            is stacked.
        start_level (int): Index of the first backbone level to use.
        end_level (int): Index of the last backbone level (-1 = use all).
        add_extra_convs (bool): Kept for FPN interface compatibility; it is
            stored but never read in this class.
        norm_cfg (dict | None): Config for normalization layers.
    """

    def __init__(self, in_channels, out_channels, num_outs, stack_times, start_level=0, end_level=(- 1), add_extra_convs=False, norm_cfg=None):
        super(NASFPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)  # number of backbone scales
        self.num_outs = num_outs
        self.stack_times = stack_times
        self.norm_cfg = norm_cfg
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # if end_level is given, outputs must exactly cover the range
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # 1x1 convs aligning every used backbone level to out_channels
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(in_channels[i], out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.lateral_convs.append(l_conv)
        # extra pyramid levels built by 1x1 conv + stride-2 max-pool chains
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_conv = ConvModule(out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.extra_downsamples.append(nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
        # each stage is one copy of the NAS-discovered cell topology;
        # key naming: <op>_<input level(s)>_<output level>
        self.fpn_stages = nn.ModuleList()
        for _ in range(self.stack_times):
            stage = nn.ModuleDict()
            # gp(p6, p4) -> p4_1
            stage['gp_64_4'] = GPCell(out_channels, norm_cfg=norm_cfg)
            # sum(p4_1, p4) -> p4_2
            stage['sum_44_4'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # sum(p4_2, p3) -> p3_out
            stage['sum_43_3'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # sum(p3_out, p4_2) -> p4_out
            stage['sum_34_4'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # gp(p4_out, p3_out) then sum with p5 -> p5_out
            stage['gp_43_5'] = GPCell(with_conv=False)
            stage['sum_55_5'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # gp(p5_out, p4_2) then sum with p7 -> p7_out
            stage['gp_54_7'] = GPCell(with_conv=False)
            stage['sum_77_7'] = SumCell(out_channels, norm_cfg=norm_cfg)
            # gp(p7_out, p5_out) -> p6_out
            stage['gp_75_6'] = GPCell(out_channels, norm_cfg=norm_cfg)
            self.fpn_stages.append(stage)

    def init_weights(self):
        # Caffe2-style Xavier init for all convs, matching the paper's setup.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                caffe2_xavier_init(m)

    def forward(self, inputs):
        """Map backbone features to 5 pyramid levels (p3..p7)."""
        # build the base levels from the backbone via the lateral 1x1 convs
        feats = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        # build the extra (coarser) levels on top of the last map
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[(- 1)]))
        (p3, p4, p5, p6, p7) = feats
        for stage in self.fpn_stages:
            # gp(p6, p4) -> p4_1
            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[(- 2):])
            # sum(p4_1, p4) -> p4_2
            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[(- 2):])
            # sum(p4_2, p3) -> p3_out
            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[(- 2):])
            # sum(p3_out, p4_2) -> p4_out
            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[(- 2):])
            # sum(p5, gp(p4_out, p3_out)) -> p5_out
            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[(- 2):])
            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[(- 2):])
            # sum(p7, gp(p5_out, p4_2)) -> p7_out
            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[(- 2):])
            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[(- 2):])
            # gp(p7_out, p5_out) -> p6_out
            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[(- 2):])
        return (p3, p4, p5, p6, p7)
|
@SHARED_HEADS.register_module
class ResLayer(nn.Module):
    """A single ResNet stage wrapped as a shared RoI head (e.g. the
    conv5/'layer4' stage used by C4-style two-stage detectors).

    Args:
        depth (int): ResNet depth key into ``ResNet.arch_settings``.
        stage (int): 0-based stage index to build (3 -> ``layer4``).
        stride (int): Stride of the stage's first block.
        dilation (int): Dilation of the stage's convs.
        style (str): ``pytorch`` or ``caffe`` bottleneck style.
        norm_cfg (dict): Normalization layer config.
        norm_eval (bool): Keep BatchNorm in eval mode during training.
        with_cp (bool): Use checkpointing to trade compute for memory.
        dcn (dict | None): Deformable-conv config for the stage.
    """

    def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch', norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=False, dcn=None):
        super(ResLayer, self).__init__()
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.stage = stage
        self.fp16_enabled = False
        (block, stage_blocks) = ResNet.arch_settings[depth]
        stage_block = stage_blocks[stage]
        # channel widths follow the standard ResNet doubling scheme
        planes = (64 * (2 ** stage))
        inplanes = ((64 * (2 ** (stage - 1))) * block.expansion)
        res_layer = make_res_layer(block, inplanes, planes, stage_block, stride=stride, dilation=dilation, style=style, with_cp=with_cp, norm_cfg=self.norm_cfg, dcn=dcn)
        # register under the conventional ResNet name, e.g. 'layer4' for stage 3
        self.add_module('layer{}'.format((stage + 1)), res_layer)

    def init_weights(self, pretrained=None):
        """Load pretrained weights, or Kaiming/constant init from scratch.

        Args:
            pretrained (str | None): Checkpoint path, or None for random init.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    @auto_fp16()
    def forward(self, x):
        # fetch the registered stage by its dynamic name (e.g. 'layer4')
        res_layer = getattr(self, 'layer{}'.format((self.stage + 1)))
        out = res_layer(x)
        return out

    def train(self, mode=True):
        # Freeze BN statistics when norm_eval is set (common detection
        # practice with small per-GPU batch sizes).
        super(ResLayer, self).train(mode)
        if self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
|
def bias_init_with_prob(prior_prob):
    """Initialize a conv/fc bias so that sigmoid(bias) == ``prior_prob``.

    Commonly used for focal-loss classification heads, where the final
    layer's bias is set so the initial foreground probability is small.

    Args:
        prior_prob (float): Desired initial probability, in (0, 1).

    Returns:
        float: The bias value ``-log((1 - p) / p)``.
    """
    return float(-np.log((1 - prior_prob) / prior_prob))
|
def build_activation_layer(cfg):
    """Build an activation layer from a config dict.

    Args:
        cfg (dict): Must contain key ``type`` naming the activation; all
            remaining keys are forwarded to the layer constructor.

    Returns:
        nn.Module: The instantiated activation layer.

    Raises:
        KeyError: If ``type`` is not registered in ``activation_cfg``.
        NotImplementedError: If the registered entry is ``None``.
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    args = cfg.copy()
    act_type = args.pop('type')
    if act_type not in activation_cfg:
        raise KeyError('Unrecognized activation type {}'.format(act_type))
    activation = activation_cfg[act_type]
    if activation is None:
        raise NotImplementedError
    return activation(**args)
|
class _AffineGridGenerator(Function):
    """Autograd wrapper around the CUDA affine-grid-generator kernels.

    Backs ``affine_grid`` for torch versions whose ``F.affine_grid`` does
    not expose ``align_corners``.
    """

    @staticmethod
    def forward(ctx, theta, size, align_corners):
        # theta: (N, 2, 3) or (N, 3, 4) batch of affine matrices
        ctx.save_for_backward(theta)
        ctx.size = size
        ctx.align_corners = align_corners
        func = affine_grid_cuda.affine_grid_generator_forward
        output = func(theta, size, align_corners)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        # BUG FIX: ctx.saved_tensors is always a tuple; the previous code
        # bound the whole tuple to `theta` and passed it to the CUDA op.
        (theta,) = ctx.saved_tensors
        size = ctx.size
        align_corners = ctx.align_corners
        func = affine_grid_cuda.affine_grid_generator_backward
        grad_input = func(grad_output, theta, size, align_corners)
        # one gradient slot per forward input (theta, size, align_corners)
        return (grad_input, None, None)
|
def affine_grid(theta, size, align_corners=False):
    """Drop-in replacement for ``F.affine_grid`` with ``align_corners``.

    torch >= 1.3 accepts ``align_corners`` natively, so this delegates to
    ``F.affine_grid``; otherwise it validates the arguments and falls back
    to the bundled CUDA implementation.

    Args:
        theta (Tensor): (N, 2, 3) or (N, 3, 4) batch of affine matrices.
        size (tuple): Target size, 4D (N, C, H, W) or 5D (N, C, D, H, W).
        align_corners (bool): Corner-alignment convention of the grid.

    Returns:
        Tensor: Sampling grid, (N, H, W, 2) or (N, D, H, W, 3).
    """
    # BUG FIX: version strings compare lexicographically ('1.10' < '1.3'),
    # so parse (major, minor) before comparing.
    version = tuple(int(p) for p in torch.__version__.split('+')[0].split('.')[:2])
    if version >= (1, 3):
        return F.affine_grid(theta, size, align_corners)
    elif align_corners:
        # align_corners=True matches the legacy default behaviour
        return F.affine_grid(theta, size)
    else:
        if not theta.is_floating_point():
            raise ValueError('Expected theta to have floating point type, but got {}'.format(theta.dtype))
        if len(size) == 4:
            if theta.dim() != 3 or theta.size(-2) != 2 or theta.size(-1) != 3:
                raise ValueError('Expected a batch of 2D affine matrices of shape Nx2x3 for size {}. Got {}.'.format(size, theta.shape))
        elif len(size) == 5:
            if theta.dim() != 3 or theta.size(-2) != 3 or theta.size(-1) != 4:
                raise ValueError('Expected a batch of 3D affine matrices of shape Nx3x4 for size {}. Got {}.'.format(size, theta.shape))
        else:
            raise NotImplementedError('affine_grid only supports 4D and 5D sizes, for 2D and 3D affine transforms, respectively. Got size {}.'.format(size))
        if min(size) <= 0:
            raise ValueError('Expected non-zero, positive output size. Got {}'.format(size))
        return _AffineGridGenerator.apply(theta, size, align_corners)
|
class CARAFENaiveFunction(Function):
    """Autograd Function for the naive (unoptimized) CARAFE CUDA op.

    CARAFE (https://arxiv.org/abs/1905.02188) computes each upsampled
    location as a weighted sum over a kernel_size x kernel_size window of
    the input, with content-adaptive weights given by ``masks``. CUDA-only.
    """

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        assert (scale_factor >= 1)
        # one k*k reassembly kernel per group per upsampled location
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor))
        assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # kernel must be odd so the window has a well-defined centre
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor), (w * scale_factor)))
        if features.is_cuda:
            carafe_naive_cuda.forward(features, masks, kernel_size, group_size, scale_factor, output)
        else:
            raise NotImplementedError
        if (features.requires_grad or masks.requires_grad):
            ctx.save_for_backward(features, masks)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        (features, masks) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        grad_input = torch.zeros_like(features)
        grad_masks = torch.zeros_like(masks)
        carafe_naive_cuda.backward(grad_output.contiguous(), features, masks, kernel_size, group_size, scale_factor, grad_input, grad_masks)
        # gradients for (features, masks); the int hyper-params get None
        return (grad_input, grad_masks, None, None, None)
|
class CARAFENaive(Module):
    """Module wrapper for the naive CARAFE upsampling op.

    Args:
        kernel_size (int): Reassembly kernel size (odd).
        group_size (int): Number of channel groups sharing one kernel.
        scale_factor (int): Upsampling ratio.
    """

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFENaive, self).__init__()
        for arg in (kernel_size, group_size, scale_factor):
            assert isinstance(arg, int)
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        return CARAFENaiveFunction.apply(features, masks, self.kernel_size,
                                         self.group_size, self.scale_factor)
|
class CARAFEFunction(Function):
    """Autograd Function for the optimized CARAFE CUDA op.

    Same semantics as ``CARAFENaiveFunction``, but the kernels use extra
    rearranged ("r"-prefixed) scratch buffers for speed. CUDA-only.
    """

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
        assert (scale_factor >= 1)
        # one k*k reassembly kernel per group per upsampled location
        assert (masks.size(1) == ((kernel_size * kernel_size) * group_size))
        assert (masks.size((- 1)) == (features.size((- 1)) * scale_factor))
        assert (masks.size((- 2)) == (features.size((- 2)) * scale_factor))
        assert ((features.size(1) % group_size) == 0)
        # kernel must be odd so the window has a well-defined centre
        assert ((((kernel_size - 1) % 2) == 0) and (kernel_size >= 1))
        ctx.kernel_size = kernel_size
        ctx.group_size = group_size
        ctx.scale_factor = scale_factor
        ctx.feature_size = features.size()
        ctx.mask_size = masks.size()
        (n, c, h, w) = features.size()
        output = features.new_zeros((n, c, (h * scale_factor), (w * scale_factor)))
        # scratch buffers holding layout-rearranged copies used by the kernel
        routput = features.new_zeros(output.size(), requires_grad=False)
        rfeatures = features.new_zeros(features.size(), requires_grad=False)
        rmasks = masks.new_zeros(masks.size(), requires_grad=False)
        if features.is_cuda:
            carafe_cuda.forward(features, rfeatures, masks, rmasks, kernel_size, group_size, scale_factor, routput, output)
        else:
            raise NotImplementedError
        if (features.requires_grad or masks.requires_grad):
            # rfeatures is reused by the backward kernel
            ctx.save_for_backward(features, masks, rfeatures)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        (features, masks, rfeatures) = ctx.saved_tensors
        kernel_size = ctx.kernel_size
        group_size = ctx.group_size
        scale_factor = ctx.scale_factor
        # intermediate buffers for the multi-stage backward kernel
        rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
        rgrad_input = torch.zeros_like(features, requires_grad=False)
        rgrad_masks = torch.zeros_like(masks, requires_grad=False)
        grad_input = torch.zeros_like(features, requires_grad=False)
        grad_masks = torch.zeros_like(masks, requires_grad=False)
        carafe_cuda.backward(grad_output.contiguous(), rfeatures, masks, kernel_size, group_size, scale_factor, rgrad_output, rgrad_input_hs, rgrad_input, rgrad_masks, grad_input, grad_masks)
        # gradients for (features, masks); trailing Nones cover the int
        # hyper-params (autograd tolerates extra trailing None gradients)
        return (grad_input, grad_masks, None, None, None, None)
|
class CARAFE(Module):
    """CARAFE: Content-Aware ReAssembly of FEatures.

    Please refer to https://arxiv.org/abs/1905.02188 for more details.

    Args:
        kernel_size (int): reassemble kernel size
        group_size (int): reassemble group size
        scale_factor (int): upsample ratio

    Returns:
        upsampled feature map
    """

    def __init__(self, kernel_size, group_size, scale_factor):
        super(CARAFE, self).__init__()
        for arg in (kernel_size, group_size, scale_factor):
            assert isinstance(arg, int)
        self.kernel_size = kernel_size
        self.group_size = group_size
        self.scale_factor = scale_factor

    def forward(self, features, masks):
        return CARAFEFunction.apply(features, masks, self.kernel_size,
                                    self.group_size, self.scale_factor)
|
class CARAFEPack(nn.Module):
    """A unified package of CARAFE upsampler that contains: 1) channel
    compressor 2) content encoder 3) CARAFE op.

    Official implementation of ICCV 2019 paper
    CARAFE: Content-Aware ReAssembly of FEatures
    Please refer to https://arxiv.org/abs/1905.02188 for more details.

    Args:
        channels (int): input feature channels
        scale_factor (int): upsample ratio
        up_kernel (int): kernel size of CARAFE op
        up_group (int): group size of CARAFE op
        encoder_kernel (int): kernel size of content encoder
        encoder_dilation (int): dilation of content encoder
        compressed_channels (int): output channels of channels compressor

    Returns:
        upsampled feature map
    """

    def __init__(self, channels, scale_factor, up_kernel=5, up_group=1,
                 encoder_kernel=3, encoder_dilation=1, compressed_channels=64):
        super(CARAFEPack, self).__init__()
        self.channels = channels
        self.scale_factor = scale_factor
        self.up_kernel = up_kernel
        self.up_group = up_group
        self.encoder_kernel = encoder_kernel
        self.encoder_dilation = encoder_dilation
        self.compressed_channels = compressed_channels
        # 1x1 conv reducing channels before predicting reassembly kernels
        self.channel_compressor = nn.Conv2d(channels, self.compressed_channels, 1)
        # predicts up_kernel^2 weights per group per upsampled location
        encoder_out = (self.up_kernel * self.up_kernel * self.up_group
                       * self.scale_factor * self.scale_factor)
        self.content_encoder = nn.Conv2d(
            self.compressed_channels,
            encoder_out,
            self.encoder_kernel,
            padding=int((self.encoder_kernel - 1) * self.encoder_dilation / 2),
            dilation=self.encoder_dilation,
            groups=1)
        self.init_weights()

    def init_weights(self):
        # Xavier for all convs, then near-zero init for the mask encoder so
        # the initial kernels are almost uniform after softmax.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                xavier_init(mod, distribution='uniform')
        normal_init(self.content_encoder, std=0.001)

    def kernel_normalizer(self, mask):
        """Rearrange encoder output to full resolution and softmax-normalize
        each up_kernel x up_kernel reassembly kernel."""
        mask = F.pixel_shuffle(mask, self.scale_factor)
        n, mask_c, h, w = mask.size()
        # one softmax per kernel: fold the k*k weights into dim 2
        mask_channel = int(mask_c / (self.up_kernel * self.up_kernel))
        mask = mask.view(n, mask_channel, -1, h, w)
        mask = F.softmax(mask, dim=2)
        return mask.view(n, mask_c, h, w).contiguous()

    def feature_reassemble(self, x, mask):
        return carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)

    def forward(self, x):
        compressed_x = self.channel_compressor(x)
        mask = self.content_encoder(compressed_x)
        mask = self.kernel_normalizer(mask)
        return self.feature_reassemble(x, mask)
|
def last_zero_init(m):
    """Zero-initialize ``m``, or its final layer if it is a Sequential.

    Used so residual-style branches start out as identity mappings.
    """
    target = m[-1] if isinstance(m, nn.Sequential) else m
    constant_init(target, val=0)
|
class ContextBlock(nn.Module):
    """Global Context (GC) block from GCNet (https://arxiv.org/abs/1904.11492).

    Aggregates a global context vector via attention (or average) pooling
    and fuses it back into every position by channel-wise addition and/or
    sigmoid-gated multiplication.

    Args:
        inplanes (int): Input channels.
        ratio (float): Bottleneck ratio for the transform convs.
        pooling_type (str): 'att' for attention pooling, 'avg' for average.
        fusion_types (tuple[str]): Subset of ('channel_add', 'channel_mul').
    """

    def __init__(self, inplanes, ratio, pooling_type='att', fusion_types=('channel_add',)):
        super(ContextBlock, self).__init__()
        assert pooling_type in ['avg', 'att']
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([(f in valid_fusion_types) for f in fusion_types])
        assert len(fusion_types) > 0, 'at least one fusion should be used'
        self.inplanes = inplanes
        self.ratio = ratio
        self.planes = int(inplanes * ratio)
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if pooling_type == 'att':
            # 1x1 conv producing one attention logit per spatial position
            self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)

        def _transform():
            # bottleneck: 1x1 conv -> LN -> ReLU -> 1x1 conv
            return nn.Sequential(
                nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(self.planes, self.inplanes, kernel_size=1))

        self.channel_add_conv = _transform() if 'channel_add' in fusion_types else None
        self.channel_mul_conv = _transform() if 'channel_mul' in fusion_types else None
        self.reset_parameters()

    def reset_parameters(self):
        if self.pooling_type == 'att':
            kaiming_init(self.conv_mask, mode='fan_in')
            self.conv_mask.inited = True
        # zero-init the last conv of each branch so fusion starts as identity
        if self.channel_add_conv is not None:
            last_zero_init(self.channel_add_conv)
        if self.channel_mul_conv is not None:
            last_zero_init(self.channel_mul_conv)

    def spatial_pool(self, x):
        """Pool ``x`` into a (N, C, 1, 1) global context vector."""
        batch, channel, height, width = x.size()
        if self.pooling_type != 'att':
            return self.avg_pool(x)
        # attention pooling: softmax over all H*W positions
        flat = x.view(batch, channel, height * width).unsqueeze(1)   # N,1,C,HW
        attn = self.conv_mask(x).view(batch, 1, height * width)
        attn = self.softmax(attn).unsqueeze(-1)                      # N,1,HW,1
        context = torch.matmul(flat, attn)                           # N,1,C,1
        return context.view(batch, channel, 1, 1)

    def forward(self, x):
        context = self.spatial_pool(x)
        out = x
        if self.channel_mul_conv is not None:
            # sigmoid-gated channel-wise scaling
            out = out * torch.sigmoid(self.channel_mul_conv(context))
        if self.channel_add_conv is not None:
            # residual channel-wise addition
            out = out + self.channel_add_conv(context)
        return out
|
def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): cfg should contain:
            type (str): identify conv layer type.
            layer args: args needed to instantiate a conv layer.
            When ``None``, a plain 'Conv' layer is built.

    Returns:
        layer (nn.Module): created conv layer

    Raises:
        KeyError: If ``type`` is not registered in ``conv_cfg``.
    """
    if cfg is None:
        cfg_ = dict(type='Conv')
    else:
        assert isinstance(cfg, dict) and 'type' in cfg
        cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in conv_cfg:
        # BUG FIX: the message previously said 'norm type' (copy-paste from
        # the norm builder); this function deals with conv types.
        raise KeyError('Unrecognized conv type {}'.format(layer_type))
    conv_layer = conv_cfg[layer_type]
    # remaining cfg entries are forwarded as keyword args to the conv class
    layer = conv_layer(*args, **kwargs, **cfg_)
    return layer
|
class ConvModule(nn.Module):
    """A conv block that contains conv/norm/activation layers.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
        conv_cfg (dict): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        inplace (bool): Whether to use inplace mode for activation.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), inplace=True, order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert ((conv_cfg is None) or isinstance(conv_cfg, dict))
        assert ((norm_cfg is None) or isinstance(norm_cfg, dict))
        assert ((act_cfg is None) or isinstance(act_cfg, dict))
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.order = order
        assert (isinstance(self.order, tuple) and (len(self.order) == 3))
        assert (set(order) == set(['conv', 'norm', 'act']))
        self.with_norm = (norm_cfg is not None)
        self.with_activation = (act_cfg is not None)
        # conv bias is redundant when a norm layer follows
        if (bias == 'auto'):
            bias = (False if self.with_norm else True)
        self.with_bias = bias
        if (self.with_norm and self.with_bias):
            warnings.warn('ConvModule has norm and bias at the same time')
        self.conv = build_conv_layer(conv_cfg, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # mirror the wrapped conv's hyper-parameters for introspection
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = self.conv.padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        if self.with_norm:
            # norm after conv normalizes out_channels; norm before conv
            # must match in_channels instead
            if (order.index('norm') > order.index('conv')):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            (self.norm_name, norm) = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
        if self.with_activation:
            act_cfg_ = act_cfg.copy()
            act_cfg_.setdefault('inplace', inplace)
            self.activate = build_activation_layer(act_cfg_)
        self.init_weights()

    @property
    def norm(self):
        # the norm layer is registered under a type-dependent name (e.g. 'bn1')
        return getattr(self, self.norm_name)

    def init_weights(self):
        # match kaiming init's gain to the activation actually used
        if (self.with_activation and (self.act_cfg['type'] == 'LeakyReLU')):
            nonlinearity = 'leaky_relu'
        else:
            nonlinearity = 'relu'
        kaiming_init(self.conv, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        """Apply the configured layers in ``self.order``.

        Args:
            x (Tensor): Input feature map.
            activate (bool): Whether to apply the activation for this call.
            norm (bool): Whether to apply the norm layer for this call.
        """
        for layer in self.order:
            if (layer == 'conv'):
                x = self.conv(x)
            elif ((layer == 'norm') and norm and self.with_norm):
                x = self.norm(x)
            elif ((layer == 'act') and activate and self.with_activation):
                x = self.activate(x)
        return x
|
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-05):
    """2D convolution with Weight Standardization.

    Each output filter is standardized to zero mean and (approximately)
    unit variance before the convolution
    (https://arxiv.org/abs/1903.10520).

    Args:
        input (Tensor): Input of shape (N, C_in, H, W).
        weight (Tensor): Conv weight of shape (C_out, C_in/groups, kH, kW).
        bias (Tensor | None): Optional bias of shape (C_out,).
        stride, padding, dilation, groups: Same as ``F.conv2d``.
        eps (float): Small constant avoiding division by zero.

    Returns:
        Tensor: Convolution output.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mu = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mu) / (sigma + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation, groups)
|
class ConvWS2d(nn.Conv2d):
    """``nn.Conv2d`` variant applying Weight Standardization every forward.

    Drop-in replacement for ``nn.Conv2d``; see ``conv_ws_2d`` for details.

    Args:
        eps (float): Stabilizer added to the weight std. All other
            arguments match ``nn.Conv2d``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.eps = eps

    def forward(self, x):
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
|
class DeformRoIPoolingFunction(Function):
    """Autograd Function for deformable position-sensitive RoI pooling.

    Wraps ``deform_pool_cuda`` (CUDA-only). Offsets shift each bin's
    sampling grid as in Deformable ConvNets
    (https://arxiv.org/abs/1703.06211).
    """

    @staticmethod
    def forward(ctx, data, rois, offset, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0):
        # the CUDA kernel only supports square outputs
        (out_h, out_w) = _pair(out_size)
        assert (isinstance(out_h, int) and isinstance(out_w, int))
        assert (out_h == out_w)
        out_size = out_h
        ctx.spatial_scale = spatial_scale
        ctx.out_size = out_size
        ctx.out_channels = out_channels
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        # offset map resolution defaults to the output resolution
        ctx.part_size = (out_size if (part_size is None) else part_size)
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std
        assert (0.0 <= ctx.trans_std <= 1.0)
        if (not data.is_cuda):
            raise NotImplementedError
        n = rois.shape[0]
        output = data.new_empty(n, out_channels, out_size, out_size)
        # per-bin sample counts, consumed by the backward kernel
        output_count = data.new_empty(n, out_channels, out_size, out_size)
        deform_pool_cuda.deform_psroi_pooling_cuda_forward(data, rois, offset, output, output_count, ctx.no_trans, ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part, ctx.trans_std)
        if (data.requires_grad or rois.requires_grad or offset.requires_grad):
            ctx.save_for_backward(data, rois, offset)
            # NOTE: stored on ctx directly (not save_for_backward) since it
            # is a bookkeeping buffer, not a differentiated tensor
            ctx.output_count = output_count
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if (not grad_output.is_cuda):
            raise NotImplementedError
        (data, rois, offset) = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        grad_rois = None  # RoI coordinates receive no gradient
        grad_offset = torch.zeros_like(offset)
        deform_pool_cuda.deform_psroi_pooling_cuda_backward(grad_output, data, rois, offset, output_count, grad_input, grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part, ctx.trans_std)
        # one gradient slot per forward argument
        return (grad_input, grad_rois, grad_offset, None, None, None, None, None, None, None, None)
|
class DeformRoIPooling(nn.Module):
    """Deformable RoI pooling base module (offsets supplied by the caller).

    See Deformable ConvNets (https://arxiv.org/abs/1703.06211).

    Args:
        spatial_scale (float): Scale mapping RoI coords to feature coords.
        out_size (int | tuple): Output spatial size (stored as a pair).
        out_channels (int): Channels of the pooled output.
        no_trans (bool): If True, ignore offsets (plain PS-RoI pooling).
        group_size (int): Number of position-sensitive groups.
        part_size (int | None): Offset-map size; defaults to ``out_size``.
        sample_per_part (int): Sampling points per bin.
        trans_std (float): Scaling applied to the offsets.
    """

    def __init__(self, spatial_scale, out_size, out_channels, no_trans,
                 group_size=1, part_size=None, sample_per_part=4, trans_std=0.0):
        super(DeformRoIPooling, self).__init__()
        self.spatial_scale = spatial_scale
        self.out_size = _pair(out_size)
        self.out_channels = out_channels
        self.no_trans = no_trans
        self.group_size = group_size
        # NOTE: falls back to the raw (un-paired) out_size argument
        self.part_size = out_size if part_size is None else part_size
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std

    def forward(self, data, rois, offset):
        if self.no_trans:
            # offsets unused: hand the op an empty tensor
            offset = data.new_empty(0)
        return deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                  self.out_size, self.out_channels,
                                  self.no_trans, self.group_size,
                                  self.part_size, self.sample_per_part,
                                  self.trans_std)
|
class DeformRoIPoolingPack(DeformRoIPooling):
    """Deformable RoI pooling with a built-in offset-prediction branch.

    First pools without offsets, then predicts per-bin (dy, dx) offsets
    from that result via a small fc head, and finally pools again using
    the predicted offsets.

    Args:
        num_offset_fcs (int): Number of fc layers in the offset head.
        deform_fc_channels (int): Hidden width of the offset head.
        (remaining args: see ``DeformRoIPooling``)
    """

    def __init__(self, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0, num_offset_fcs=3, deform_fc_channels=1024):
        super(DeformRoIPoolingPack, self).__init__(spatial_scale, out_size, out_channels, no_trans, group_size, part_size, sample_per_part, trans_std)
        self.num_offset_fcs = num_offset_fcs
        self.deform_fc_channels = deform_fc_channels
        if (not no_trans):
            seq = []
            ic = ((self.out_size[0] * self.out_size[1]) * self.out_channels)
            for i in range(self.num_offset_fcs):
                if (i < (self.num_offset_fcs - 1)):
                    oc = self.deform_fc_channels
                else:
                    # final layer: 2 offset values (dy, dx) per output bin
                    oc = ((self.out_size[0] * self.out_size[1]) * 2)
                seq.append(nn.Linear(ic, oc))
                ic = oc
                if (i < (self.num_offset_fcs - 1)):
                    seq.append(nn.ReLU(inplace=True))
            self.offset_fc = nn.Sequential(*seq)
            # zero-init the last fc so training starts with no deformation
            self.offset_fc[(- 1)].weight.data.zero_()
            self.offset_fc[(- 1)].bias.data.zero_()

    def forward(self, data, rois):
        assert (data.size(1) == self.out_channels)
        n = rois.shape[0]
        if (n == 0):
            # no RoIs: return an empty, correctly-shaped output
            return data.new_empty(n, self.out_channels, self.out_size[0], self.out_size[1])
        if self.no_trans:
            offset = data.new_empty(0)
            return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
        else:
            # first pass: plain pooling (no_trans=True) to obtain features
            # for the offset predictor
            offset = data.new_empty(0)
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, True, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, (- 1)))
            offset = offset.view(n, 2, self.out_size[0], self.out_size[1])
            # second pass: pooling guided by the predicted offsets
            return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
|
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
    """Modulated deformable RoI pooling (DCNv2-style).

    Like ``DeformRoIPoolingPack``, but additionally predicts a per-bin
    sigmoid mask that re-weights (modulates) the pooled output.

    Args:
        num_offset_fcs (int): Number of fc layers in the offset head.
        num_mask_fcs (int): Number of fc layers in the mask head.
        deform_fc_channels (int): Hidden width of both heads.
        (remaining args: see ``DeformRoIPooling``)
    """

    def __init__(self, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0, num_offset_fcs=3, num_mask_fcs=2, deform_fc_channels=1024):
        super(ModulatedDeformRoIPoolingPack, self).__init__(spatial_scale, out_size, out_channels, no_trans, group_size, part_size, sample_per_part, trans_std)
        self.num_offset_fcs = num_offset_fcs
        self.num_mask_fcs = num_mask_fcs
        self.deform_fc_channels = deform_fc_channels
        if (not no_trans):
            # offset head: fc stack ending in 2 values (dy, dx) per bin
            offset_fc_seq = []
            ic = ((self.out_size[0] * self.out_size[1]) * self.out_channels)
            for i in range(self.num_offset_fcs):
                if (i < (self.num_offset_fcs - 1)):
                    oc = self.deform_fc_channels
                else:
                    oc = ((self.out_size[0] * self.out_size[1]) * 2)
                offset_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if (i < (self.num_offset_fcs - 1)):
                    offset_fc_seq.append(nn.ReLU(inplace=True))
            self.offset_fc = nn.Sequential(*offset_fc_seq)
            # zero-init so training starts with no deformation
            self.offset_fc[(- 1)].weight.data.zero_()
            self.offset_fc[(- 1)].bias.data.zero_()
            # mask head: fc stack ending in 1 sigmoid weight per bin
            mask_fc_seq = []
            ic = ((self.out_size[0] * self.out_size[1]) * self.out_channels)
            for i in range(self.num_mask_fcs):
                if (i < (self.num_mask_fcs - 1)):
                    oc = self.deform_fc_channels
                else:
                    oc = (self.out_size[0] * self.out_size[1])
                mask_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if (i < (self.num_mask_fcs - 1)):
                    mask_fc_seq.append(nn.ReLU(inplace=True))
                else:
                    mask_fc_seq.append(nn.Sigmoid())
            self.mask_fc = nn.Sequential(*mask_fc_seq)
            # [-2] is the last Linear (the [-1] entry is the Sigmoid);
            # zero-init makes the initial mask uniformly 0.5
            self.mask_fc[(- 2)].weight.data.zero_()
            self.mask_fc[(- 2)].bias.data.zero_()

    def forward(self, data, rois):
        assert (data.size(1) == self.out_channels)
        n = rois.shape[0]
        if (n == 0):
            # no RoIs: return an empty, correctly-shaped output
            return data.new_empty(n, self.out_channels, self.out_size[0], self.out_size[1])
        if self.no_trans:
            offset = data.new_empty(0)
            return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
        else:
            # first pass without offsets to get features for both heads
            offset = data.new_empty(0)
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, True, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, (- 1)))
            offset = offset.view(n, 2, self.out_size[0], self.out_size[1])
            mask = self.mask_fc(x.view(n, (- 1)))
            mask = mask.view(n, 1, self.out_size[0], self.out_size[1])
            # second pass with predicted offsets, then modulate by the mask
            return (deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std) * mask)
|
class _GridSampler(Function):
    """Autograd wrapper for the bundled 2D/3D grid-sampler kernels.

    Fallback implementation behind ``grid_sample`` for torch versions whose
    ``F.grid_sample`` lacks ``align_corners``; dispatches on device
    (cuda/cpu) and input rank (4D -> 2D sampler, 5D -> 3D sampler).
    """

    @staticmethod
    def forward(ctx, input, grid, mode_enum, padding_mode_enum, align_corners):
        ctx.save_for_backward(input, grid)
        ctx.mode_enum = mode_enum
        ctx.padding_mode_enum = padding_mode_enum
        ctx.align_corners = align_corners
        # pick the kernel matching device and rank
        if input.is_cuda:
            if (input.dim() == 4):
                func = grid_sampler_cuda.grid_sampler_2d_forward_cuda
            else:
                func = grid_sampler_cuda.grid_sampler_3d_forward_cuda
        elif (input.dim() == 4):
            func = grid_sampler_cuda.grid_sampler_2d_forward_cpu
        else:
            func = grid_sampler_cuda.grid_sampler_3d_forward_cpu
        output = func(input, grid, mode_enum, padding_mode_enum, align_corners)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (input, grid) = ctx.saved_tensors
        mode_enum = ctx.mode_enum
        padding_mode_enum = ctx.padding_mode_enum
        align_corners = ctx.align_corners
        # same device/rank dispatch as forward
        if input.is_cuda:
            if (input.dim() == 4):
                func = grid_sampler_cuda.grid_sampler_2d_backward_cuda
            else:
                func = grid_sampler_cuda.grid_sampler_3d_backward_cuda
        elif (input.dim() == 4):
            func = grid_sampler_cuda.grid_sampler_2d_backward_cpu
        else:
            func = grid_sampler_cuda.grid_sampler_3d_backward_cpu
        (grad_input, grad_grid) = func(grad_output, input, grid, mode_enum, padding_mode_enum, align_corners)
        # gradients for (input, grid); the enums and flag get None
        return (grad_input, grad_grid, None, None, None)
|
def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corners=False):
    """Drop-in replacement for ``F.grid_sample`` with ``align_corners``.

    torch >= 1.3 exposes ``align_corners`` natively, so this delegates to
    ``F.grid_sample``; otherwise it validates the arguments and uses the
    bundled CUDA/CPU kernels.

    Args:
        input (Tensor): 4D (N, C, H, W) or 5D (N, C, D, H, W) input.
        grid (Tensor): Sampling grid; last dim is 2 (4D) or 3 (5D).
        mode (str): 'bilinear' or 'nearest'.
        padding_mode (str): 'zeros', 'border' or 'reflection'.
        align_corners (bool): Corner-alignment convention.

    Returns:
        Tensor: The sampled output.
    """
    # BUG FIX: version strings compare lexicographically ('1.10' < '1.3'),
    # so parse (major, minor) before comparing.
    version = tuple(int(p) for p in torch.__version__.split('+')[0].split('.')[:2])
    if version >= (1, 3):
        return F.grid_sample(input, grid, mode, padding_mode, align_corners)
    elif align_corners:
        # align_corners=True matches the legacy default behaviour
        return F.grid_sample(input, grid, mode, padding_mode)
    else:
        assert mode in ['bilinear', 'nearest'], \
            'expected mode to be bilinear or nearest, but got: {}'.format(mode)
        assert padding_mode in ['zeros', 'border', 'reflection'], \
            'expected padding_mode to be zeros, border, or reflection, but got: {}'.format(padding_mode)
        # the CUDA kernels take integer enums rather than strings
        mode_enum = 0 if mode == 'bilinear' else 1
        if padding_mode == 'zeros':
            padding_mode_enum = 0
        elif padding_mode == 'border':
            padding_mode_enum = 1
        else:
            padding_mode_enum = 2
        assert input.device == grid.device, \
            'expected input and grid to be on same device, but input is on {} and grid is on {}'.format(input.device, grid.device)
        assert input.dtype == grid.dtype, \
            'expected input and grid to have the same dtype, but input has {} and grid has {}'.format(input.dtype, grid.dtype)
        # BUG FIX: message previously read 'dimensionsbut' (missing space)
        assert input.dim() == 4 or input.dim() == 5, \
            'expected 4D or 5D input and grid with same number of dimensions, but got input with sizes {} and grid with sizes {}'.format(input.size(), grid.size())
        assert input.size(0) == grid.size(0), \
            'expected input and grid to have the same batch size, but got input with sizes {} and grid with sizes {}'.format(input.size(), grid.size())
        # BUG FIX: the second placeholder was inside 'last {} dimension',
        # so the grid size was interpolated into the wrong spot.
        assert grid.size(-1) == input.dim() - 2, \
            'expected grid to have size {} in last dimension, but got grid with sizes {}'.format(input.dim() - 2, grid.size())
        for i in range(2, input.dim()):
            # BUG FIX: Tensor has .size(), not .sizes(); the old message
            # raised AttributeError instead of the intended AssertionError.
            assert input.size(i) > 0, \
                'expected input to have non-empty spatial dimensions, but input has sizes {} with dimension {} being empty'.format(input.size(), i)
        return _GridSampler.apply(input, grid, mode_enum, padding_mode_enum, align_corners)
|
class NonLocal2D(nn.Module):
    """Non-local block (https://arxiv.org/abs/1711.07971).

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio for the bottleneck.
        use_scale (bool): Scale pairwise_weight by 1/sqrt(inter_channels).
        conv_cfg (dict): Conv config, applied to ``conv_out`` only.
        norm_cfg (dict): Norm config, applied to ``conv_out`` only.
        mode (str): Either 'embedded_gaussian' or 'dot_product'.
    """

    def __init__(self,
                 in_channels,
                 reduction=2,
                 use_scale=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 mode='embedded_gaussian'):
        super(NonLocal2D, self).__init__()
        assert (mode in ['embedded_gaussian', 'dot_product'])
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        self.inter_channels = in_channels // reduction
        self.mode = mode
        # theta/phi/g are bare 1x1 convs (no norm, no activation).
        self.g = ConvModule(
            self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
        self.theta = ConvModule(
            self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
        self.phi = ConvModule(
            self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
        self.conv_out = ConvModule(
            self.inter_channels,
            self.in_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)
        self.init_weights()

    def init_weights(self, std=0.01, zeros_init=True):
        for m in (self.g, self.theta, self.phi):
            normal_init(m.conv, std=std)
        # Zero-init the output conv so the block starts as identity.
        if zeros_init:
            constant_init(self.conv_out.conv, 0)
        else:
            normal_init(self.conv_out.conv, std=std)

    def embedded_gaussian(self, theta_x, phi_x):
        # pairwise_weight: [N, HxW, HxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is inter_channels.
            pairwise_weight = pairwise_weight / (theta_x.shape[-1] ** 0.5)
        return pairwise_weight.softmax(dim=-1)

    def dot_product(self, theta_x, phi_x):
        pairwise_weight = torch.matmul(theta_x, phi_x)
        return pairwise_weight / pairwise_weight.shape[-1]

    def forward(self, x):
        n = x.size(0)
        # g_x, theta_x: [N, HxW, C']; phi_x: [N, C', HxW]
        g_x = self.g(x).view(n, self.inter_channels, -1).permute(0, 2, 1)
        theta_x = self.theta(x).view(n, self.inter_channels, -1).permute(0, 2, 1)
        phi_x = self.phi(x).view(n, self.inter_channels, -1)
        pairwise_weight = getattr(self, self.mode)(theta_x, phi_x)
        # y: [N, HxW, C'] -> [N, C', H, W]
        y = torch.matmul(pairwise_weight, g_x)
        y = y.permute(0, 2, 1).reshape(n, self.inter_channels, *x.shape[2:])
        # Residual connection.
        return x + self.conv_out(y)
|
def build_norm_layer(cfg, num_features, postfix=''):
    """Build a normalization layer from a config dict.

    Args:
        cfg (dict): Must contain 'type' (key into the module-level
            ``norm_cfg`` table) plus any constructor kwargs; may also
            contain 'requires_grad' (bool, default True).
        num_features (int): Number of input channels.
        postfix (int | str): Appended to the norm abbreviation to build
            the returned layer name.

    Returns:
        tuple[str, nn.Module]: (abbreviation + postfix, created layer).
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in norm_cfg:
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    abbr, norm_layer = norm_cfg[layer_type]
    if norm_layer is None:
        raise NotImplementedError

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-05)
    if layer_type == 'GN':
        # GroupNorm takes num_channels and requires num_groups in the cfg.
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)
    else:
        layer = norm_layer(num_features, **cfg_)
        if layer_type == 'SyncBN':
            layer._specify_ddp_gpu_num(1)

    # Honor the optional requires_grad flag for all affine parameters.
    for param in layer.parameters():
        param.requires_grad = requires_grad
    return (name, layer)
|
class RoIAlignFunction(Function):
    """Autograd function for RoI Align backed by the CUDA extension.

    Two kernel versions are dispatched on ``aligned``: v1 is the legacy
    MMDetection implementation, v2 the detectron2-style aligned one.
    CPU tensors are not supported.
    """

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0, aligned=True):
        """Pool each RoI to a fixed (out_h, out_w) grid.

        Args:
            features: (N, C, H, W) CUDA tensor.
            rois: (num_rois, 5) tensor; column 0 is the batch index,
                columns 1-4 are xyxy coordinates.
            out_size: int or (h, w) output resolution.
            spatial_scale: factor mapping roi coords to feature coords.
            sample_num: sampling points per bin (0 = adaptive).
            aligned: choose the v2 (half-pixel aligned) kernel.
        """
        (out_h, out_w) = _pair(out_size)
        assert (isinstance(out_h, int) and isinstance(out_w, int))
        # Stash everything backward() needs on the context.
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        ctx.aligned = aligned
        if features.is_cuda:
            if (not aligned):
                # v1 kernel writes into a preallocated output buffer.
                (batch_size, num_channels, data_height, data_width) = features.size()
                num_rois = rois.size(0)
                output = features.new_zeros(num_rois, num_channels, out_h, out_w)
                roi_align_cuda.forward_v1(features, rois, out_h, out_w, spatial_scale, sample_num, output)
            else:
                # v2 kernel allocates and returns the output itself.
                output = roi_align_cuda.forward_v2(features, rois, spatial_scale, out_h, out_w, sample_num, aligned)
        else:
            raise NotImplementedError
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Propagate gradients to ``features`` only (rois get None)."""
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        sample_num = ctx.sample_num
        rois = ctx.saved_tensors[0]
        aligned = ctx.aligned
        assert ((feature_size is not None) and grad_output.is_cuda)
        (batch_size, num_channels, data_height, data_width) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if (not aligned):
            if ctx.needs_input_grad[0]:
                grad_input = rois.new_zeros(batch_size, num_channels, data_height, data_width)
                roi_align_cuda.backward_v1(grad_output.contiguous(), rois, out_h, out_w, spatial_scale, sample_num, grad_input)
        else:
            grad_input = roi_align_cuda.backward_v2(grad_output, rois, spatial_scale, out_h, out_w, batch_size, num_channels, data_height, data_width, sample_num, aligned)
        # One None per non-tensor forward argument.
        return (grad_input, grad_rois, None, None, None, None)
|
class RoIAlign(nn.Module):
    """RoI Align pooling layer.

    Args:
        out_size (tuple): (h, w) of the pooled output.
        spatial_scale (float): Scale applied to input boxes.
        sample_num (int): Sampling points per output bin; 2 samples
            densely for current models.
        use_torchvision (bool): Delegate to ``torchvision.ops.roi_align``.
        aligned (bool): If False, keep the legacy MMDetection behavior.
            If True (detectron2 convention), the scaled RoI is shifted by
            -0.5 before sampling so that bilinear neighbors of a continuous
            coordinate c are floor(c - 0.5)/ceil(c - 0.5), i.e. pixel
            centers are honored. The difference is negligible when
            RoIAlign feeds conv layers.
    """

    def __init__(self, out_size, spatial_scale, sample_num=0, use_torchvision=False, aligned=False):
        super(RoIAlign, self).__init__()
        self.out_size = _pair(out_size)
        self.spatial_scale = float(spatial_scale)
        self.aligned = aligned
        self.sample_num = int(sample_num)
        self.use_torchvision = use_torchvision
        assert (not (use_torchvision and aligned)), 'Torchvision does not support aligned RoIAlgin'

    def forward(self, features, rois):
        """Pool features per RoI.

        Args:
            features: NCHW feature map.
            rois: Bx5 boxes; column 0 is the batch index, 1-4 are xyxy.
        """
        assert rois.dim() == 2 and rois.size(1) == 5
        if self.use_torchvision:
            from torchvision.ops import roi_align as tv_roi_align
            return tv_roi_align(features, rois, self.out_size,
                                self.spatial_scale, self.sample_num)
        return roi_align(features, rois, self.out_size, self.spatial_scale,
                         self.sample_num, self.aligned)

    def __repr__(self):
        return ('{}(out_size={}, spatial_scale={}, sample_num={}'
                ', use_torchvision={}, aligned={})').format(
                    self.__class__.__name__, self.out_size,
                    self.spatial_scale, self.sample_num,
                    self.use_torchvision, self.aligned)
|
class RoIPoolFunction(Function):
    """Autograd function for RoI max pooling backed by the CUDA extension.

    CPU tensors are not supported (forward asserts ``features.is_cuda``).
    """

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale):
        """Max-pool each RoI to a fixed (out_h, out_w) grid.

        Args:
            features: (N, C, H, W) CUDA tensor.
            rois: (num_rois, 5) tensor; column 0 is the batch index,
                columns 1-4 are xyxy coordinates.
            out_size: int or (h, w) output resolution.
            spatial_scale: factor mapping roi coords to feature coords.
        """
        assert features.is_cuda
        (out_h, out_w) = _pair(out_size)
        assert (isinstance(out_h, int) and isinstance(out_w, int))
        ctx.save_for_backward(rois)
        num_channels = features.size(1)
        num_rois = rois.size(0)
        out_size = (num_rois, num_channels, out_h, out_w)
        output = features.new_zeros(out_size)
        # argmax records, per output element, which input index won the max;
        # the backward kernel routes gradients through it.
        argmax = features.new_zeros(out_size, dtype=torch.int)
        roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale, output, argmax)
        ctx.spatial_scale = spatial_scale
        ctx.feature_size = features.size()
        ctx.argmax = argmax
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Scatter gradients back through the recorded argmax indices."""
        assert grad_output.is_cuda
        spatial_scale = ctx.spatial_scale
        feature_size = ctx.feature_size
        argmax = ctx.argmax
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None)
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.new_zeros(feature_size)
            roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax, spatial_scale, grad_input)
        # One None per non-tensor forward argument.
        return (grad_input, grad_rois, None, None)
|
class RoIPool(nn.Module):
    """RoI max-pooling layer, optionally delegating to torchvision.

    Args:
        out_size (tuple): (h, w) of the pooled output.
        spatial_scale (float): Scale applied to input boxes.
        use_torchvision (bool): Delegate to ``torchvision.ops.roi_pool``.
    """

    def __init__(self, out_size, spatial_scale, use_torchvision=False):
        super(RoIPool, self).__init__()
        self.out_size = _pair(out_size)
        self.spatial_scale = float(spatial_scale)
        self.use_torchvision = use_torchvision

    def forward(self, features, rois):
        if self.use_torchvision:
            from torchvision.ops import roi_pool as tv_roi_pool
            return tv_roi_pool(features, rois, self.out_size,
                               self.spatial_scale)
        return roi_pool(features, rois, self.out_size, self.spatial_scale)

    def __repr__(self):
        return '{}(out_size={}, spatial_scale={}, use_torchvision={})'.format(
            self.__class__.__name__, self.out_size, self.spatial_scale,
            self.use_torchvision)
|
class Scale(nn.Module):
    """Multiply the input by a single learnable scalar.

    Args:
        scale (float): Initial value of the scale factor.
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return self.scale * x
|
class SigmoidFocalLossFunction(Function):
    """Autograd function for sigmoid focal loss backed by the CUDA extension.

    Expects ``input`` of shape (N, num_classes) and integer class targets.
    """

    @staticmethod
    def forward(ctx, input, target, gamma=2.0, alpha=0.25):
        """Compute per-sample focal loss on the GPU."""
        ctx.save_for_backward(input, target)
        # num_classes is inferred from the logits' second dimension.
        num_classes = input.shape[1]
        ctx.num_classes = num_classes
        ctx.gamma = gamma
        ctx.alpha = alpha
        loss = sigmoid_focal_loss_cuda.forward(input, target, num_classes, gamma, alpha)
        return loss

    @staticmethod
    @once_differentiable
    def backward(ctx, d_loss):
        """Gradient w.r.t. the logits only; target and scalars get None."""
        (input, target) = ctx.saved_tensors
        num_classes = ctx.num_classes
        gamma = ctx.gamma
        alpha = ctx.alpha
        # The CUDA kernel requires a contiguous gradient buffer.
        d_loss = d_loss.contiguous()
        d_input = sigmoid_focal_loss_cuda.backward(input, target, d_loss, num_classes, gamma, alpha)
        return (d_input, None, None, None, None)
|
class SigmoidFocalLoss(nn.Module):
    """Module wrapper around the CUDA sigmoid focal loss.

    Args:
        gamma (float): Focusing parameter.
        alpha (float): Balancing parameter.
    """

    def __init__(self, gamma, alpha):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, logits, targets):
        # The underlying kernel is CUDA-only.
        assert logits.is_cuda
        return sigmoid_focal_loss(logits, targets, self.gamma, self.alpha).sum()

    def __repr__(self):
        return '{}(gamma={}, alpha={})'.format(
            self.__class__.__name__, self.gamma, self.alpha)
|
class PixelShufflePack(nn.Module):
    """Pixel-shuffle upsample layer (conv + ``F.pixel_shuffle``).

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of the channel-expanding conv.

    Returns:
        Upsampled feature map of shape
        (N, out_channels, H * scale_factor, W * scale_factor).
    """

    def __init__(self, in_channels, out_channels, scale_factor, upsample_kernel):
        super(PixelShufflePack, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # The conv expands channels by scale_factor**2 so pixel_shuffle can
        # trade them for spatial resolution; 'same' padding keeps H, W.
        self.upsample_conv = nn.Conv2d(
            self.in_channels,
            self.out_channels * scale_factor * scale_factor,
            self.upsample_kernel,
            padding=(self.upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x):
        return F.pixel_shuffle(self.upsample_conv(x), self.scale_factor)
|
def build_upsample_layer(cfg):
    """Build an upsample layer from a config dict.

    Args:
        cfg (dict): Must contain 'type' (key into the module-level
            ``upsample_cfg`` table) plus any constructor kwargs, e.g. the
            upsample ratio.

    Returns:
        nn.Module: The created upsample layer.
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in upsample_cfg:
        raise KeyError('Unrecognized upsample type {}'.format(layer_type))
    upsample = upsample_cfg[layer_type]
    if upsample is None:
        raise NotImplementedError
    return upsample(**cfg_)
|
def collect_env():
    """Collect environment information (platform, CUDA, library versions).

    Returns:
        dict: Mapping from item names (e.g. 'PyTorch', 'NVCC', 'GPU 0') to
        their values. CUDA-related entries are present only when CUDA is
        available.
    """
    env_info = {}
    env_info['sys.platform'] = sys.platform
    env_info['Python'] = sys.version.replace('\n', '')
    cuda_available = torch.cuda.is_available()
    env_info['CUDA available'] = cuda_available
    if cuda_available:
        from torch.utils.cpp_extension import CUDA_HOME
        env_info['CUDA_HOME'] = CUDA_HOME
        if ((CUDA_HOME is not None) and osp.isdir(CUDA_HOME)):
            try:
                # Query nvcc for its version line; best-effort only.
                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                nvcc = subprocess.check_output('"{}" -V | tail -n1'.format(nvcc), shell=True)
                nvcc = nvcc.decode('utf-8').strip()
            except subprocess.SubprocessError:
                nvcc = 'Not Available'
            env_info['NVCC'] = nvcc
        # Group GPU ids by device name, e.g. {'GPU 0,1': 'Tesla V100'}.
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for (name, devids) in devices.items():
            env_info[('GPU ' + ','.join(devids))] = name
        # NOTE(review): raises CalledProcessError if gcc is missing — only
        # reached on CUDA machines, where gcc is normally present.
        gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
        gcc = gcc.decode('utf-8').strip()
        env_info['GCC'] = gcc
    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = torch.__config__.show()
    env_info['TorchVision'] = torchvision.__version__
    env_info['OpenCV'] = cv2.__version__
    env_info['MMCV'] = mmcv.__version__
    env_info['MMDetection'] = mmdet.__version__
    # Imported lazily: the compiled ops may not be importable at module load.
    from mmdet.ops import get_compiler_version, get_compiling_cuda_version
    env_info['MMDetection Compiler'] = get_compiler_version()
    env_info['MMDetection CUDA Compiler'] = get_compiling_cuda_version()
    return env_info
|
def get_model_complexity_info(model, input_res, print_per_layer_stat=True, as_strings=True, input_constructor=None, ost=sys.stdout):
    """Compute per-image FLOPs and the parameter count of a model.

    Args:
        model (nn.Module): Model to analyze (hooks are attached/removed).
        input_res (tuple): Input resolution, at least 2 dims (no batch dim).
        print_per_layer_stat (bool): Print a per-layer breakdown to ``ost``.
        as_strings (bool): Return human-readable strings instead of numbers.
        input_constructor (callable): Optional; builds the kwargs dict fed
            to the model from ``input_res`` (for non-tensor inputs).
        ost: Output stream for the per-layer breakdown.

    Returns:
        tuple: (flops, params) as strings or raw numbers.
    """
    assert type(input_res) is tuple
    assert len(input_res) >= 2
    flops_model = add_flops_counting_methods(model)
    flops_model.eval().start_flops_count()
    if input_constructor:
        inputs = input_constructor(input_res)
        flops_model(**inputs)
    else:
        # Dummy batch of one, matching the model's dtype and device.
        ref = next(flops_model.parameters())
        batch = torch.ones(()).new_empty(
            (1, *input_res), dtype=ref.dtype, device=ref.device)
        flops_model(batch)
    if print_per_layer_stat:
        print_model_with_flops(flops_model, ost=ost)
    flops_count = flops_model.compute_average_flops_cost()
    params_count = get_model_parameters_number(flops_model)
    flops_model.stop_flops_count()
    if as_strings:
        return (flops_to_string(flops_count), params_to_string(params_count))
    return (flops_count, params_count)
|
def flops_to_string(flops, units='GMac', precision=2):
    """Convert a raw MAC count into a human-readable string.

    Args:
        flops (int | float): Number of multiply-accumulate operations.
        units (str | None): 'GMac', 'MMac' or 'KMac' to force a unit;
            None picks the largest unit that fits. Unknown units fall back
            to raw 'Mac'.
        precision (int): Decimal places in the rounded value.

    Returns:
        str: e.g. '1.0 GMac'.
    """
    unit_scales = (('GMac', 10.0 ** 9), ('MMac', 10.0 ** 6), ('KMac', 10.0 ** 3))
    if units is None:
        # Auto-select the largest unit with a non-zero integer part.
        for unit, scale in unit_scales:
            if flops // scale > 0:
                return '{} {}'.format(round(flops / scale, precision), unit)
        return '{} Mac'.format(flops)
    for unit, scale in unit_scales:
        if units == unit:
            return '{} {}'.format(round(flops / scale, precision), units)
    return '{} Mac'.format(flops)
|
def params_to_string(params_num):
    """Convert a parameter count to a short human-readable string.

    :param float params_num: number
    :returns str: number

    >>> params_to_string(1e9)
    '1000.0 M'
    >>> params_to_string(2e5)
    '200.0 k'
    >>> params_to_string(3e-9)
    '3e-09'
    """
    if params_num // 10 ** 6 > 0:
        return '{} M'.format(round(params_num / 10 ** 6, 2))
    if params_num // 10 ** 3:
        return '{} k'.format(round(params_num / 10 ** 3, 2))
    return str(params_num)
|
def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
    """Print the model with per-module flops appended to each repr line.

    Temporarily rebinds each submodule's ``extra_repr`` so that
    ``print(model)`` shows accumulated flops and their share of the total,
    then restores the original reprs. Must be called after a forward pass
    with flops counting active.
    """
    total_flops = model.compute_average_flops_cost()

    def accumulate_flops(self):
        # Leaves with a counting hook report their own average; containers
        # recurse and sum over direct children.
        if is_supported_instance(self):
            return (self.__flops__ / model.__batch_counter__)
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def flops_repr(self):
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} MACs'.format((accumulated_flops_cost / total_flops)), self.original_extra_repr()])

    def add_extra_repr(m):
        # Bind the helpers as instance methods via __get__ so they receive
        # the module as `self` without touching the class.
        m.accumulate_flops = accumulate_flops.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if (m.extra_repr != flops_extra_repr):
            # Stash the original so flops_repr can append to it and
            # del_extra_repr can restore it.
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert (m.extra_repr != m.original_extra_repr)

    def del_extra_repr(m):
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost)
    model.apply(del_extra_repr)
|
def get_model_parameters_number(model):
    """Count the trainable (requires_grad) parameters of ``model``."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
def add_flops_counting_methods(net_main_module):
    """Attach the flops-counting API to a network instance.

    Binds start/stop/reset/compute functions as instance methods (via
    ``__get__``, so the class itself is untouched), resets all counters,
    and clears any conv flops masks.

    Returns:
        The same module, for chaining.
    """
    net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
    net_main_module.reset_flops_count()
    # Ensure every supported submodule starts with __mask__ = None.
    net_main_module.apply(add_flops_mask_variable_or_reset)
    return net_main_module
|
def compute_average_flops_cost(self):
    """Mean flops consumption per image.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object; divides the accumulated per-module totals by the number of
    samples seen.
    """
    total = sum(module.__flops__ for module in self.modules()
                if is_supported_instance(module))
    return total / self.__batch_counter__
|
def start_flops_count(self):
    """Activate flops counting; call before running the network.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object. Registers the batch counter on the root and a counting
    forward hook on every supported submodule.
    """
    add_batch_counter_hook_function(self)
    self.apply(add_flops_counter_hook_function)
|
def stop_flops_count(self):
    """Pause flops counting by removing all registered hooks.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object. Accumulated counters are kept; call ``reset_flops_count``
    to clear them.
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
|
def reset_flops_count(self):
    """Reset all flops and batch counters to zero.

    Available after ``add_flops_counting_methods()`` has been called on the
    net object.
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
|
def add_flops_mask(module, mask):
    """Attach ``mask`` to every Conv2d submodule of ``module``.

    The mask is later used by ``conv_flops_counter_hook`` to count only the
    spatial positions it marks as active.
    """
    def _set_mask(m):
        if isinstance(m, torch.nn.Conv2d):
            m.__mask__ = mask

    module.apply(_set_mask)
|
def remove_flops_mask(module):
    """Clear any conv flops mask previously set by ``add_flops_mask``."""
    # Reuses the reset helper, which sets __mask__ = None on every
    # supported submodule.
    module.apply(add_flops_mask_variable_or_reset)
|
def is_supported_instance(module):
    """Whether a flops-counting hook exists for this module's type."""
    return any(issubclass(type(module), mod) for mod in hook_mapping)
|
def empty_flops_counter_hook(module, input, output):
    """Hook for modules whose flops are deliberately counted as zero."""
    # The no-op addition keeps the accumulation pattern (and the
    # requirement that __flops__ exists) uniform with the other hooks.
    module.__flops__ += 0
|
def upsample_flops_counter_hook(module, input, output):
    """Count one operation per output element for upsample layers.

    Counts the full output tensor (batch dimension included) so that the
    per-image average computed by ``compute_average_flops_cost`` — which
    divides by the batch counter — stays consistent with the other hooks,
    e.g. ``relu_flops_counter_hook``. The previous implementation iterated
    over ``output[0]`` (a single sample) and therefore undercounted by a
    factor of the batch size.
    """
    module.__flops__ += int(output.numel())
|
def relu_flops_counter_hook(module, input, output):
    """Count one operation per output element for activation layers."""
    module.__flops__ += int(output.numel())
|
def linear_flops_counter_hook(module, input, output):
    """Count batch * in_features * out_features MACs for a Linear layer.

    NOTE(review): assumes a 2D (batch, features) input — inputs with extra
    leading dims would be undercounted; confirm against callers.
    """
    x = input[0]
    module.__flops__ += int(x.shape[0] * x.shape[1] * output.shape[1])
|
def pool_flops_counter_hook(module, input, output):
    """Count one operation per input element for pooling layers."""
    module.__flops__ += int(np.prod(input[0].shape))
|
def bn_flops_counter_hook(module, input, output):
    """BatchNorm costs one pass over the input; affine params add another."""
    elems = np.prod(input[0].shape)
    factor = 2 if module.affine else 1
    module.__flops__ += int(elems * factor)
|
def gn_flops_counter_hook(module, input, output):
    """GroupNorm: three passes (mean, var, normalize); affine adds one."""
    elems = np.prod(input[0].shape)
    passes = 4 if module.affine else 3
    module.__flops__ += int(elems * passes)
|
def deconv_flops_counter_hook(conv_module, input, output):
    """Accumulate MACs for a transposed convolution.

    The per-position cost (kernel_h * kernel_w * in_channels *
    out_channels / groups) is applied once per *input* spatial position;
    the bias, if present, adds out_channels per *output* position.
    """
    input = input[0]
    batch_size = input.shape[0]
    (input_height, input_width) = input.shape[2:]
    (kernel_height, kernel_width) = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups
    filters_per_channel = out_channels // groups
    conv_per_position_flops = kernel_height * kernel_width * in_channels * filters_per_channel
    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        (output_height, output_width) = output.shape[2:]
        # Fix: was ``output_height * output_height`` — the output width was
        # ignored, miscounting bias flops for non-square outputs.
        bias_flops = out_channels * batch_size * output_height * output_width
    conv_module.__flops__ += int(overall_conv_flops + bias_flops)
|
def conv_flops_counter_hook(conv_module, input, output):
    """Accumulate MACs for a (possibly grouped) convolution.

    If ``conv_module.__mask__`` is set (see ``add_flops_mask``), only the
    spatial positions the mask marks as active are counted.
    """
    x = input[0]
    batch_size = x.shape[0]
    output_dims = list(output.shape[2:])
    per_position_flops = (int(np.prod(conv_module.kernel_size))
                          * conv_module.in_channels
                          * (conv_module.out_channels // conv_module.groups))
    active_elements_count = batch_size * np.prod(output_dims)
    if conv_module.__mask__ is not None:
        out_h, out_w = output.shape[2:]
        flops_mask = conv_module.__mask__.expand(batch_size, 1, out_h, out_w)
        active_elements_count = flops_mask.sum()
    total_flops = per_position_flops * active_elements_count
    if conv_module.bias is not None:
        total_flops = total_flops + conv_module.out_channels * active_elements_count
    conv_module.__flops__ += int(total_flops)
|
def batch_counter_hook(module, input, output):
    """Track the number of samples that have passed through the module."""
    if input:
        # Batch size is the length of the first positional input.
        batch_size = len(input[0])
    else:
        print('Warning! No positional inputs found for a module, assuming batch size is 1.')
        batch_size = 1
    module.__batch_counter__ += batch_size
|
def add_batch_counter_variables_or_reset(module):
    """(Re)initialize the sample counter used by ``batch_counter_hook``."""
    module.__batch_counter__ = 0
|
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook, at most once per module."""
    if not hasattr(module, '__batch_counter_handle__'):
        # Keep the handle so the hook can be removed later.
        module.__batch_counter_handle__ = module.register_forward_hook(
            batch_counter_hook)
|
def remove_batch_counter_hook_function(module):
    """Detach the batch-counting hook if one is attached (no-op otherwise)."""
    try:
        handle = module.__batch_counter_handle__
    except AttributeError:
        return
    handle.remove()
    del module.__batch_counter_handle__
|
def add_flops_counter_variable_or_reset(module):
    """(Re)initialize ``__flops__`` on modules that have a counting hook."""
    if is_supported_instance(module):
        module.__flops__ = 0
|
def add_flops_counter_hook_function(module):
    """Register the matching flops-counting hook on a supported module.

    Does nothing for unsupported modules or if a hook is already attached.
    """
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        return
    # First matching entry in hook_mapping wins; is_supported_instance
    # guarantees there is at least one.
    for mod_type, counter_hook in hook_mapping.items():
        if issubclass(type(module), mod_type):
            module.__flops_handle__ = module.register_forward_hook(counter_hook)
            break
|
def remove_flops_counter_hook_function(module):
    """Detach a previously registered flops-counting hook, if any."""
    if is_supported_instance(module) and hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
|
def add_flops_mask_variable_or_reset(module):
    """(Re)initialize ``__mask__`` (no masking) on supported modules."""
    if is_supported_instance(module):
        module.__mask__ = None
|
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get the root logger for the top-level package (e.g. "mmdet").

    The logger is initialized on first call: a StreamHandler is always
    added via ``basicConfig``; on rank 0, a FileHandler is added as well
    when ``log_file`` is given. Non-zero ranks are silenced to ERROR.

    Args:
        log_file (str | None): If given (rank 0 only), also log to this file.
        log_level (int): Level for rank 0; other ranks are set to ERROR.

    Returns:
        logging.Logger: The root logger.
    """
    logger = logging.getLogger(__name__.split('.')[0])
    if logger.hasHandlers():
        # Already initialized by an earlier call.
        return logger

    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=fmt, level=log_level)
    rank, _ = get_dist_info()
    if rank != 0:
        logger.setLevel('ERROR')
    elif log_file is not None:
        file_handler = logging.FileHandler(log_file, 'w')
        file_handler.setFormatter(logging.Formatter(fmt))
        file_handler.setLevel(log_level)
        logger.addHandler(file_handler)
    return logger
|
def print_log(msg, logger=None, level=logging.INFO):
    """Print a log message through the chosen channel.

    Args:
        msg (str): The message to be logged.
        logger (logging.Logger | str | None): Where to send the message:
            - None: plain ``print()``.
            - a Logger object: ``logger.log``.
            - "root": the logger from ``get_root_logger()``.
            - "silent": discard the message.
        level (int): Logging level, used for Logger objects and "root".

    Raises:
        TypeError: For any other ``logger`` value.
    """
    if logger is None:
        print(msg)
    elif isinstance(logger, logging.Logger):
        logger.log(level, msg)
    elif logger == 'root':
        get_root_logger().log(level, msg)
    elif logger != 'silent':
        raise TypeError('logger should be either a logging.Logger object, "root", "silent" or None, but got {}'.format(logger))
|
class Registry(object):
    """A name -> class registry with a decorator-style registration API."""

    def __init__(self, name):
        self._name = name
        self._module_dict = dict()

    def __repr__(self):
        return self.__class__.__name__ + '(name={}, items={})'.format(
            self._name, list(self._module_dict.keys()))

    @property
    def name(self):
        return self._name

    @property
    def module_dict(self):
        return self._module_dict

    def get(self, key):
        """Return the class registered under ``key``, or None."""
        return self._module_dict.get(key, None)

    def _register_module(self, module_class, force=False):
        """Register a class under its ``__name__``.

        Args:
            module_class (type): Class to be registered.
            force (bool): Overwrite an existing entry instead of raising.
        """
        if not inspect.isclass(module_class):
            raise TypeError('module must be a class, but got {}'.format(type(module_class)))
        module_name = module_class.__name__
        if module_name in self._module_dict and not force:
            raise KeyError('{} is already registered in {}'.format(module_name, self.name))
        self._module_dict[module_name] = module_class

    def register_module(self, cls=None, force=False):
        """Register ``cls``; usable bare or as ``@register_module(force=...)``."""
        if cls is None:
            # Called with arguments: return a decorator.
            return partial(self.register_module, force=force)
        self._register_module(cls, force=force)
        return cls
|
def build_from_cfg(cfg, registry, default_args=None):
    """Build an object from a config dict.

    Args:
        cfg (dict): Config dict containing at least the key "type", which
            is either a registered name or a class itself.
        registry (:obj:`Registry`): The registry to look the type up in.
        default_args (dict, optional): Defaults merged into the remaining
            kwargs (existing keys in ``cfg`` win).

    Returns:
        obj: The constructed object.
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    assert isinstance(default_args, dict) or (default_args is None)
    args = cfg.copy()
    obj_type = args.pop('type')
    if mmcv.is_str(obj_type):
        obj_cls = registry.get(obj_type)
        if obj_cls is None:
            raise KeyError('{} is not in the {} registry'.format(obj_type, registry.name))
    elif inspect.isclass(obj_type):
        # A class was passed directly; use it as-is.
        obj_cls = obj_type
    else:
        raise TypeError('type must be a str or valid type, but got {}'.format(type(obj_type)))
    if default_args is not None:
        for name, value in default_args.items():
            args.setdefault(name, value)
    return obj_cls(**args)
|
class NiceRepr(object):
    """Mixin deriving ``__str__``/``__repr__`` from a ``__nice__`` method.

    Subclasses should override ``__nice__``. If the subclass defines
    ``__len__``, the default ``__nice__`` reports the length. When neither
    is available, a RuntimeWarning is emitted and the plain object repr is
    used instead.

    Example:
        >>> class Foo(NiceRepr):
        ...    def __nice__(self):
        ...        return 'info'
        >>> foo = Foo()
        >>> assert str(foo) == '<Foo(info)>'
        >>> assert repr(foo).startswith('<Foo(info) at ')

    Example:
        >>> class Baz(NiceRepr):
        ...    def __len__(self):
        ...        return 5
        >>> baz = Baz()
        >>> assert str(baz) == '<Baz(5)>'
    """

    def __nice__(self):
        if hasattr(self, '__len__'):
            return str(len(self))
        raise NotImplementedError('Define the __nice__ method for {!r}'.format(self.__class__))

    def __repr__(self):
        try:
            return '<{}({}) at {}>'.format(
                self.__class__.__name__, self.__nice__(), hex(id(self)))
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)

    def __str__(self):
        try:
            return '<{}({})>'.format(self.__class__.__name__, self.__nice__())
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
|
def readme():
    """Return the contents of README.md as a UTF-8 string."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
|
def get_git_hash():
    """Return the current git HEAD sha, or 'unknown' if git is unavailable."""

    def _minimal_ext_cmd(cmd):
        # Run with a minimal, locale-pinned environment so git's output is
        # byte-stable across machines.
        env = {k: os.environ[k]
               for k in ('SYSTEMROOT', 'PATH', 'HOME')
               if os.environ.get(k) is not None}
        env.update(LANGUAGE='C', LANG='C', LC_ALL='C')
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        sha = 'unknown'
    return sha
|
def get_hash():
    """Best-effort short version hash for the build.

    Prefers the live git sha when run inside a checkout; otherwise falls
    back to the sha embedded in a previously generated version file.
    """
    if os.path.exists('.git'):
        return get_git_hash()[:7]
    if os.path.exists(version_file):
        try:
            from mmdet.version import __version__
        except ImportError:
            raise ImportError('Unable to get git version')
        # Versions are written as '<short_version>+<sha>'.
        return __version__.split('+')[-1]
    return 'unknown'
|
def write_version_py():
    """Generate the package version file (``version_file``).

    Writes ``__version__`` as SHORT_VERSION plus the current git/short
    hash, along with a generation timestamp.
    """
    content = "# GENERATED VERSION FILE\n# TIME: {}\n\n__version__ = '{}'\nshort_version = '{}'\n"
    sha = get_hash()
    VERSION = ((SHORT_VERSION + '+') + sha)
    with open(version_file, 'w') as f:
        f.write(content.format(time.asctime(), VERSION, SHORT_VERSION))
|
def get_version():
    """Read ``__version__`` from the generated version file.

    Executes the file in the local namespace instead of importing it so
    this works before the package is installed.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
|
def make_cuda_ext(name, module, sources):
    """Build a CUDAExtension spec named ``module.name``.

    Args:
        name (str): Extension name within the package module.
        module (str): Dotted package path; also the root of the sources.
        sources (list[str]): Source paths relative to ``module``.

    Raises:
        EnvironmentError: If CUDA is unavailable and FORCE_CUDA is not set.
    """
    if not (torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1'):
        raise EnvironmentError('CUDA is required to compile MMDetection!')
    return CUDAExtension(
        name='{}.{}'.format(module, name),
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=[('WITH_CUDA', None)],
        extra_compile_args={
            'cxx': [],
            # Disable half-precision operator overloads in nvcc builds.
            'nvcc': [
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ],
        })
|
def parse_requirements(fname='requirements.txt', with_version=True):
    'Parse the package dependencies listed in a requirements file but strips\n    specific versioning information.\n\n    Args:\n        fname (str): path to requirements file\n        with_version (bool, default=False): if True include version specs\n\n    Returns:\n        List[str]: list of requirements items\n\n    CommandLine:\n        python -c "import setup; print(setup.parse_requirements())"\n    '
    import sys
    from os.path import exists
    import re
    require_fpath = fname

    def parse_line(line):
        'Parse information from a line in a requirements text file.'
        if line.startswith('-r '):
            # '-r other.txt' includes another requirements file recursively.
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                (yield info)
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: package name comes from the #egg= suffix.
                info['package'] = line.split('#egg=')[1]
            else:
                # Split on the first version operator (>=, ==, >).
                pat = (('(' + '|'.join(['>=', '==', '>'])) + ')')
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if (len(parts) > 1):
                    (op, rest) = parts[1:]
                    if (';' in rest):
                        # A ';' introduces an environment marker, e.g.
                        # "pkg>=1.0; python_version<'3.0'".
                        (version, platform_deps) = map(str.strip, rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest
                    info['version'] = (op, version)
            (yield info)

    def parse_require_file(fpath):
        # Yield parsed info dicts, skipping blanks and comments.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if (line and (not line.startswith('#'))):
                    for info in parse_line(line):
                        (yield info)

    def gen_packages_items():
        # Reassemble each parsed entry into a requirement string; a missing
        # requirements file yields nothing.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if (with_version and ('version' in info)):
                    parts.extend(info['version'])
                if (not sys.version.startswith('3.4')):
                    # Environment markers are unsupported on Python 3.4.
                    platform_deps = info.get('platform_deps')
                    if (platform_deps is not None):
                        parts.append((';' + platform_deps))
                item = ''.join(parts)
                (yield item)
    packages = list(gen_packages_items())
    return packages
|
def test_max_iou_assigner():
    """Smoke-test MaxIoUAssigner on a small hand-crafted example."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([2, 3])
    result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert result.gt_inds.numel() == 4
    assert result.labels.numel() == 4
    expected = torch.LongTensor([1, 0, 2, 0])
    assert (result.gt_inds == expected).all()
|
def test_max_iou_assigner_with_ignore():
    """Check that ignore regions suppress assignment of overlapping boxes."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
    result = assigner.assign(bboxes, gt_bboxes,
                             gt_bboxes_ignore=gt_bboxes_ignore)
    # The last box overlaps the ignore region and must be marked -1.
    expected = torch.LongTensor([1, 0, 2, -1])
    assert (result.gt_inds == expected).all()
|
def test_max_iou_assigner_with_empty_gt():
    """Corner case: an image with no ground-truth boxes at all."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    result = assigner.assign(bboxes, torch.FloatTensor([]))
    # Every prediction should stay unassigned (index 0).
    assert (result.gt_inds == torch.LongTensor([0, 0, 0, 0])).all()
|
def test_max_iou_assigner_with_empty_boxes():
    """Corner case: the network predicts no boxes at all."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([2, 3])

    # With labels supplied the result carries an empty (0,) labels tensor.
    result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert result.gt_inds.numel() == 0
    assert tuple(result.labels.shape) == (0,)

    # Without labels the result has no labels tensor at all.
    result = assigner.assign(bboxes, gt_bboxes, gt_labels=None)
    assert result.gt_inds.numel() == 0
    assert result.labels is None
|
def test_max_iou_assigner_with_empty_boxes_and_ignore():
    """Corner case: no predicted boxes while ignore_iof_thr is enabled."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5,
                              ignore_iof_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
    gt_labels = torch.LongTensor([2, 3])

    # With labels supplied the result carries an empty (0,) labels tensor.
    result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels,
                             gt_bboxes_ignore=gt_bboxes_ignore)
    assert result.gt_inds.numel() == 0
    assert tuple(result.labels.shape) == (0,)

    # Without labels the result has no labels tensor at all.
    result = assigner.assign(bboxes, gt_bboxes, gt_labels=None,
                             gt_bboxes_ignore=gt_bboxes_ignore)
    assert result.gt_inds.numel() == 0
    assert result.labels is None
|
def test_max_iou_assigner_with_empty_boxes_and_gt():
    """Corner case: neither predicted boxes nor ground truth."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    result = assigner.assign(torch.empty((0, 4)), torch.empty((0, 4)))
    assert result.gt_inds.numel() == 0
|
def test_point_assigner():
    """Smoke-test PointAssigner on a small hand-crafted example."""
    assigner = PointAssigner()
    points = torch.FloatTensor([
        [0, 0, 1],
        [10, 10, 1],
        [5, 5, 1],
        [32, 32, 1],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    result = assigner.assign(points, gt_bboxes)
    assert (result.gt_inds == torch.LongTensor([1, 2, 1, 0])).all()
|
def test_point_assigner_with_empty_gt():
    """Corner case: an image with no ground-truth boxes at all."""
    assigner = PointAssigner()
    points = torch.FloatTensor([
        [0, 0, 1],
        [10, 10, 1],
        [5, 5, 1],
        [32, 32, 1],
    ])
    result = assigner.assign(points, torch.FloatTensor([]))
    # Every point should stay unassigned (index 0).
    assert (result.gt_inds == torch.LongTensor([0, 0, 0, 0])).all()
|
def test_point_assigner_with_empty_boxes_and_gt():
    """Corner case: neither predicted points nor ground truth."""
    assigner = PointAssigner()
    result = assigner.assign(torch.FloatTensor([]), torch.FloatTensor([]))
    assert result.gt_inds.numel() == 0
|
def test_approx_iou_assigner():
    """Smoke-test ApproxMaxIoUAssigner with one approx per octave."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    # With a single approx per octave, approxs and squares are simply the
    # candidate boxes themselves.
    result = assigner.assign(bboxes, bboxes, 1, gt_bboxes)
    assert (result.gt_inds == torch.LongTensor([1, 0, 2, 0])).all()
|
def test_approx_iou_assigner_with_empty_gt():
    """Corner case: an image with no ground-truth boxes at all."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    # approxs == squares == bboxes with one approx per octave.
    result = assigner.assign(bboxes, bboxes, 1, torch.FloatTensor([]))
    # Every prediction should stay unassigned (index 0).
    assert (result.gt_inds == torch.LongTensor([0, 0, 0, 0])).all()
|
def test_approx_iou_assigner_with_empty_boxes():
    """Corner case: the network predicts no boxes at all."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    # approxs == squares == bboxes with one approx per octave.
    result = assigner.assign(bboxes, bboxes, 1, gt_bboxes)
    assert result.gt_inds.numel() == 0
|
def test_approx_iou_assigner_with_empty_boxes_and_gt():
    """Corner case: neither predicted boxes nor ground truth."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    empty = torch.empty((0, 4))
    # approxs == squares == the (empty) candidate boxes.
    result = assigner.assign(empty, empty, 1, torch.empty((0, 4)))
    assert result.gt_inds.numel() == 0
|
def test_random_assign_result():
    """Exercise AssignResult.random over corner-case gt/pred counts."""
    from mmdet.core.bbox.assigners.assign_result import AssignResult
    AssignResult.random()
    # The (0, 3) pair is intentionally repeated, matching the original
    # sequence of calls exactly.
    cases = [(0, 0), (0, 3), (3, 3), (0, 3), (7, 7), (7, 64), (24, 3)]
    for num_gts, num_preds in cases:
        AssignResult.random(num_gts=num_gts, num_preds=num_preds)
|
class AsyncTestCase(asynctest.TestCase):
    """Base test case that awaits coroutine test methods with a timeout."""

    use_default_loop = False
    forbid_get_event_loop = True
    # Per-test timeout in seconds, overridable through the environment.
    TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))

    def _run_test_method(self, method):
        # A coroutine test is awaited under a hard timeout so that a hung
        # test fails instead of blocking the whole suite forever.
        outcome = method()
        if asyncio.iscoroutine(outcome):
            bounded = asyncio.wait_for(outcome, timeout=self.TEST_TIMEOUT)
            self.loop.run_until_complete(bounded)
|
class MaskRCNNDetector():
    """Async wrapper around a detector with a pool of CUDA streams.

    Each concurrent inference request borrows one stream from the queue,
    bounding concurrency to ``streamqueue_size``.
    """

    def __init__(self, model_config, checkpoint=None, streamqueue_size=3, device='cuda:0'):
        # Maximum number of concurrent inference requests (one stream each).
        self.streamqueue_size = streamqueue_size
        self.device = device
        # BUG FIX: the `checkpoint` argument was silently ignored before —
        # init_detector was always called with checkpoint=None, so user
        # weights were never loaded. Pass it through.
        self.model = init_detector(model_config, checkpoint=checkpoint, device=self.device)
        # Created lazily in init() because a Queue must be built inside the
        # running event loop.
        self.streamqueue = None

    async def init(self):
        """Populate the CUDA stream pool; must run inside the event loop."""
        self.streamqueue = asyncio.Queue()
        for _ in range(self.streamqueue_size):
            stream = torch.cuda.Stream(device=self.device)
            self.streamqueue.put_nowait(stream)

    if (sys.version_info >= (3, 7)):
        async def apredict(self, img):
            """Run async inference on one image (file path or ndarray)."""
            if isinstance(img, str):
                img = mmcv.imread(img)
            async with concurrent(self.streamqueue):
                result = (await async_inference_detector(self.model, img))
            return result
|
class AsyncInferenceTestCase(AsyncTestCase):
    """End-to-end async inference smoke test (requires a CUDA GPU)."""

    if (sys.version_info >= (3, 7)):

        async def test_simple_inference(self):
            if (not torch.cuda.is_available()):
                import pytest
                pytest.skip('test requires GPU and torch+cuda')
            # BUG FIX: this used dirname(dirname(__name__)), which operates
            # on the module *name* string and always yields '' — use
            # __file__ so the repository root is resolved relative to this
            # test file's location.
            root_dir = os.path.dirname(os.path.dirname(__file__))
            model_config = os.path.join(root_dir, 'configs/mask_rcnn_r50_fpn_1x.py')
            detector = MaskRCNNDetector(model_config)
            (await detector.init())
            img_path = os.path.join(root_dir, 'demo/demo.jpg')
            (bboxes, _) = (await detector.apredict(img_path))
            self.assertTrue(bboxes)
|
def _get_config_directory():
    """Locate the repository's predefined detector config directory."""
    try:
        repo_root = dirname(dirname(__file__))
    except NameError:
        # __file__ is undefined (e.g. interactive session); derive the
        # root from the installed mmdet package instead.
        import mmdet
        repo_root = dirname(dirname(mmdet.__file__))
    cfg_dir = join(repo_root, 'configs')
    if exists(cfg_dir):
        return cfg_dir
    raise Exception('Cannot find config path')
|
def test_config_build_detector():
    """Test that all detection models defined in the configs can be
    initialized."""
    from xdoctest.utils import import_module_from_path
    from mmdet.models import build_detector
    config_dpath = _get_config_directory()
    print('Found config_dpath = {!r}'.format(config_dpath))
    config_names = [
        'dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py',
        'htc/htc_without_semantic_r50_fpn_1x.py',
        'cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
        'grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py',
        'double_heads/dh_faster_rcnn_r50_fpn_1x.py',
        'empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x.py',
        'guided_anchoring/ga_rpn_r50_caffe_fpn_1x.py',
        'foveabox/fovea_r50_fpn_4gpu_1x.py',
        'foveabox/fovea_align_gn_ms_r50_fpn_4gpu_2x.py',
        'hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py',
        'gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py',
        'pascal_voc/ssd300_voc.py',
        'pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py',
        'pascal_voc/ssd512_voc.py',
        'gcnet/mask_rcnn_r50_fpn_sbn_1x.py',
        'gn/mask_rcnn_r50_fpn_gn_contrib_2x.py',
        'reppoints/reppoints_moment_r50_fpn_2x.py',
        'reppoints/reppoints_partial_minmax_r50_fpn_1x.py',
        'reppoints/bbox_r50_grid_center_fpn_1x.py',
        'reppoints/reppoints_minmax_r50_fpn_1x.py',
        'reppoints/bbox_r50_grid_fpn_1x.py',
        'fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py',
        'albu_example/mask_rcnn_r50_fpn_1x.py',
        'libra_rcnn/libra_faster_rcnn_r50_fpn_1x.py',
        'fp16/mask_rcnn_r50_fpn_fp16_1x.py',
        'fp16/faster_rcnn_r50_fpn_fp16_1x.py',
    ]
    print('Using {} config files'.format(len(config_names)))
    for cfg_name in config_names:
        config_fpath = join(config_dpath, cfg_name)
        cfg_mod = import_module_from_path(config_fpath)
        # Touch the required attributes so a missing one fails loudly here.
        cfg_mod.model
        cfg_mod.train_cfg
        cfg_mod.test_cfg
        print('Building detector, config_fpath = {!r}'.format(config_fpath))
        # Avoid downloading pretrained weights during the test.
        if 'pretrained' in cfg_mod.model:
            cfg_mod.model['pretrained'] = None
        detector = build_detector(cfg_mod.model,
                                  train_cfg=cfg_mod.train_cfg,
                                  test_cfg=cfg_mod.test_cfg)
        assert detector is not None
|
def test_config_data_pipeline():
    """Test whether the data pipeline is valid and can process corner cases.

    CommandLine:
        xdoctest -m tests/test_config.py test_config_build_data_pipeline
    """
    from xdoctest.utils import import_module_from_path
    from mmdet.datasets.pipelines import Compose
    import numpy as np

    def _make_results(img, with_gt):
        # Assemble the minimal `results` dict the pipelines expect;
        # `with_gt` toggles between one synthetic box/mask and empty GT.
        if with_gt:
            gt_bboxes = np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32)
            gt_labels = np.array([1], dtype=np.int64)
            gt_masks = [(img[(..., 0)] == 233).astype(np.uint8)]
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
            gt_masks = []
        results = dict(filename='test_img.png', img=img, img_shape=img.shape,
                       ori_shape=img.shape, gt_bboxes=gt_bboxes,
                       gt_labels=gt_labels, gt_masks=gt_masks)
        results['bbox_fields'] = ['gt_bboxes']
        results['mask_fields'] = ['gt_masks']
        return results

    config_dpath = _get_config_directory()
    print('Found config_dpath = {!r}'.format(config_dpath))
    config_names = [
        'wider_face/ssd300_wider_face.py',
        'pascal_voc/ssd300_voc.py',
        'pascal_voc/ssd512_voc.py',
        'fp16/mask_rcnn_r50_fpn_fp16_1x.py',
    ]
    print('Using {} config files'.format(len(config_names)))
    for cfg_name in config_names:
        config_fpath = join(config_dpath, cfg_name)
        cfg_mod = import_module_from_path(config_fpath)
        # Remove the loading steps: raw arrays are injected directly below.
        loading_pipeline = cfg_mod.train_pipeline.pop(0)
        cfg_mod.train_pipeline.pop(0)
        cfg_mod.test_pipeline.pop(0)
        train_pipeline = Compose(cfg_mod.train_pipeline)
        test_pipeline = Compose(cfg_mod.test_pipeline)
        print('Building data pipeline, config_fpath = {!r}'.format(config_fpath))

        print('Test training data pipeline: \n{!r}'.format(train_pipeline))
        img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8)
        if loading_pipeline.get('to_float32', False):
            img = img.astype(np.float32)
        assert train_pipeline(_make_results(img, with_gt=True)) is not None

        print('Test testing data pipeline: \n{!r}'.format(test_pipeline))
        assert test_pipeline(_make_results(img, with_gt=True)) is not None

        print('Test empty GT with training data pipeline: \n{!r}'.format(train_pipeline))
        assert train_pipeline(_make_results(img, with_gt=False)) is not None

        print('Test empty GT with testing data pipeline: \n{!r}'.format(test_pipeline))
        assert test_pipeline(_make_results(img, with_gt=False)) is not None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.