code stringlengths 17 6.64M |
|---|
@DATASETS.register_module()
class XMLDataset(CustomDataset):
    """XML dataset for detection.

    Args:
        min_size (int | float, optional): The minimum size of bounding
            boxes in the images. If the size of a bounding box is less than
            ``min_size``, it would be added to the ignored field.
        img_subdir (str): Subdir where images are stored. Default: JPEGImages.
        ann_subdir (str): Subdir where annotations are. Default: Annotations.
    """

    def __init__(self,
                 min_size=None,
                 img_subdir='JPEGImages',
                 ann_subdir='Annotations',
                 **kwargs):
        assert self.CLASSES or kwargs.get(
            'classes', None), 'CLASSES in `XMLDataset` can not be None.'
        self.img_subdir = img_subdir
        self.ann_subdir = ann_subdir
        super(XMLDataset, self).__init__(**kwargs)
        # Map each class name to a contiguous integer label.
        self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}
        self.min_size = min_size

    def load_annotations(self, ann_file):
        """Load annotation from XML style ann_file.

        Args:
            ann_file (str): Path of XML file.

        Returns:
            list[dict]: Annotation info from XML file.
        """
        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = osp.join(self.img_subdir, f'{img_id}.jpg')
            xml_path = osp.join(self.img_prefix, self.ann_subdir,
                                f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            if size is not None:
                width = int(size.find('width').text)
                height = int(size.find('height').text)
            else:
                # The XML has no ``size`` node; read the dimensions from the
                # image file itself. Use a context manager so the file handle
                # that ``Image.open`` keeps open (PIL loads lazily) is
                # released instead of leaking one fd per size-less image.
                img_path = osp.join(self.img_prefix, filename)
                with Image.open(img_path) as img:
                    width, height = img.size
            data_infos.append(
                dict(id=img_id, filename=filename, width=width,
                     height=height))

        return data_infos

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without annotation."""
        valid_inds = []
        for i, img_info in enumerate(self.data_infos):
            if min(img_info['width'], img_info['height']) < min_size:
                continue
            if self.filter_empty_gt:
                img_id = img_info['id']
                xml_path = osp.join(self.img_prefix, self.ann_subdir,
                                    f'{img_id}.xml')
                tree = ET.parse(xml_path)
                root = tree.getroot()
                # Keep the image only if at least one annotated object
                # belongs to a class of interest.
                for obj in root.findall('object'):
                    name = obj.find('name').text
                    if name in self.CLASSES:
                        valid_inds.append(i)
                        break
            else:
                valid_inds.append(i)
        return valid_inds

    def get_ann_info(self, idx):
        """Get annotation from XML file by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for obj in root.findall('object'):
            name = obj.find('name').text
            if name not in self.CLASSES:
                continue
            label = self.cat2label[name]
            difficult = obj.find('difficult')
            difficult = 0 if difficult is None else int(difficult.text)
            bnd_box = obj.find('bndbox')
            # Coordinates may be stored as floats in some annotation files,
            # so parse via float before truncating to int.
            bbox = [
                int(float(bnd_box.find('xmin').text)),
                int(float(bnd_box.find('ymin').text)),
                int(float(bnd_box.find('xmax').text)),
                int(float(bnd_box.find('ymax').text))
            ]
            ignore = False
            if self.min_size:
                assert not self.test_mode
                w = bbox[2] - bbox[0]
                h = bbox[3] - bbox[1]
                if w < self.min_size or h < self.min_size:
                    ignore = True
            if difficult or ignore:
                bboxes_ignore.append(bbox)
                labels_ignore.append(label)
            else:
                bboxes.append(bbox)
                labels.append(label)
        if not bboxes:
            bboxes = np.zeros((0, 4))
            labels = np.zeros((0, ))
        else:
            # VOC-style boxes are 1-indexed; shift to 0-indexed pixels.
            bboxes = np.array(bboxes, ndmin=2) - 1
            labels = np.array(labels)
        if not bboxes_ignore:
            bboxes_ignore = np.zeros((0, 4))
            labels_ignore = np.zeros((0, ))
        else:
            bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
            labels_ignore = np.array(labels_ignore)
        ann = dict(
            bboxes=bboxes.astype(np.float32),
            labels=labels.astype(np.int64),
            bboxes_ignore=bboxes_ignore.astype(np.float32),
            labels_ignore=labels_ignore.astype(np.int64))
        return ann

    def get_cat_ids(self, idx):
        """Get category ids in XML file by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        cat_ids = []
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        for obj in root.findall('object'):
            name = obj.find('name').text
            if name not in self.CLASSES:
                continue
            cat_ids.append(self.cat2label[name])
        return cat_ids
|
class ResBlock(BaseModule):
    """Basic residual block of Darknet.

    A stack of two ConvModules (Conv-BN-LeakyReLU) whose output is summed
    with the block input. Following the YOLOv3 paper, the first conv
    squeezes the channel count to half with a 1x1 kernel and the second
    conv restores it with a 3x3 kernel.

    Args:
        in_channels (int): The input channels. Must be even.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(ResBlock, self).__init__(init_cfg)
        # The 1x1 conv halves the channel count, so it must be even.
        assert in_channels % 2 == 0
        squeezed_channels = in_channels // 2
        common_cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg,
                          act_cfg=act_cfg)
        self.conv1 = ConvModule(in_channels, squeezed_channels, 1,
                                **common_cfg)
        self.conv2 = ConvModule(squeezed_channels, in_channels, 3, padding=1,
                                **common_cfg)

    def forward(self, x):
        """Add the two-conv transform of ``x`` back onto ``x``."""
        return x + self.conv2(self.conv1(x))
|
@BACKBONES.register_module()
class Darknet(BaseModule):
    """Darknet backbone.

    Args:
        depth (int): Depth of Darknet. Currently only support 53.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Default: -1.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import Darknet
        >>> import torch
        >>> self = Darknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """

    # depth -> (res blocks per stage, (in_channels, out_channels) per stage)
    arch_settings = {
        53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
                               (512, 1024)))
    }

    def __init__(self,
                 depth=53,
                 out_indices=(3, 4, 5),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 norm_eval=True,
                 pretrained=None,
                 init_cfg=None):
        super(Darknet, self).__init__(init_cfg)
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for darknet')

        self.depth = depth
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.layers, self.channels = self.arch_settings[depth]

        layer_cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg,
                         act_cfg=act_cfg)

        # Stem conv followed by the five conv+residual stages.
        self.conv1 = ConvModule(3, 32, 3, padding=1, **layer_cfg)

        self.cr_blocks = ['conv1']
        for stage_idx, n_layers in enumerate(self.layers):
            stage_name = f'conv_res_block{stage_idx + 1}'
            in_c, out_c = self.channels[stage_idx]
            self.add_module(
                stage_name,
                self.make_conv_res_block(in_c, out_c, n_layers, **layer_cfg))
            self.cr_blocks.append(stage_name)

        self.norm_eval = norm_eval

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                # Default init: Kaiming for convs, constant for norm layers.
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the stem and every stage, collecting requested outputs."""
        outs = []
        for idx, block_name in enumerate(self.cr_blocks):
            x = getattr(self, block_name)(x)
            if idx in self.out_indices:
                outs.append(x)
        return tuple(outs)

    def _freeze_stages(self):
        """Freeze the first ``frozen_stages`` blocks (eval + no grad)."""
        if self.frozen_stages >= 0:
            for stage_idx in range(self.frozen_stages):
                module = getattr(self, self.cr_blocks[stage_idx])
                module.eval()
                for param in module.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        super(Darknet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            # Keep BN running statistics frozen while training.
            for module in self.modules():
                if isinstance(module, _BatchNorm):
                    module.eval()

    @staticmethod
    def make_conv_res_block(in_channels,
                            out_channels,
                            res_repeat,
                            conv_cfg=None,
                            norm_cfg=dict(type='BN', requires_grad=True),
                            act_cfg=dict(type='LeakyReLU',
                                         negative_slope=0.1)):
        """Build one stride-2 3x3 conv followed by ``res_repeat`` ResBlocks.

        In Darknet backbone, ConvLayer is usually followed by ResBlock. The
        conv layer always has a 3x3 filter with stride=2 and the same number
        of filters as the out channels of the ResBlock.

        Args:
            in_channels (int): The number of input channels.
            out_channels (int): The number of output channels.
            res_repeat (int): The number of ResBlocks.
            conv_cfg (dict): Config dict for convolution layer. Default: None.
            norm_cfg (dict): Dictionary to construct and config norm layer.
                Default: dict(type='BN', requires_grad=True)
            act_cfg (dict): Config dict for activation layer.
                Default: dict(type='LeakyReLU', negative_slope=0.1).
        """
        shared_cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg,
                          act_cfg=act_cfg)
        block = nn.Sequential()
        block.add_module(
            'conv',
            ConvModule(
                in_channels, out_channels, 3, stride=2, padding=1,
                **shared_cfg))
        for idx in range(res_repeat):
            block.add_module(f'res{idx}', ResBlock(out_channels,
                                                   **shared_cfg))
        return block
|
class Bottleneck(_Bottleneck):
    """Bottleneck for the ResNet backbone in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_.

    This bottleneck allows the users to specify whether to use
    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).

    Args:
        inplanes (int): The number of input channels.
        planes (int): The number of output channels before expansion.
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        sac (dict, optional): Dictionary to construct SAC. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    expansion = 4

    def __init__(self, inplanes, planes, rfp_inplanes=None, sac=None, init_cfg=None, **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, init_cfg=init_cfg, **kwargs)
        assert ((sac is None) or isinstance(sac, dict))
        self.sac = sac
        self.with_sac = (sac is not None)
        if self.with_sac:
            # Swap the base class's 3x3 conv2 for an SAC conv with the same
            # geometry (stride/dilation come from the base class attributes).
            self.conv2 = build_conv_layer(self.sac, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)
        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            # 1x1 projection mapping the RFP feature onto this block's output
            # width so the two can be summed in ``rfp_forward``.
            self.rfp_conv = build_conv_layer(None, self.rfp_inplanes, (planes * self.expansion), 1, stride=1, bias=True)
            if (init_cfg is None):
                # Zero-init ``rfp_conv`` so the RFP branch initially adds
                # nothing and training starts from plain bottleneck behavior.
                self.init_cfg = dict(type='Constant', val=0, override=dict(name='rfp_conv'))

    def rfp_forward(self, x, rfp_feat):
        """The forward function that also takes the RFP features as input.

        Args:
            x (Tensor): Input feature map.
            rfp_feat (Tensor): RFP feature; only consumed when
                ``rfp_inplanes`` was given at construction time.

        Returns:
            Tensor: Output feature map of the block.
        """

        def _inner_forward(x):
            # Standard bottleneck path: 1x1 -> 3x3 -> 1x1 with optional
            # plugins after each conv, then the identity shortcut.
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out

        if (self.with_cp and x.requires_grad):
            # Gradient checkpointing: recompute activations in backward to
            # save memory; only valid when gradients are needed.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        if self.rfp_inplanes:
            # Fuse the projected RFP feature before the final activation.
            rfp_feat = self.rfp_conv(rfp_feat)
            out = (out + rfp_feat)
        out = self.relu(out)
        return out
|
class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone for RPF in detectoRS.

    Unlike the base class, ``rfp_inplanes`` is forwarded to the first block
    so it can fuse the Recursive Feature Pyramid feature.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 downsample_first=True,
                 rfp_inplanes=None,
                 **kwargs):
        self.block = block
        assert downsample_first, f'downsample_first={downsample_first} is not supported in DetectoRS'

        # Build the projection shortcut when the first block changes the
        # spatial stride or the channel count.
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            down_layers = []
            conv_stride = stride
            if avg_down and stride != 1:
                # Let an AvgPool handle the spatial reduction and keep the
                # 1x1 conv at stride 1.
                conv_stride = 1
                down_layers.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            down_layers.append(
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=conv_stride,
                    bias=False))
            down_layers.append(
                build_norm_layer(norm_cfg, planes * block.expansion)[1])
            downsample = nn.Sequential(*down_layers)

        # Only the first block downsamples and receives the RFP channels.
        blocks = [
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                rfp_inplanes=rfp_inplanes,
                **kwargs)
        ]
        inplanes = planes * block.expansion
        blocks.extend(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                **kwargs) for _ in range(num_blocks - 1))

        super(ResLayer, self).__init__(*blocks)
|
@BACKBONES.register_module()
class DetectoRS_ResNet(ResNet):
    """ResNet backbone for DetectoRS.

    Args:
        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
            Convolution). Default: None.
        stage_with_sac (list): Which stage to use sac. Default: (False, False,
            False, False).
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        output_img (bool): If ``True``, the input image will be inserted into
            the starting position of output. Default: False.
    """

    arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, sac=None, stage_with_sac=(False, False, False, False), rfp_inplanes=None, output_img=False, pretrained=None, init_cfg=None, **kwargs):
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        self.pretrained = pretrained
        if (init_cfg is not None):
            # Only a ``type='Pretrained'`` init_cfg is accepted; its
            # checkpoint path is funneled into ``self.pretrained`` and loaded
            # in ``init_weights`` below.
            assert isinstance(init_cfg, dict), f'init_cfg must be a dict, but got {type(init_cfg)}'
            if ('type' in init_cfg):
                assert (init_cfg.get('type') == 'Pretrained'), 'Only can initialize module by loading a pretrained model'
            else:
                raise KeyError('`init_cfg` must contain the key "type"')
            self.pretrained = init_cfg.get('checkpoint')
        # NOTE(review): these attributes are set before the base constructor
        # runs — presumably because base-class initialization already touches
        # layer building; confirm against the ``ResNet`` implementation.
        self.sac = sac
        self.stage_with_sac = stage_with_sac
        self.rfp_inplanes = rfp_inplanes
        self.output_img = output_img
        super(DetectoRS_ResNet, self).__init__(**kwargs)

        # Rebuild every residual stage so SAC and the RFP input conv can be
        # threaded into the blocks via ``make_res_layer``.
        self.inplanes = self.stem_channels
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            sac = (self.sac if self.stage_with_sac[i] else None)
            if (self.plugins is not None):
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            planes = (self.base_channels * (2 ** i))
            # RFP feedback is only wired into stages after the first one.
            res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, sac=sac, rfp_inplanes=(rfp_inplanes if (i > 0) else None), plugins=stage_plugins)
            self.inplanes = (planes * self.block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()

    def init_weights(self):
        """Initialize weights, optionally from ``self.pretrained``.

        When no checkpoint is given: Kaiming-init convs, constant-init norm
        layers, zero-init DCN offsets, and optionally zero-init the last
        norm of each residual block.
        """
        if isinstance(self.pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, self.pretrained, strict=False, logger=logger)
        elif (self.pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if (self.dcn is not None):
                # Zero offsets make deformable convs start as regular convs.
                for m in self.modules():
                    if (isinstance(m, Bottleneck) and hasattr(m.conv2, 'conv_offset')):
                        constant_init(m.conv2.conv_offset, 0)
            if self.zero_init_residual:
                # Zeroing the last norm makes each residual block start as an
                # identity mapping.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
        return ResLayer(**kwargs)

    def forward(self, x):
        """Forward function; optionally prepend the raw input to the outs."""
        outs = list(super(DetectoRS_ResNet, self).forward(x))
        if self.output_img:
            outs.insert(0, x)
        return tuple(outs)

    def rfp_forward(self, x, rfp_feats):
        """Forward function for RFP.

        Args:
            x (Tensor): Input tensor.
            rfp_feats (sequence): Per-stage RFP features; index 0 is never
                read because the first stage takes no RFP input.

        Returns:
            tuple[Tensor]: Features of the stages in ``out_indices``.
        """
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            # The first stage receives no RFP feature (mirrors __init__).
            rfp_feat = (rfp_feats[i] if (i > 0) else None)
            # Blocks expose ``rfp_forward`` instead of plain forward so the
            # RFP feature can be injected into every block of the stage.
            for layer in res_layer:
                x = layer.rfp_forward(x, rfp_feat)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)
|
class Bottleneck(_Bottleneck):
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        """Bottleneck block for ResNeXt.

        When ``style`` is "pytorch" the stride-two layer is the 3x3 conv;
        when it is "caffe" the stride-two layer is the first 1x1 conv.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # ResNeXt widens the middle convs according to groups/base_width.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)

        # Geometry shared by every conv2 variant below.
        conv2_kwargs = dict(
            kernel_size=3,
            stride=self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            bias=False)

        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_sac:
            # SAC takes precedence over DCN/plain conv for conv2.
            self.conv2 = build_conv_layer(self.sac, width, width,
                                          **conv2_kwargs)
        elif not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(self.conv_cfg, width, width,
                                          **conv2_kwargs)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn, width, width,
                                          **conv2_kwargs)
        self.add_module(self.norm2_name, norm2)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
|
@BACKBONES.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
    """ResNeXt backbone for DetectoRS.

    Args:
        groups (int): The number of groups in ResNeXt.
        base_width (int): The base width of ResNeXt.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # Record the grouping configuration before the base constructor
        # builds the residual layers through ``make_res_layer``.
        self.groups = groups
        self.base_width = base_width
        super(DetectoRS_ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Build a ``ResLayer`` whose blocks use the ResNeXt grouping."""
        return super().make_res_layer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
|
class HourglassModule(BaseModule):
    """Hourglass Module for HourglassNet backbone.

    Generate module recursively and use BasicBlock as the base unit.

    Args:
        depth (int): Depth of current HourglassModule.
        stage_channels (list[int]): Feature channels of sub-modules in current
            and follow-up HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in current and
            follow-up HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
        upsample_cfg (dict, optional): Config dict for interpolate layer.
            Default: `dict(mode='nearest')`
    """

    def __init__(self,
                 depth,
                 stage_channels,
                 stage_blocks,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 init_cfg=None,
                 upsample_cfg=dict(mode='nearest')):
        super(HourglassModule, self).__init__(init_cfg)

        self.depth = depth

        cur_block = stage_blocks[0]
        next_block = stage_blocks[1]

        cur_channel = stage_channels[0]
        next_channel = stage_channels[1]

        # High-resolution skip branch.
        self.up1 = ResLayer(
            BasicBlock, cur_channel, cur_channel, cur_block,
            norm_cfg=norm_cfg)

        # Downsampling branch (stride 2).
        self.low1 = ResLayer(
            BasicBlock,
            cur_channel,
            next_channel,
            cur_block,
            stride=2,
            norm_cfg=norm_cfg)

        if self.depth > 1:
            # Fix: propagate ``norm_cfg`` and ``upsample_cfg`` into the
            # recursive sub-module. Previously the recursion silently fell
            # back to the defaults even when the caller customized them.
            self.low2 = HourglassModule(
                depth - 1,
                stage_channels[1:],
                stage_blocks[1:],
                norm_cfg=norm_cfg,
                upsample_cfg=upsample_cfg)
        else:
            # Bottom of the hourglass.
            self.low2 = ResLayer(
                BasicBlock,
                next_channel,
                next_channel,
                next_block,
                norm_cfg=norm_cfg)

        # Channel-restoring branch before upsampling; downsample (channel
        # change) happens at the last block, hence ``downsample_first=False``.
        self.low3 = ResLayer(
            BasicBlock,
            next_channel,
            cur_channel,
            cur_block,
            norm_cfg=norm_cfg,
            downsample_first=False)

        self.up2 = F.interpolate
        self.upsample_cfg = upsample_cfg

    def forward(self, x):
        """Forward function."""
        up1 = self.up1(x)
        low1 = self.low1(x)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        if 'scale_factor' in self.upsample_cfg:
            up2 = self.up2(low3, **self.upsample_cfg)
        else:
            # Match the skip branch's spatial size exactly so the addition
            # below works even for odd input resolutions.
            shape = up1.shape[2:]
            up2 = self.up2(low3, size=shape, **self.upsample_cfg)
        return up1 + up2
|
@BACKBONES.register_module()
class HourglassNet(BaseModule):
    """HourglassNet backbone.

    Stacked Hourglass Networks for Human Pose Estimation.
    More details can be found in the `paper
    <https://arxiv.org/abs/1603.06937>`_ .

    Args:
        downsample_times (int): Downsample times in a HourglassModule.
        num_stacks (int): Number of HourglassModule modules stacked,
            1 for Hourglass-52, 2 for Hourglass-104.
        stage_channels (list[int]): Feature channel of each sub-module in a
            HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in a
            HourglassModule.
        feat_channel (int): Feature channel of conv after a HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import HourglassNet
        >>> import torch
        >>> self = HourglassNet()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 511, 511)
        >>> level_outputs = self.forward(inputs)
        >>> for level_output in level_outputs:
        ...     print(tuple(level_output.shape))
        (1, 256, 128, 128)
        (1, 256, 128, 128)
    """

    def __init__(self, downsample_times=5, num_stacks=2, stage_channels=(256, 256, 384, 384, 384, 512), stage_blocks=(2, 2, 2, 2, 2, 4), feat_channel=256, norm_cfg=dict(type='BN', requires_grad=True), pretrained=None, init_cfg=None):
        # NOTE(review): ``pretrained`` is accepted but never used in this
        # constructor — presumably checkpoints are loaded elsewhere; confirm
        # before relying on it.
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super(HourglassNet, self).__init__(init_cfg)
        self.num_stacks = num_stacks
        assert (self.num_stacks >= 1)
        assert (len(stage_channels) == len(stage_blocks))
        # Need one channel spec per downsample level plus the bottom level.
        assert (len(stage_channels) > downsample_times)
        cur_channel = stage_channels[0]
        # Stem: 7x7 stride-2 conv followed by a stride-2 residual layer
        # (4x total downsampling before the hourglass stacks).
        self.stem = nn.Sequential(ConvModule(3, (cur_channel // 2), 7, padding=3, stride=2, norm_cfg=norm_cfg), ResLayer(BasicBlock, (cur_channel // 2), cur_channel, 1, stride=2, norm_cfg=norm_cfg))
        self.hourglass_modules = nn.ModuleList([HourglassModule(downsample_times, stage_channels, stage_blocks) for _ in range(num_stacks)])
        # Intermediate residual layers between consecutive stacks; there are
        # ``num_stacks - 1`` of them (none after the last stack).
        self.inters = ResLayer(BasicBlock, cur_channel, cur_channel, (num_stacks - 1), norm_cfg=norm_cfg)
        # 1x1 convs (no activation) used to merge the previous intermediate
        # feature with the remapped stack output in ``forward``.
        self.conv1x1s = nn.ModuleList([ConvModule(cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range((num_stacks - 1))])
        # Output convs producing the per-stack feature maps.
        self.out_convs = nn.ModuleList([ConvModule(cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) for _ in range(num_stacks)])
        # Map each stack's output back to ``cur_channel`` for the merge.
        self.remap_convs = nn.ModuleList([ConvModule(feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range((num_stacks - 1))])
        self.relu = nn.ReLU(inplace=True)

    def init_weights(self):
        """Init module weights."""
        # Training a downstream detector with BatchNorm typically needs a
        # large batch; re-running the default conv init keeps behavior
        # deterministic regardless of the base-class init_cfg handling.
        super(HourglassNet, self).init_weights()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.reset_parameters()

    def forward(self, x):
        """Forward function.

        Returns:
            list[Tensor]: One feature map per hourglass stack
            (intermediate supervision).
        """
        inter_feat = self.stem(x)
        out_feats = []
        for ind in range(self.num_stacks):
            single_hourglass = self.hourglass_modules[ind]
            out_conv = self.out_convs[ind]
            hourglass_feat = single_hourglass(inter_feat)
            out_feat = out_conv(hourglass_feat)
            out_feats.append(out_feat)
            # Between stacks, fuse the previous intermediate feature with
            # the remapped output and refine with a residual layer.
            if (ind < (self.num_stacks - 1)):
                inter_feat = (self.conv1x1s[ind](inter_feat) + self.remap_convs[ind](out_feat))
                inter_feat = self.inters[ind](self.relu(inter_feat))
        return out_feats
|
class HRModule(BaseModule):
    """High-Resolution Module for HRNet.

    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
    is in this module.
    """

    def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), block_init_cfg=None, init_cfg=None):
        super(HRModule, self).__init__(init_cfg)
        self.block_init_cfg = block_init_cfg
        # Validate that all per-branch config lists agree in length.
        self._check_branches(num_branches, num_blocks, in_channels, num_channels)
        self.in_channels = in_channels
        self.num_branches = num_branches
        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels, num_channels):
        """Raise ``ValueError`` if any per-branch list length does not match
        ``num_branches``."""
        if (num_branches != len(num_blocks)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_BLOCKS({len(num_blocks)})'
            raise ValueError(error_msg)
        if (num_branches != len(num_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_CHANNELS({len(num_channels)})'
            raise ValueError(error_msg)
        if (num_branches != len(in_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_INCHANNELS({len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        """Stack ``num_blocks[branch_index]`` blocks for one resolution branch."""
        downsample = None
        # A projection shortcut is required when the first block changes the
        # spatial stride or the channel count.
        if ((stride != 1) or (self.in_channels[branch_index] != (num_channels[branch_index] * block.expansion))):
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.in_channels[branch_index], (num_channels[branch_index] * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (num_channels[branch_index] * block.expansion))[1])
        layers = []
        layers.append(block(self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg))
        # Subsequent blocks keep stride 1 and the expanded channel count.
        self.in_channels[branch_index] = (num_channels[branch_index] * block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg))
        return Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build one block stack per resolution branch."""
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return ModuleList(branches)

    def _make_fuse_layers(self):
        """Build the cross-resolution fusion layers.

        ``fuse_layers[i][j]`` transforms branch ``j``'s output to branch
        ``i``'s resolution/channels: a 1x1 conv + nearest upsample when
        ``j > i`` (coarser to finer), identity (``None``) when ``j == i``,
        and a chain of stride-2 3x3 convs when ``j < i`` (finer to coarser).
        """
        if (self.num_branches == 1):
            return None
        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = (num_branches if self.multiscale_output else 1)
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    # Upsample path: align channels with 1x1 conv first, then
                    # scale spatially by 2**(j - i).
                    fuse_layer.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], nn.Upsample(scale_factor=(2 ** (j - i)), mode='nearest')))
                elif (j == i):
                    fuse_layer.append(None)
                else:
                    # Downsample path: (i - j) stride-2 convs. Only the last
                    # one switches to the target channel count, and it has no
                    # activation so the sum in ``forward`` stays pre-ReLU.
                    conv_downsamples = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1]))
                        else:
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Forward function.

        Args:
            x (list[Tensor]): One input feature map per branch. Note the
                list is modified in place with each branch's output.

        Returns:
            list[Tensor]: Fused feature maps, one per output branch.
        """
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            # Sum the (resized) contributions of every branch, then ReLU.
            y = 0
            for j in range(self.num_branches):
                if (i == j):
                    y += x[j]
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
|
@BACKBONES.register_module()
class HRNet(BaseModule):
    """HRNet backbone.

    `High-Resolution Representations for Labeling Pixels and Regions
    arXiv: <https://arxiv.org/abs/1904.04514>`_.

    Args:
        extra (dict): Detailed configuration for each stage of HRNet.
            There must be 4 stages, and the configuration for each stage
            must have 5 keys:

            - num_modules (int): The number of HRModule in this stage.
            - num_branches (int): The number of branches in the HRModule.
            - block (str): The type of convolution block
              ('BASIC' or 'BOTTLENECK').
            - num_blocks (tuple): The number of blocks in each branch.
              The length must be equal to num_branches.
            - num_channels (tuple): The number of channels in each branch.
              The length must be equal to num_branches.
        in_channels (int): Number of input image channels. Default: 3.
        conv_cfg (dict): Dictionary to construct and config conv layer.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: True.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Default: False.
        zero_init_residual (bool): Whether to use zero init for the last
            norm layer in resblocks to let them behave as identity.
            Default: False.
        multiscale_output (bool): Whether to output multi-level features
            produced by multiple branches. If False, only the first level
            feature will be output. Default: True.
        pretrained (str, optional): Model pretrained path. Deprecated in
            favor of ``init_cfg``. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.
    """

    # Maps the 'block' string in a stage config to the block class.
    blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}

    def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN'), norm_eval=True, with_cp=False, zero_init_residual=False, multiscale_output=True, pretrained=None, init_cfg=None):
        super(HRNet, self).__init__(init_cfg)
        self.pretrained = pretrained
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            # Legacy path: translate `pretrained` into a Pretrained init_cfg.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                # Default init: Kaiming for convs, constant 1 for norm layers.
                self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
        else:
            raise TypeError('pretrained must be a str or None')
        # Validate the stage configs: all 4 stages present and per-branch
        # tuple lengths consistent with num_branches.
        assert (('stage1' in extra) and ('stage2' in extra) and ('stage3' in extra) and ('stage4' in extra))
        for i in range(4):
            cfg = extra[f'stage{(i + 1)}']
            assert ((len(cfg['num_blocks']) == cfg['num_branches']) and (len(cfg['num_channels']) == cfg['num_branches']))
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual
        # Stem: two stride-2 3x3 convs to 64 channels (overall stride 4).
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, 64, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, 64, postfix=2)
        self.conv1 = build_conv_layer(self.conv_cfg, in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(self.conv_cfg, 64, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: a single branch of residual blocks.
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]
        block = self.blocks_dict[block_type]
        stage1_out_channels = (num_channels * block.expansion)
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        # Stage 2: transition from one branch to multiple branches.
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']
        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels)
        (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
        # Stage 3.
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']
        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
        # Stage 4: may collapse to a single output scale.
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']
        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multiscale_output=multiscale_output)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        return getattr(self, self.norm2_name)

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Build layers adapting the previous stage's outputs to the next
        stage's branch count and channel widths.

        Returns an ``nn.ModuleList`` with one entry per new branch:
        ``None`` (identity), a channel-conversion conv, or a chain of
        stride-2 convs for newly created lower-resolution branches.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    # Same resolution, different channels: 3x3 conv adapter.
                    transition_layers.append(nn.Sequential(build_conv_layer(self.conv_cfg, num_channels_pre_layer[i], num_channels_cur_layer[i], kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, num_channels_cur_layer[i])[1], nn.ReLU(inplace=True)))
                else:
                    # Identity: handled as None in forward().
                    transition_layers.append(None)
            else:
                # New branch: downsample from the last (lowest-resolution)
                # existing branch; only the final conv changes channels.
                conv_downsamples = []
                for j in range(((i + 1) - num_branches_pre)):
                    in_channels = num_channels_pre_layer[(- 1)]
                    out_channels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else in_channels)
                    conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, out_channels)[1], nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks (used for stage 1's layer1)."""
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            # Projection shortcut to match stride/channels.
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (planes * block.expansion))[1])
        layers = []
        block_init_cfg = None
        # NOTE(review): `self.init_cfg` is assigned in __init__, so this
        # hasattr guard appears to never trigger after construction —
        # mirrors upstream behavior; confirm before changing.
        if ((self.pretrained is None) and (not hasattr(self, 'init_cfg')) and self.zero_init_residual):
            if (block is BasicBlock):
                block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm2'))
            elif (block is Bottleneck):
                block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
        layers.append(block(inplanes, planes, stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=block_init_cfg))
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(inplanes, planes, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=block_init_cfg))
        return Sequential(*layers)

    def _make_stage(self, layer_config, in_channels, multiscale_output=True):
        """Build one stage as a sequence of ``num_modules`` HRModules.

        Returns the stage module and its per-branch output channels.
        """
        num_modules = layer_config['num_modules']
        num_branches = layer_config['num_branches']
        num_blocks = layer_config['num_blocks']
        num_channels = layer_config['num_channels']
        block = self.blocks_dict[layer_config['block']]
        hr_modules = []
        block_init_cfg = None
        # NOTE(review): same always-true hasattr guard as in _make_layer.
        if ((self.pretrained is None) and (not hasattr(self, 'init_cfg')) and self.zero_init_residual):
            if (block is BasicBlock):
                block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm2'))
            elif (block is Bottleneck):
                block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
        for i in range(num_modules):
            # multiscale_output is only relevant for the last module.
            if ((not multiscale_output) and (i == (num_modules - 1))):
                reset_multiscale_output = False
            else:
                reset_multiscale_output = True
            hr_modules.append(HRModule(num_branches, block, num_blocks, in_channels, num_channels, reset_multiscale_output, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, block_init_cfg=block_init_cfg))
        return (Sequential(*hr_modules), in_channels)

    def forward(self, x):
        """Forward function."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Fan the single-scale feature out into the stage-2 branches.
        x_list = []
        for i in range(self.stage2_cfg['num_branches']):
            if (self.transition1[i] is not None):
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        x_list = []
        for i in range(self.stage3_cfg['num_branches']):
            if (self.transition2[i] is not None):
                # New branches are derived from the lowest-resolution output.
                x_list.append(self.transition2[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_cfg['num_branches']):
            if (self.transition3[i] is not None):
                x_list.append(self.transition3[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)
        return y_list

    def train(self, mode=True):
        """Convert the model into training mode while keeping the
        normalization layers frozen when ``norm_eval`` is True."""
        super(HRNet, self).train(mode)
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
@BACKBONES.register_module()
class MobileNetV2(BaseModule):
    """MobileNetV2 backbone.

    Args:
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        out_indices (Sequence[int], optional): Output from which stages.
            Default: (1, 2, 4, 7).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Default: False.
        pretrained (str, optional): model pretrained path. Deprecated in
            favor of ``init_cfg``. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.
    """

    # Each row: [expand_ratio, channel, num_blocks, stride].
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]

    def __init__(self, widen_factor=1.0, out_indices=(1, 2, 4, 7), frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), norm_eval=False, with_cp=False, pretrained=None, init_cfg=None):
        super(MobileNetV2, self).__init__(init_cfg)
        self.pretrained = pretrained
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            # Legacy path: translate `pretrained` into a Pretrained init_cfg.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                # Default init: Kaiming for convs, constant 1 for norm layers.
                self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
        else:
            raise TypeError('pretrained must be a str or None')
        # Validate before storing. (Fix: the original assigned
        # `self.out_indices` twice, once before and once after validation.)
        if (not set(out_indices).issubset(set(range(0, 8)))):
            raise ValueError(f'out_indices must be a subset of range(0, 8). But received {out_indices}')
        if (frozen_stages not in range((- 1), 8)):
            raise ValueError(f'frozen_stages must be in range(-1, 8). But received {frozen_stages}')
        self.widen_factor = widen_factor
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        # Stem: stride-2 3x3 conv to make_divisible(32 * widen_factor, 8).
        self.in_channels = make_divisible((32 * widen_factor), 8)
        self.conv1 = ConvModule(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        # Registered layer names in forward order.
        self.layers = []
        for (i, layer_cfg) in enumerate(self.arch_settings):
            (expand_ratio, channel, num_blocks, stride) = layer_cfg
            out_channels = make_divisible((channel * widen_factor), 8)
            inverted_res_layer = self.make_layer(out_channels=out_channels, num_blocks=num_blocks, stride=stride, expand_ratio=expand_ratio)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)
        # Final 1x1 conv; its width scales only when widen_factor > 1.
        if (widen_factor > 1.0):
            self.out_channel = int((1280 * widen_factor))
        else:
            self.out_channel = 1280
        layer = ConvModule(in_channels=self.in_channels, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.

        Args:
            out_channels (int): out_channels of block.
            num_blocks (int): number of blocks.
            stride (int): stride of the first block. Default: 1
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio. Default: 6.
        """
        layers = []
        for i in range(num_blocks):
            if (i >= 1):
                # Only the first block in a layer may downsample.
                stride = 1
            layers.append(InvertedResidual(self.in_channels, out_channels, mid_channels=int(round((self.in_channels * expand_ratio))), stride=stride, with_expand_conv=(expand_ratio != 1), conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` layers."""
        if (self.frozen_stages >= 0):
            for param in self.conv1.parameters():
                param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            layer = getattr(self, f'layer{i}')
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def forward(self, x):
        """Forward function; returns a tuple of the features selected by
        ``out_indices``."""
        x = self.conv1(x)
        outs = []
        for (i, layer_name) in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keeping the
        normalization layers frozen."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
@BACKBONES.register_module()
class RegNet(ResNet):
    """RegNet backbone.

    More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_.

    Args:
        arch (dict or str): The parameters of the RegNet, or the name of a
            predefined architecture in ``arch_settings``. A dict contains:

            - w0 (int): initial width
            - wa (float): slope of width
            - wm (float): quantization parameter to quantize the width
            - depth (int): depth of the backbone
            - group_w (int): width of group
            - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
        in_channels (int): Number of input image channels. Default: 3.
        stem_channels (int): Channels of the stem layer. Default: 32.
        base_channels (int): Base channels after stem layer. Default: 32.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the
            stride-two layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm
            layer in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Deprecated in
            favor of ``init_cfg``. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.
    """

    # Predefined RegNetX variants keyed by FLOP budget.
    arch_settings = {'regnetx_400mf': dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 'regnetx_800mf': dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), 'regnetx_1.6gf': dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), 'regnetx_3.2gf': dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), 'regnetx_4.0gf': dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), 'regnetx_6.4gf': dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), 'regnetx_8.0gf': dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), 'regnetx_12gf': dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0)}

    def __init__(self, arch, in_channels=3, stem_channels=32, base_channels=32, strides=(2, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None):
        # `super(ResNet, ...)` intentionally skips ResNet.__init__ (calling
        # the next class in the MRO directly): RegNet builds its own stages.
        super(ResNet, self).__init__(init_cfg)
        if isinstance(arch, str):
            assert (arch in self.arch_settings), f'"arch": "{arch}" is not one of the arch_settings'
            arch = self.arch_settings[arch]
        elif (not isinstance(arch, dict)):
            raise ValueError(f'Expect "arch" to be either a string or a dict, got {type(arch)}')
        # Generate per-block widths, then group equal consecutive widths
        # into stages.
        (widths, num_stages) = self.generate_regnet(arch['w0'], arch['wa'], arch['wm'], arch['depth'])
        (stage_widths, stage_blocks) = self.get_stages_from_blocks(widths)
        group_widths = [arch['group_w'] for _ in range(num_stages)]
        self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
        # Make stage widths compatible with the (possibly clipped) groups.
        (stage_widths, group_widths) = self.adjust_width_group(stage_widths, self.bottleneck_ratio, group_widths)
        self.stage_widths = stage_widths
        self.group_widths = group_widths
        self.depth = sum(stage_blocks)
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert ((num_stages >= 1) and (num_stages <= 4))
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.out_indices = out_indices
        assert (max(out_indices) < num_stages)
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if (dcn is not None):
            assert (len(stage_with_dcn) == num_stages)
        self.plugins = plugins
        self.zero_init_residual = zero_init_residual
        # RegNet reuses ResNet's Bottleneck with its class-level expansion
        # temporarily patched to 1; restored at the end of __init__.
        self.block = Bottleneck
        expansion_bak = self.block.expansion
        self.block.expansion = 1
        self.stage_blocks = stage_blocks[:num_stages]
        self._make_stem_layer(in_channels, stem_channels)
        block_init_cfg = None
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
                if self.zero_init_residual:
                    # Zero-init the last norm so each residual block starts
                    # as an identity mapping.
                    block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')
        self.inplanes = stem_channels
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            group_width = self.group_widths[i]
            width = int(round((self.stage_widths[i] * self.bottleneck_ratio[i])))
            stage_groups = (width // group_width)
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            if (self.plugins is not None):
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=self.stage_widths[i], num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, groups=stage_groups, base_width=group_width, base_channels=self.stage_widths[i], init_cfg=block_init_cfg)
            self.inplanes = self.stage_widths[i]
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        self.feat_dim = stage_widths[(- 1)]
        # Restore the patched class-level expansion.
        self.block.expansion = expansion_bak

    def _make_stem_layer(self, in_channels, base_channels):
        """Build the stem: a single stride-2 3x3 conv + norm + ReLU."""
        self.conv1 = build_conv_layer(self.conv_cfg, in_channels, base_channels, kernel_size=3, stride=2, padding=1, bias=False)
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, base_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)

    def generate_regnet(self, initial_width, width_slope, width_parameter, depth, divisor=8):
        """Generates per-block width from RegNet parameters.

        Args:
            initial_width ([int]): Initial width of the backbone.
            width_slope ([float]): Slope of the quantized linear function.
            width_parameter ([int]): Parameter used to quantize the width.
            depth ([int]): Depth of the backbone.
            divisor (int, optional): The divisor of channels. Defaults to 8.

        Returns:
            tuple(list, int): a list of widths of each block and the
            number of distinct stages.
        """
        assert (width_slope >= 0)
        assert (initial_width > 0)
        assert (width_parameter > 1)
        assert ((initial_width % divisor) == 0)
        # Continuous linear widths, quantized to a geometric progression
        # of width_parameter, then rounded to a multiple of `divisor`.
        widths_cont = ((np.arange(depth) * width_slope) + initial_width)
        ks = np.round((np.log((widths_cont / initial_width)) / np.log(width_parameter)))
        widths = (initial_width * np.power(width_parameter, ks))
        widths = (np.round(np.divide(widths, divisor)) * divisor)
        num_stages = len(np.unique(widths))
        (widths, widths_cont) = (widths.astype(int).tolist(), widths_cont.tolist())
        return (widths, num_stages)

    @staticmethod
    def quantize_float(number, divisor):
        """Converts a float to the closest non-zero int divisible by divisor.

        Args:
            number (int): Original number to be quantized.
            divisor (int): Divisor used to quantize the number.

        Returns:
            int: quantized number that is divisible by divisor.
        """
        return int((round((number / divisor)) * divisor))

    def adjust_width_group(self, widths, bottleneck_ratio, groups):
        """Adjusts the compatibility of widths and groups.

        Args:
            widths (list[int]): Width of each stage.
            bottleneck_ratio (float): Bottleneck ratio.
            groups (int): number of groups in each stage.

        Returns:
            tuple(list): The adjusted widths and groups of each stage.
        """
        bottleneck_width = [int((w * b)) for (w, b) in zip(widths, bottleneck_ratio)]
        # Clip each group width to its bottleneck width, then round the
        # bottleneck width to a multiple of the group width.
        groups = [min(g, w_bot) for (g, w_bot) in zip(groups, bottleneck_width)]
        bottleneck_width = [self.quantize_float(w_bot, g) for (w_bot, g) in zip(bottleneck_width, groups)]
        widths = [int((w_bot / b)) for (w_bot, b) in zip(bottleneck_width, bottleneck_ratio)]
        return (widths, groups)

    def get_stages_from_blocks(self, widths):
        """Gets widths/stage_blocks of network at each stage.

        Args:
            widths (list[int]): Width in each block.

        Returns:
            tuple(list): width and depth of each stage.
        """
        # A stage boundary is wherever the width changes between
        # consecutive blocks (sentinel 0 marks both ends).
        width_diff = [(width != width_prev) for (width, width_prev) in zip((widths + [0]), ([0] + widths))]
        stage_widths = [width for (width, diff) in zip(widths, width_diff[:(- 1)]) if diff]
        stage_blocks = np.diff([depth for (depth, diff) in zip(range(len(width_diff)), width_diff) if diff]).tolist()
        return (stage_widths, stage_blocks)

    def forward(self, x):
        """Forward function; returns a tuple of the features selected by
        ``out_indices``."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)
|
class Bottle2neck(_Bottleneck):
    # Output channels of the block are `planes * expansion`.
    expansion = 4

    def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs):
        """Bottle2neck block for Res2Net.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
        assert (scales > 1), 'Res2Net degenerates to ResNet when scales = 1.'
        # Channel width of each of the `scales` splits inside the block.
        width = int(math.floor((self.planes * (base_width / base_channels))))
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, (width * scales), postfix=1)
        (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, (width * scales), kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        if ((stage_type == 'stage') and (self.conv2_stride != 1)):
            # Downsampling 'stage' blocks pool the last split instead of
            # convolving it (see forward()).
            self.pool = nn.AvgPool2d(kernel_size=3, stride=self.conv2_stride, padding=1)
        convs = []
        bns = []
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if ((not self.with_dcn) or fallback_on_stride):
            # Regular 3x3 convs for the first scales-1 splits; the last
            # split bypasses convolution entirely.
            for i in range((scales - 1)):
                convs.append(build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False))
                bns.append(build_norm_layer(self.norm_cfg, width, postfix=(i + 1))[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        else:
            assert (self.conv_cfg is None), 'conv_cfg must be None for DCN'
            # Deformable convs replace the regular 3x3 convs.
            for i in range((scales - 1)):
                convs.append(build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False))
                bns.append(build_norm_layer(self.norm_cfg, width, postfix=(i + 1))[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        self.conv3 = build_conv_layer(self.conv_cfg, (width * scales), (self.planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        self.stage_type = stage_type
        self.scales = scales
        self.width = width
        # The parent Bottleneck's single conv2/norm2 pair is replaced by
        # the multi-scale convs above.
        delattr(self, 'conv2')
        delattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""
        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            # Split channels into `scales` groups of `width` channels.
            spx = torch.split(out, self.width, 1)
            sp = self.convs[0](spx[0].contiguous())
            sp = self.relu(self.bns[0](sp))
            out = sp
            # Hierarchical connections: for 'normal' blocks each split is
            # added to the previous split's output before its conv.
            for i in range(1, (self.scales - 1)):
                if (self.stage_type == 'stage'):
                    sp = spx[i]
                else:
                    sp = (sp + spx[i])
                sp = self.convs[i](sp.contiguous())
                sp = self.relu(self.bns[i](sp))
                out = torch.cat((out, sp), 1)
            # The last split bypasses the convs (average-pooled when the
            # block downsamples).
            if ((self.stage_type == 'normal') or (self.conv2_stride == 1)):
                out = torch.cat((out, spx[(self.scales - 1)]), 1)
            elif (self.stage_type == 'stage'):
                out = torch.cat((out, self.pool(spx[(self.scales - 1)])), 1)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out
        if (self.with_cp and x.requires_grad):
            # Trade compute for memory via gradient checkpointing.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
|
class Res2Layer(Sequential):
    """Res2Layer to build a Res2Net style backbone.

    Args:
        block (nn.Module): Block class used to build the layer.
        inplanes (int): Input channels of the first block.
        planes (int): Base channels of each block.
        num_blocks (int): Number of blocks stacked in this layer.
        stride (int): Stride of the first block. Default: 1.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck. Default: True.
        conv_cfg (dict): Dictionary to construct and config conv layer.
            Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN').
        scales (int): Scales used in Res2Net. Default: 4.
        base_width (int): Basic width of each scale. Default: 26.
    """

    def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=True, conv_cfg=None, norm_cfg=dict(type='BN'), scales=4, base_width=26, **kwargs):
        self.block = block

        out_planes = planes * block.expansion
        # Projection shortcut for the first block: average-pool handles
        # the spatial stride, a 1x1 conv matches the channel count.
        downsample = None
        if stride != 1 or inplanes != out_planes:
            downsample = nn.Sequential(
                nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False),
                build_conv_layer(conv_cfg, inplanes, out_planes, kernel_size=1, stride=1, bias=False),
                build_norm_layer(norm_cfg, out_planes)[1],
            )

        # First block carries the stride, the shortcut and the 'stage'
        # marker; the remaining blocks are plain stride-1 blocks.
        first_block = block(inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, stage_type='stage', **kwargs)
        rest_blocks = [
            block(inplanes=out_planes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, **kwargs)
            for _ in range(1, num_blocks)
        ]
        super(Res2Layer, self).__init__(first_block, *rest_blocks)
|
@BACKBONES.register_module()
class Res2Net(ResNet):
    """Res2Net backbone.

    Res2Net always uses the ``pytorch`` style with a deep stem and
    average-pool downsampling; the corresponding keyword arguments are
    accepted for interface compatibility but fixed when calling the parent
    constructor.

    Args:
        scales (int): Scales used in Res2Net. Default: 4.
        base_width (int): Basic width of each scale. Default: 26.
        style (str): Accepted for compatibility; always ``pytorch``.
        deep_stem (bool): Accepted for compatibility; always True.
        avg_down (bool): Accepted for compatibility; always True.
        pretrained (str, optional): Model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
        kwargs: Remaining arguments forwarded to :class:`ResNet` (depth,
            in_channels, num_stages, strides, dilations, out_indices,
            frozen_stages, norm_cfg, norm_eval, plugins, with_cp,
            zero_init_residual, ...).

    Example:
        >>> from mmdet.models import Res2Net
        >>> import torch
        >>> self = Res2Net(depth=50, scales=4, base_width=26)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """

    arch_settings = {
        50: (Bottle2neck, (3, 4, 6, 3)),
        101: (Bottle2neck, (3, 4, 23, 3)),
        152: (Bottle2neck, (3, 8, 36, 3)),
    }

    def __init__(self, scales=4, base_width=26, style='pytorch', deep_stem=True, avg_down=True, pretrained=None, init_cfg=None, **kwargs):
        # Stash the Res2Net-specific widths before the parent constructor
        # builds the stages through make_res_layer below.
        self.scales = scales
        self.base_width = base_width
        super().__init__(style='pytorch', deep_stem=True, avg_down=True, pretrained=pretrained, init_cfg=init_cfg, **kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``Res2Layer``."""
        return Res2Layer(scales=self.scales, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
|
class RSoftmax(nn.Module):
    """Radix Softmax module in ``SplitAttentionConv2d``.

    Args:
        radix (int): Radix of input.
        groups (int): Groups of input.
    """

    def __init__(self, radix, groups):
        super().__init__()
        self.radix = radix
        self.groups = groups

    def forward(self, x):
        batch = x.size(0)
        if self.radix <= 1:
            # Degenerate radix: a plain sigmoid gate over the channels.
            return torch.sigmoid(x)
        # Regroup channels as (batch, radix, groups, rest) and normalize
        # across the radix dimension, then flatten back per sample.
        grouped = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
        normalized = F.softmax(grouped, dim=1)
        return normalized.reshape(batch, -1)
|
class SplitAttentionConv2d(BaseModule):
    """Split-Attention Conv2d in ResNeSt.

    Args:
        in_channels (int): Number of channels in the input feature map.
        channels (int): Number of intermediate channels.
        kernel_size (int | tuple[int]): Size of the convolution kernel.
        stride (int | tuple[int]): Stride of the convolution. Default: 1.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Default: 1.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as nn.Conv2d. Default: 1.
        radix (int): Radix of SpltAtConv2d. Default: 2.
        reduction_factor (int): Reduction factor of inter_channels.
            Default: 4.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        dcn (dict): Config dict for DCN. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, radix=2, reduction_factor=4, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, init_cfg=None):
        super(SplitAttentionConv2d, self).__init__(init_cfg)
        # Bottleneck width of the attention MLP, floored at 32 channels.
        inter_channels = max((in_channels * radix) // reduction_factor, 32)
        self.radix = radix
        self.groups = groups
        self.channels = channels
        self.with_dcn = dcn is not None
        self.dcn = dcn
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_dcn and not fallback_on_stride:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            conv_cfg = dcn
        # One grouped convolution produces `radix` candidate feature maps
        # per group; they are split and re-weighted in forward().
        self.conv = build_conv_layer(conv_cfg, in_channels, channels * radix, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups * radix, bias=False)
        self.norm0_name, norm0 = build_norm_layer(norm_cfg, channels * radix, postfix=0)
        self.add_module(self.norm0_name, norm0)
        self.relu = nn.ReLU(inplace=True)
        # Attention MLP (1x1 convs): channels -> inter_channels -> channels*radix.
        self.fc1 = build_conv_layer(None, channels, inter_channels, 1, groups=self.groups)
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, inter_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.fc2 = build_conv_layer(None, inter_channels, channels * radix, 1, groups=self.groups)
        self.rsoftmax = RSoftmax(radix, groups)

    @property
    def norm0(self):
        """nn.Module: the normalization layer named "norm0" """
        return getattr(self, self.norm0_name)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def forward(self, x):
        """Forward function."""
        x = self.conv(x)
        x = self.norm0(x)
        x = self.relu(x)
        # Fix: the previous `batch, rchannel = x.shape[:2]` unpacked an
        # unused `rchannel` and was immediately overwritten by x.size(0).
        batch = x.size(0)
        if self.radix > 1:
            # Split along the radix dimension; the summed splits feed the
            # global pooling that drives the attention weights.
            splits = x.view(batch, self.radix, -1, *x.shape[2:])
            gap = splits.sum(dim=1)
        else:
            gap = x
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        gap = self.norm1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
        if self.radix > 1:
            # Weighted sum of the radix splits using the learned attention.
            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
            out = torch.sum(attens * splits, dim=1)
        else:
            out = atten * x
        return out.contiguous()
|
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeSt.

    Args:
        inplanes (int): Input planes of this block.
        planes (int): Middle planes of this block.
        groups (int): Groups of conv2.
        base_width (int): Base of width in terms of base channels. Default: 4.
        base_channels (int): Base of channels for calculating width.
            Default: 64.
        radix (int): Radix of SpltAtConv2d. Default: 2.
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Key word arguments for base class.
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs):
        """Bottleneck block for ResNeSt."""
        # The parent constructor builds the plain conv1/conv2/conv3 and
        # norm layers; conv1/conv2 and their norms are re-created below
        # with the ResNeSt-specific width and split attention.
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        # ResNeXt-style width scaling when grouped convolutions are used.
        if (groups == 1):
            width = self.planes
        else:
            width = (math.floor((self.planes * (base_width / base_channels))) * groups)
        # Only downsample via avg-pool when this block actually strides.
        self.avg_down_stride = (avg_down_stride and (self.conv2_stride > 1))
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, width, postfix=1)
        (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.with_modulated_dcn = False
        # Split-attention conv replaces the plain 3x3 conv2.  When
        # avg_down_stride is active the conv keeps stride 1 and the
        # striding is done by the avg-pool layer below.
        self.conv2 = SplitAttentionConv2d(width, width, kernel_size=3, stride=(1 if self.avg_down_stride else self.conv2_stride), padding=self.dilation, dilation=self.dilation, groups=groups, radix=radix, reduction_factor=reduction_factor, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=self.dcn)
        # The parent's norm2 is unused: SplitAttentionConv2d normalizes
        # internally, so drop it to avoid dead parameters.
        delattr(self, self.norm2_name)
        if self.avg_down_stride:
            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
        self.conv3 = build_conv_layer(self.conv_cfg, width, (self.planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)

    def forward(self, x):
        """Forward function; optionally checkpointed to save memory."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            # conv2 already applies its own normalization and activation.
            out = self.conv2(out)
            if self.avg_down_stride:
                out = self.avd_layer(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out

        # Gradient checkpointing trades compute for memory during training.
        if (self.with_cp and x.requires_grad):
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
|
@BACKBONES.register_module()
class ResNeSt(ResNetV1d):
    """ResNeSt backbone.

    Args:
        groups (int): Number of groups of Bottleneck. Default: 1.
        base_width (int): Base width of Bottleneck. Default: 4.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Keyword arguments for ResNet.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
        200: (Bottleneck, (3, 24, 36, 3)),
    }

    def __init__(self, groups=1, base_width=4, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs):
        # Record the split-attention options before the parent constructor
        # builds the stages through make_res_layer below.
        self.groups = groups
        self.base_width = base_width
        self.radix = radix
        self.reduction_factor = reduction_factor
        self.avg_down_stride = avg_down_stride
        super().__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, radix=self.radix, reduction_factor=self.reduction_factor, avg_down_stride=self.avg_down_stride, **kwargs)
|
class BasicBlock(BaseModule):
    """Basic residual block for ResNet (two 3x3 convs plus a shortcut).

    Args:
        inplanes (int): Input channels of this block.
        planes (int): Output channels of this block.
        stride (int): Stride of the first conv. Default: 1.
        dilation (int): Dilation of the first conv. Default: 1.
        downsample (nn.Module, optional): Module applied to the identity
            branch when the shape changes. Default: None.
        style (str): Unused for BasicBlock; kept for interface parity.
        with_cp (bool): Use torch.utils.checkpoint to save memory at the
            cost of recomputation. Default: False.
        conv_cfg (dict, optional): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        dcn (dict, optional): Must be None; DCN is not supported here.
        plugins (list[dict], optional): Must be None; not supported here.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None):
        super(BasicBlock, self).__init__(init_cfg)
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        # Build both norms first, then register them right after their convs
        # so parameter ordering matches conv1/norm1/conv2/norm2.
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _residual(inp):
            out = self.relu(self.norm1(self.conv1(inp)))
            out = self.norm2(self.conv2(out))
            shortcut = inp if self.downsample is None else self.downsample(inp)
            out += shortcut
            return out

        # Checkpointing only helps (and only works) when gradients flow.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual, x)
        else:
            out = _residual(x)
        return self.relu(out)
|
class Bottleneck(BaseModule):
    """Bottleneck block for ResNet.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
    it is "caffe", the stride-two layer is the first 1x1 conv layer.

    Args:
        inplanes (int): Input channels of this block.
        planes (int): Middle channels; output channels are
            ``planes * expansion``.
        stride (int): Stride of the stride-carrying conv. Default: 1.
        dilation (int): Dilation of the 3x3 conv. Default: 1.
        downsample (nn.Module, optional): Module applied to the identity
            branch when the shape changes. Default: None.
        style (str): ``pytorch`` or ``caffe``. Default: 'pytorch'.
        with_cp (bool): Use torch.utils.checkpoint to save memory at the
            cost of recomputation. Default: False.
        conv_cfg (dict, optional): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        dcn (dict, optional): Config dict for DCN applied to conv2.
        plugins (list[dict], optional): Plugins inserted after
            conv1/conv2/conv3; each dict has 'cfg' and 'position' keys.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None):
        super(Bottleneck, self).__init__(init_cfg)
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None
        if self.with_plugins:
            # Partition the plugin cfgs by their insertion point.
            self.after_conv1_plugins = [plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv1']
            self.after_conv2_plugins = [plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv2']
            self.after_conv3_plugins = [plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv3']
        # "pytorch" puts the stride on the 3x3 conv, "caffe" on the first 1x1.
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(norm_cfg, planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if (not self.with_dcn) or fallback_on_stride:
            self.conv2 = build_conv_layer(conv_cfg, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(dcn, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(conv_cfg, planes, planes * self.expansion, kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """make plugins for block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugins cfg to build.

        Returns:
            list[str]: List of the names of plugin.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            plugin = plugin.copy()  # don't mutate the caller's cfg
            name, layer = build_plugin_layer(plugin, in_channels=in_channels, postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Apply the named plugins to ``x`` sequentially."""
        out = x
        for name in plugin_names:
            # BUGFIX: feed the running `out` into each plugin (previously
            # each plugin was applied to the original `x`, so with several
            # plugins only the last one took effect).
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
|
@BACKBONES.register_module()
class ResNet(BaseModule):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        stem_channels (int | None): Number of stem channels. If not specified,
            it will be the same as `base_channels`. Default: None.
        base_channels (int): Number of base channels of res layer. Default: 64.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Resnet stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - position (str, required): Position inside block to insert
              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.

    Example:
        >>> from mmdet.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """
    # Mapping: depth -> (block class, blocks per stage).
    arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, depth, in_channels=3, stem_channels=None, base_channels=64, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None):
        super(ResNet, self).__init__(init_cfg)
        self.zero_init_residual = zero_init_residual
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        block_init_cfg = None
        # `pretrained` is the deprecated path; it is mutually exclusive
        # with `init_cfg` and converted into one below.
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                # Default init: Kaiming for convs, constant 1 for norms.
                self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
            block = self.arch_settings[depth][0]
            if self.zero_init_residual:
                # Zero-init the last norm of each block so residual branches
                # start as identity mappings.
                if (block is BasicBlock):
                    block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm2'))
                elif (block is Bottleneck):
                    block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')
        self.depth = depth
        if (stem_channels is None):
            stem_channels = base_channels
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert ((num_stages >= 1) and (num_stages <= 4))
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.out_indices = out_indices
        assert (max(out_indices) < num_stages)
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if (dcn is not None):
            assert (len(stage_with_dcn) == num_stages)
        self.plugins = plugins
        (self.block, stage_blocks) = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels
        self._make_stem_layer(in_channels, stem_channels)
        # Build the residual stages; each iteration updates self.inplanes
        # so the next stage consumes the previous stage's output channels.
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            if (plugins is not None):
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            planes = (base_channels * (2 ** i))
            res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=stage_plugins, init_cfg=block_init_cfg)
            self.inplanes = (planes * self.block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        # Channel count of the last stage's output.
        self.feat_dim = ((self.block.expansion * base_channels) * (2 ** (len(self.stage_blocks) - 1)))

    def make_stage_plugins(self, plugins, stage_idx):
        """Make plugins for ResNet ``stage_idx`` th stage.

        Currently we support to insert ``context_block``,
        ``empirical_attention_block``, ``nonlocal_block`` into the backbone
        like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3
        of Bottleneck.

        An example of plugins format could be:

        Examples:
            >>> plugins=[
            ...     dict(cfg=dict(type='xxx', arg1='xxx'),
            ...          stages=(False, True, True, True),
            ...          position='after_conv2'),
            ...     dict(cfg=dict(type='yyy'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3'),
            ...     dict(cfg=dict(type='zzz', postfix='1'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3'),
            ...     dict(cfg=dict(type='zzz', postfix='2'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3')
            ... ]
            >>> self = ResNet(depth=18)
            >>> stage_plugins = self.make_stage_plugins(plugins, 0)
            >>> assert len(stage_plugins) == 3

        Suppose ``stage_idx=0``, the structure of blocks in the stage would
        be: ``conv1-> conv2->conv3->yyy->zzz1->zzz2``.

        Suppose ``stage_idx=1``, the structure of blocks in the stage would
        be: ``conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2``.

        If stages is missing, the plugin would be applied to all stages.

        Args:
            plugins (list[dict]): List of plugins cfg to build. The postfix
                is required if multiple same type plugins are inserted.
            stage_idx (int): Index of stage to build.

        Returns:
            list[dict]: Plugins for current stage.
        """
        stage_plugins = []
        for plugin in plugins:
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert ((stages is None) or (len(stages) == self.num_stages))
            if ((stages is None) or stages[stage_idx]):
                stage_plugins.append(plugin)
        return stage_plugins

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(**kwargs)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels, stem_channels):
        # Deep stem: three 3x3 convs (first strided) instead of one 7x7.
        if self.deep_stem:
            self.stem = nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, (stem_channels // 2), kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], nn.ReLU(inplace=True), build_conv_layer(self.conv_cfg, (stem_channels // 2), (stem_channels // 2), kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], nn.ReLU(inplace=True), build_conv_layer(self.conv_cfg, (stem_channels // 2), stem_channels, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels)[1], nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False)
            (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        # Freeze the stem and the first `frozen_stages` residual stages:
        # eval mode (freezes BN stats) plus requires_grad=False.
        if (self.frozen_stages >= 0):
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x):
        """Forward function."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        freezed."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    """ResNetV1d variant described in `Bag of Tricks
    <https://arxiv.org/pdf/1812.01187.pdf>`_.

    Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv
    in the input stem with three 3x3 convs. And in the downsampling block, a
    2x2 avg_pool with stride 2 is added before conv, whose stride is changed
    to 1.
    """

    def __init__(self, **kwargs):
        # The V1d tweaks are just fixed constructor options of ResNet.
        super().__init__(deep_stem=True, avg_down=True, **kwargs)
|
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeXt.

    Widens the middle (grouped) convolution according to ``groups`` and
    ``base_width`` and rebuilds conv1/conv2/conv3 and their norms on top of
    the layers created by the parent constructor.
    """

    expansion = 4

    def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # Grouped convolutions widen the bottleneck in proportion to
        # base_width / base_channels.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / base_channels)) * groups
        out_planes = self.planes * self.expansion

        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(self.norm_cfg, out_planes, postfix=3)

        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)

        # Select which cfg drives conv2: DCN takes over unless it asks to
        # fall back when the conv is strided.
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if (not self.with_dcn) or fallback_on_stride:
            conv2_cfg = self.conv_cfg
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            conv2_cfg = self.dcn
        self.conv2 = build_conv_layer(conv2_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)

        self.conv3 = build_conv_layer(self.conv_cfg, width, out_planes, kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)

        if self.with_plugins:
            # The parent built plugins against its own channel counts;
            # rebuild them for the widened channels.
            self._del_block_plugins(self.after_conv1_plugin_names + self.after_conv2_plugin_names + self.after_conv3_plugin_names)
            self.after_conv1_plugin_names = self.make_block_plugins(width, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(width, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(out_planes, self.after_conv3_plugins)

    def _del_block_plugins(self, plugin_names):
        """delete plugins for block if exist.

        Args:
            plugin_names (list[str]): List of plugins name to delete.
        """
        assert isinstance(plugin_names, list)
        for plugin_name in plugin_names:
            del self._modules[plugin_name]
|
@BACKBONES.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone.

    Args:
        groups (int): Group of resnext. Default: 1.
        base_width (int): Base width of resnext. Default: 4.
        kwargs: Remaining arguments forwarded to :class:`ResNet` (depth,
            in_channels, num_stages, strides, dilations, out_indices, style,
            frozen_stages, norm_cfg, norm_eval, with_cp,
            zero_init_residual, ...).
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # Record cardinality settings before the parent constructor builds
        # the stages through make_res_layer below.
        self.groups = groups
        self.base_width = base_width
        super().__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``"""
        return ResLayer(groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
|
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
    """VGG Backbone network for single-shot-detection.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            of the model.
        ceil_mode (bool): When True, will use `ceil` instead of `floor`
            to compute the output shape.
        out_indices (Sequence[int]): Output from which stages.
        out_feature_indices (Sequence[int]): Output from which feature map.
        pretrained (str, optional): model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
        input_size (int, optional): Deprecated argumment.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argumment.
            L2 normalization layer init scale.

    Example:
        >>> self = SSDVGG(input_size=300, depth=11)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 300, 300)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 1024, 19, 19)
        (1, 512, 10, 10)
        (1, 256, 5, 5)
        (1, 256, 3, 3)
        (1, 256, 1, 1)
    """

    extra_setting = {300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128)}

    def __init__(self, depth, with_last_pool=False, ceil_mode=True, out_indices=(3, 4), out_feature_indices=(22, 34), pretrained=None, init_cfg=None, input_size=None, l2_norm_scale=None):
        super(SSDVGG, self).__init__(depth, with_last_pool=with_last_pool, ceil_mode=ceil_mode, out_indices=out_indices)
        # Extend the VGG features with the SSD fc6/fc7 replacements
        # (dilated 3x3 conv + 1x1 conv); names stay sequential indices.
        for extra_layer in (
                nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
                nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
                nn.ReLU(inplace=True),
                nn.Conv2d(1024, 1024, kernel_size=1),
                nn.ReLU(inplace=True)):
            self.features.add_module(str(len(self.features)), extra_layer)
        self.out_feature_indices = out_feature_indices

        assert not (init_cfg and pretrained), 'init_cfg and pretrained cannot be specified at the same time'
        if init_cfg is not None:
            self.init_cfg = init_cfg
        elif isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer='BatchNorm2d'), dict(type='Normal', std=0.01, layer='Linear')]
        else:
            raise TypeError('pretrained must be a str or None')

        if input_size is not None:
            warnings.warn('DeprecationWarning: input_size is deprecated')
        if l2_norm_scale is not None:
            warnings.warn('DeprecationWarning: l2_norm_scale in VGG is deprecated, it has been moved to SSDNeck.')

    def init_weights(self, pretrained=None):
        # Deliberately skip VGG.init_weights; BaseModule consumes init_cfg.
        super(VGG, self).init_weights()

    def forward(self, x):
        """Forward function."""
        outs = []
        for i, layer in enumerate(self.features):
            x = layer(x)
            if i in self.out_feature_indices:
                outs.append(x)
        return outs[0] if len(outs) == 1 else tuple(outs)
|
class L2Norm(ssd_neck.L2Norm):
    """Deprecated alias of :class:`mmdet.models.necks.ssd_neck.L2Norm`.

    Kept only for backward compatibility; emits a deprecation warning on
    construction.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py is deprecated, please use L2Norm in mmdet/models/necks/ssd_neck.py instead')
|
class TridentConv(BaseModule):
    """Trident Convolution Module.

    Applies one shared convolution weight per trident branch, each branch
    using its own dilation (with padding matched to the dilation).

    Args:
        in_channels (int): Number of channels in input.
        out_channels (int): Number of channels in output.
        kernel_size (int): Size of convolution kernel.
        stride (int, optional): Convolution stride. Default: 1.
        trident_dilations (tuple[int, int, int], optional): Dilations of
            different trident branch. Default: (1, 2, 3).
        test_branch_idx (int, optional): In inference, all 3 branches will
            be used if `test_branch_idx==-1`, otherwise only branch with
            index `test_branch_idx` will be used. Default: 1.
        bias (bool, optional): Whether to use bias in convolution or not.
            Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, trident_dilations=(1, 2, 3), test_branch_idx=1, bias=False, init_cfg=None):
        super(TridentConv, self).__init__(init_cfg)
        self.num_branch = len(trident_dilations)
        self.with_bias = bias
        self.test_branch_idx = test_branch_idx
        self.stride = _pair(stride)
        self.kernel_size = _pair(kernel_size)
        # Padding mirrors the per-branch dilations so every branch keeps the
        # same output size for the (3x3) trident kernel.
        self.paddings = _pair(trident_dilations)
        self.dilations = trident_dilations
        self.in_channels = in_channels
        self.out_channels = out_channels
        # One weight tensor shared by all branches.
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
        # Fix: the original also did `self.bias = bias` here, a dead
        # assignment immediately overwritten by the branch below.
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.bias = None

    def extra_repr(self):
        """Extra information shown in ``repr`` of the module."""
        tmpstr = f'in_channels={self.in_channels}'
        tmpstr += f', out_channels={self.out_channels}'
        tmpstr += f', kernel_size={self.kernel_size}'
        tmpstr += f', num_branch={self.num_branch}'
        tmpstr += f', test_branch_idx={self.test_branch_idx}'
        tmpstr += f', stride={self.stride}'
        tmpstr += f', paddings={self.paddings}'
        tmpstr += f', dilations={self.dilations}'
        tmpstr += f', bias={self.bias}'
        return tmpstr

    def forward(self, inputs):
        """Run every branch (training / test_branch_idx == -1) or only the
        selected test branch.

        Args:
            inputs (list[Tensor]): One input tensor per active branch.

        Returns:
            list[Tensor]: One output tensor per active branch.
        """
        if self.training or self.test_branch_idx == -1:
            # `inp` (not `input`) to avoid shadowing the builtin.
            outputs = [
                F.conv2d(inp, self.weight, self.bias, self.stride, padding,
                         dilation)
                for inp, dilation, padding in zip(inputs, self.dilations,
                                                  self.paddings)
            ]
        else:
            assert len(inputs) == 1
            outputs = [
                F.conv2d(inputs[0], self.weight, self.bias, self.stride,
                         self.paddings[self.test_branch_idx],
                         self.dilations[self.test_branch_idx])
            ]
        return outputs
|
class TridentBottleneck(Bottleneck):
    'BottleBlock for TridentResNet.\n\n Args:\n trident_dilations (tuple[int, int, int]): Dilations of different\n trident branch.\n test_branch_idx (int): In inference, all 3 branches will be used\n if `test_branch_idx==-1`, otherwise only branch with index\n `test_branch_idx` will be used.\n concat_output (bool): Whether to concat the output list to a Tensor.\n `True` only in the last Block.\n '
    def __init__(self, trident_dilations, test_branch_idx, concat_output, **kwargs):
        super(TridentBottleneck, self).__init__(**kwargs)
        self.trident_dilations = trident_dilations
        self.num_branch = len(trident_dilations)
        self.concat_output = concat_output
        self.test_branch_idx = test_branch_idx
        # Replace the parent's 3x3 conv2 with a multi-branch TridentConv:
        # weights are shared across branches, dilations differ per branch.
        self.conv2 = TridentConv(self.planes, self.planes, kernel_size=3, stride=self.conv2_stride, bias=False, trident_dilations=self.trident_dilations, test_branch_idx=test_branch_idx, init_cfg=dict(type='Kaiming', distribution='uniform', mode='fan_in', override=dict(name='conv2')))
    def forward(self, x):
        # `x` is a single Tensor for the first block of the stage, or a list
        # of per-branch Tensors afterwards; output is a list, or one Tensor
        # concatenated along dim 0 when concat_output is True (last block).
        def _inner_forward(x):
            # Only one branch is active at test time unless test_branch_idx == -1.
            num_branch = (self.num_branch if (self.training or (self.test_branch_idx == (- 1))) else 1)
            identity = x
            if (not isinstance(x, list)):
                # Broadcast the single input tensor to every branch.
                x = ((x,) * num_branch)
                identity = x
                if (self.downsample is not None):
                    identity = [self.downsample(b) for b in x]
            # conv1 -> norm1 -> relu applied independently per branch.
            out = [self.conv1(b) for b in x]
            out = [self.norm1(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv1_plugin_names)
            # TridentConv consumes and returns the whole list of branches.
            out = self.conv2(out)
            out = [self.norm2(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv2_plugin_names)
            out = [self.conv3(b) for b in out]
            out = [self.norm3(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv3_plugin_names)
            # Residual connection per branch.
            out = [(out_b + identity_b) for (out_b, identity_b) in zip(out, identity)]
            return out
        if (self.with_cp and x.requires_grad):
            # NOTE(review): when this block is not the first of the stage,
            # `x` is a list and has no `requires_grad` attribute, so with_cp
            # would raise here — confirm with_cp is never enabled for trident
            # stages before relying on this path.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = [self.relu(b) for b in out]
        if self.concat_output:
            out = torch.cat(out, dim=0)
        return out
|
def make_trident_res_layer(block, inplanes, planes, num_blocks, stride=1, trident_dilations=(1, 2, 3), style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, test_branch_idx=(- 1)):
    """Build Trident Res Layers.

    Stacks ``num_blocks`` trident blocks; only the first block gets the
    stride and downsample shortcut, and only the last block concatenates
    its branch outputs.
    """
    expansion_channels = planes * block.expansion
    # A projection shortcut is needed whenever the first block changes
    # the spatial resolution or the channel count.
    if stride != 1 or inplanes != expansion_channels:
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg, inplanes, expansion_channels, kernel_size=1, stride=stride, bias=False),
            build_norm_layer(norm_cfg, expansion_channels)[1])
    else:
        downsample = None
    layers = []
    for block_idx in range(num_blocks):
        is_first = block_idx == 0
        is_last = block_idx == num_blocks - 1
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride if is_first else 1,
                trident_dilations=trident_dilations,
                downsample=downsample if is_first else None,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=plugins,
                test_branch_idx=test_branch_idx,
                concat_output=is_last))
        inplanes = expansion_channels
    return nn.Sequential(*layers)
|
@BACKBONES.register_module()
class TridentResNet(ResNet):
    'The stem layer, stage 1 and stage 2 in Trident ResNet are identical to\n ResNet, while in stage 3, Trident BottleBlock is utilized to replace the\n normal BottleBlock to yield trident output. Different branch shares the\n convolution weight but uses different dilations to achieve multi-scale\n output.\n\n / stage3(b0) x - stem - stage1 - stage2 - stage3(b1) - output\n \\ stage3(b2) /\n\n Args:\n depth (int): Depth of resnet, from {50, 101, 152}.\n num_branch (int): Number of branches in TridentNet.\n test_branch_idx (int): In inference, all 3 branches will be used\n if `test_branch_idx==-1`, otherwise only branch with index\n `test_branch_idx` will be used.\n trident_dilations (tuple[int]): Dilations of different trident branch.\n len(trident_dilations) should be equal to num_branch.\n '
    def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, **kwargs):
        # One dilation per trident branch; only bottleneck-based depths.
        assert (num_branch == len(trident_dilations))
        assert (depth in (50, 101, 152))
        super(TridentResNet, self).__init__(depth, **kwargs)
        assert (self.num_stages == 3)
        self.test_branch_idx = test_branch_idx
        self.num_branch = num_branch
        # Rebuild ONLY the last stage with trident blocks; earlier stages keep
        # the plain ResNet layers constructed by the parent __init__.
        last_stage_idx = (self.num_stages - 1)
        stride = self.strides[last_stage_idx]
        dilation = trident_dilations
        dcn = (self.dcn if self.stage_with_dcn[last_stage_idx] else None)
        if (self.plugins is not None):
            stage_plugins = self.make_stage_plugins(self.plugins, last_stage_idx)
        else:
            stage_plugins = None
        planes = (self.base_channels * (2 ** last_stage_idx))
        res_layer = make_trident_res_layer(TridentBottleneck, inplanes=((self.block.expansion * self.base_channels) * (2 ** (last_stage_idx - 1))), planes=planes, num_blocks=self.stage_blocks[last_stage_idx], stride=stride, trident_dilations=dilation, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, test_branch_idx=self.test_branch_idx)
        layer_name = f'layer{(last_stage_idx + 1)}'
        # Swap the parent-built layer for the trident layer under the same
        # attribute name, keeping res_layers consistent.
        self.__setattr__(layer_name, res_layer)
        self.res_layers.pop(last_stage_idx)
        self.res_layers.insert(last_stage_idx, layer_name)
        # Re-apply stage freezing now that the last stage was replaced.
        self._freeze_stages()
|
def build_backbone(cfg):
    """Build a backbone from its config dict via the ``BACKBONES`` registry."""
    return BACKBONES.build(cfg)
|
def build_neck(cfg):
    """Build a neck from its config dict via the ``NECKS`` registry."""
    return NECKS.build(cfg)
|
def build_roi_extractor(cfg):
    """Build a RoI extractor from its config dict via ``ROI_EXTRACTORS``."""
    return ROI_EXTRACTORS.build(cfg)
|
def build_shared_head(cfg):
    """Build a shared head from its config dict via ``SHARED_HEADS``."""
    return SHARED_HEADS.build(cfg)
|
def build_head(cfg):
    """Build a head from its config dict via the ``HEADS`` registry."""
    return HEADS.build(cfg)
|
def build_loss(cfg):
    """Build a loss from its config dict via the ``LOSSES`` registry."""
    return LOSSES.build(cfg)
|
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a detector from its config dict via the ``DETECTORS`` registry.

    Passing ``train_cfg``/``test_cfg`` here is deprecated; they should live
    inside the model config. Supplying one of them both here and in ``cfg``
    is rejected.
    """
    if train_cfg is not None or test_cfg is not None:
        warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model', UserWarning)
    assert cfg.get('train_cfg') is None or train_cfg is None, 'train_cfg specified in both outer field and model field '
    assert cfg.get('test_cfg') is None or test_cfg is None, 'test_cfg specified in both outer field and model field '
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return DETECTORS.build(cfg, default_args=default_args)
|
@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    'Anchor-free head (FCOS, Fovea, RepPoints, etc.).\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n feat_channels (int): Number of hidden channels. Used in child classes.\n stacked_convs (int): Number of stacking convs of the head.\n strides (tuple): Downsample factor of each feature map.\n dcn_on_last_conv (bool): If true, use dcn in the last layer of\n towers. Default: False.\n conv_bias (bool | str): If specified as `auto`, it will be decided by\n the norm_cfg. Bias of conv will be set as True if `norm_cfg` is\n None, otherwise False. Default: "auto".\n loss_cls (dict): Config of classification loss.\n loss_bbox (dict): Config of localization loss.\n bbox_coder (dict): Config of bbox coder. Defaults\n \'DistancePointBBoxCoder\'.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n train_cfg (dict): Training config of anchor head.\n test_cfg (dict): Testing config of anchor head.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n '
    # State-dict layout version; checkpoints saved without a version take the
    # key-renaming path in _load_from_state_dict.
    _version = 1
    def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=4, strides=(4, 8, 16, 32, 64), dcn_on_last_conv=False, conv_bias='auto', loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), bbox_coder=dict(type='DistancePointBBoxCoder'), conv_cfg=None, norm_cfg=None, train_cfg=None, test_cfg=None, init_cfg=dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='conv_cls', std=0.01, bias_prob=0.01))):
        super(AnchorFreeHead, self).__init__(init_cfg)
        self.num_classes = num_classes
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        if self.use_sigmoid_cls:
            self.cls_out_channels = num_classes
        else:
            # Non-sigmoid (softmax) classification keeps an extra background
            # channel.
            self.cls_out_channels = (num_classes + 1)
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.dcn_on_last_conv = dcn_on_last_conv
        assert ((conv_bias == 'auto') or isinstance(conv_bias, bool))
        self.conv_bias = conv_bias
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.prior_generator = MlvlPointGenerator(strides)
        # Number of priors at each feature-map location, taken from the first
        # level (assumed identical across levels for a point generator).
        self.num_base_priors = self.prior_generator.num_base_priors[0]
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self._init_layers()
    def _init_layers(self):
        'Initialize layers of the head.'
        self._init_cls_convs()
        self._init_reg_convs()
        self._init_predictor()
    def _init_cls_convs(self):
        'Initialize classification conv layers of the head.'
        self.cls_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = (self.in_channels if (i == 0) else self.feat_channels)
            # Optionally use deformable conv for the last tower conv only.
            if (self.dcn_on_last_conv and (i == (self.stacked_convs - 1))):
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.cls_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias))
    def _init_reg_convs(self):
        'Initialize bbox regression conv layers of the head.'
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = (self.in_channels if (i == 0) else self.feat_channels)
            # Optionally use deformable conv for the last tower conv only.
            if (self.dcn_on_last_conv and (i == (self.stacked_convs - 1))):
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.reg_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias))
    def _init_predictor(self):
        'Initialize predictor layers of the head.'
        self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        'Hack some keys of the model state dict so that can load checkpoints\n of previous version.'
        version = local_metadata.get('version', None)
        if (version is None):
            # Checkpoint predates `_version`: rename `*cls`/`*reg`/
            # `*centerness` predictor keys to the current conv_* names.
            bbox_head_keys = [k for k in state_dict.keys() if k.startswith(prefix)]
            ori_predictor_keys = []
            new_predictor_keys = []
            for key in bbox_head_keys:
                ori_predictor_keys.append(key)
                key = key.split('.')
                conv_name = None
                if key[1].endswith('cls'):
                    conv_name = 'conv_cls'
                elif key[1].endswith('reg'):
                    conv_name = 'conv_reg'
                elif key[1].endswith('centerness'):
                    conv_name = 'conv_centerness'
                else:
                    # NOTE(review): `assert NotImplementedError` asserts the
                    # (truthy) exception class, so it never fails; unmatched
                    # keys just fall through to the pop() below. A `raise`
                    # was probably intended — confirm before changing, since
                    # raising would break loading of such checkpoints.
                    assert NotImplementedError
                if (conv_name is not None):
                    key[1] = conv_name
                    new_predictor_keys.append('.'.join(key))
                else:
                    # Key matched nothing: leave it untouched in state_dict.
                    ori_predictor_keys.pop((- 1))
            # Move the renamed entries into place.
            for i in range(len(new_predictor_keys)):
                state_dict[new_predictor_keys[i]] = state_dict.pop(ori_predictor_keys[i])
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
    def forward(self, feats):
        'Forward features from the upstream network.\n\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: Usually contain classification scores and bbox predictions.\n cls_scores (list[Tensor]): Box scores for each scale level,\n each is a 4D-tensor, the channel number is\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * 4.\n '
        # forward_single also returns cls_feat/reg_feat; only the first two
        # outputs (scores, bbox preds) are exposed here.
        return multi_apply(self.forward_single, feats)[:2]
    def forward_single(self, x):
        'Forward features of a single scale level.\n\n Args:\n x (Tensor): FPN feature maps of the specified stride.\n\n Returns:\n tuple: Scores for each class, bbox predictions, features\n after classification and regression conv layers, some\n models needs these features like FCOS.\n '
        cls_feat = x
        reg_feat = x
        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)
        cls_score = self.conv_cls(cls_feat)
        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)
        bbox_pred = self.conv_reg(reg_feat)
        return (cls_score, bbox_pred, cls_feat, reg_feat)
    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
        'Compute loss of the head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level,\n each is a 4D-tensor, the channel number is\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * 4.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n '
        raise NotImplementedError
    @abstractmethod
    def get_targets(self, points, gt_bboxes_list, gt_labels_list):
        'Compute regression, classification and centerness targets for points\n in multiple images.\n\n Args:\n points (list[Tensor]): Points of each fpn level, each has shape\n (num_points, 2).\n gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,\n each has shape (num_gt, 4).\n gt_labels_list (list[Tensor]): Ground truth labels of each box,\n each has shape (num_gt,).\n '
        raise NotImplementedError
    def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False):
        'Get points of a single scale level.\n\n This function will be deprecated soon.\n '
        warnings.warn('`_get_points_single` in `AnchorFreeHead` will be deprecated soon, we support a multi level point generator nowyou can get points of a single level feature map with `self.prior_generator.single_level_grid_priors` ')
        (h, w) = featmap_size
        # Note: returns raw grid indices — the `stride` argument is unused
        # here; subclasses are expected to apply the scaling.
        x_range = torch.arange(w, device=device).to(dtype)
        y_range = torch.arange(h, device=device).to(dtype)
        (y, x) = torch.meshgrid(y_range, x_range)
        if flatten:
            y = y.flatten()
            x = x.flatten()
        return (y, x)
    def get_points(self, featmap_sizes, dtype, device, flatten=False):
        'Get points according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n dtype (torch.dtype): Type of points.\n device (torch.device): Device of points.\n\n Returns:\n tuple: points of each image.\n '
        warnings.warn('`get_points` in `AnchorFreeHead` will be deprecated soon, we support a multi level point generator nowyou can get points of all levels with `self.prior_generator.grid_priors` ')
        mlvl_points = []
        for i in range(len(featmap_sizes)):
            mlvl_points.append(self._get_points_single(featmap_sizes[i], self.strides[i], dtype, device, flatten))
        return mlvl_points
    def aug_test(self, feats, img_metas, rescale=False):
        'Test function with test time augmentation.\n\n Args:\n feats (list[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains features for all images in the batch.\n img_metas (list[list[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) and the inner list indicates\n images in a batch. each dict has image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[ndarray]: bbox results of each class\n '
        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
|
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
    """Base class for mask heads used in One-Stage Instance Segmentation."""

    def __init__(self, init_cfg):
        super(BaseMaskHead, self).__init__(init_cfg)

    @abstractmethod
    def loss(self, **kwargs):
        """Compute losses from forward outputs; implemented by subclasses."""
        pass

    @abstractmethod
    def get_results(self, **kwargs):
        """Get processed :obj:`InstanceData` of multiple images."""
        pass

    def forward_train(self, x, gt_labels, gt_masks, img_metas, gt_bboxes=None, gt_bboxes_ignore=None, positive_infos=None, **kwargs):
        """Run a forward pass and compute the training losses.

        Args:
            x (list[Tensor] | tuple[Tensor]): Features from FPN,
                each with shape (B, C, H, W).
            gt_labels (list[Tensor]): Ground truth labels of all images,
                each with shape (num_gts,).
            gt_masks (list[Tensor]): Masks for each bbox,
                shape (num_gts, h, w).
            img_metas (list[dict]): Meta information of each image.
            gt_bboxes (list[Tensor], optional): Ground truth bboxes,
                each with shape (num_gts, 4).
            gt_bboxes_ignore (list[Tensor], optional): Ignored ground truth
                bboxes, each with shape (num_ignored_gts, 4).
            positive_infos (list[:obj:`InstanceData`], optional): Positive
                sample information when label assignment is done outside
                this head (e.g. in a bbox head, as in YOLACT/CondInst);
                ``None`` when the head assigns labels itself (e.g. SOLO).

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # Heads with external label assignment take it as a second argument.
        outs = self(x) if positive_infos is None else self(x, positive_infos)
        assert isinstance(outs, tuple), 'Forward results should be a tuple, even if only one item is returned'
        return self.loss(
            *outs,
            gt_labels=gt_labels,
            gt_masks=gt_masks,
            img_metas=img_metas,
            gt_bboxes=gt_bboxes,
            gt_bboxes_ignore=gt_bboxes_ignore,
            positive_infos=positive_infos,
            **kwargs)

    def simple_test(self, feats, img_metas, rescale=False, instances_list=None, **kwargs):
        """Test function without test-time augmentation.

        Args:
            feats (tuple[torch.Tensor]): Multi-level features from the
                upstream network, each a 4D tensor.
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.
            instances_list (list[obj:`InstanceData`], optional): Detection
                results of each image after post-processing; only present
                when a ``bbox_head`` exists (e.g. YOLACT, CondInst).

        Returns:
            list[obj:`InstanceData`]: Post-processed instance segmentation
            results per image, typically carrying ``scores``, ``labels``
            and ``masks``.
        """
        outs = self(feats) if instances_list is None else self(feats, instances_list=instances_list)
        mask_inputs = outs + (img_metas,)
        return self.get_results(
            *mask_inputs,
            rescale=rescale,
            instances_list=instances_list,
            **kwargs)

    def onnx_export(self, img, img_metas):
        raise NotImplementedError(f'{self.__class__.__name__} does not support ONNX EXPORT')
|
@HEADS.register_module()
class GARetinaHead(GuidedAnchorHead):
    """Guided-Anchor-based RetinaNet head."""

    def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs):
        # Default init: normal conv init, with bias priors on the objectness
        # (conv_loc) and classification (retina_cls) prediction layers.
        if init_cfg is None:
            init_cfg = dict(
                type='Normal',
                layer='Conv2d',
                std=0.01,
                override=[
                    dict(type='Normal', name='conv_loc', std=0.01, bias_prob=0.01),
                    dict(type='Normal', name='retina_cls', std=0.01, bias_prob=0.01)
                ])
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(GARetinaHead, self).__init__(num_classes, in_channels, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for idx in range(self.stacked_convs):
            in_ch = self.in_channels if idx == 0 else self.feat_channels
            # One conv per tower per level of stacking; creation order (cls
            # then reg each iteration) is kept for state-dict compatibility.
            for tower in (self.cls_convs, self.reg_convs):
                tower.append(
                    ConvModule(in_ch, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
        self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, 1)
        self.feature_adaption_cls = FeatureAdaption(self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups)
        self.feature_adaption_reg = FeatureAdaption(self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups)
        self.retina_cls = MaskedConv2d(self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1)
        self.retina_reg = MaskedConv2d(self.feat_channels, self.num_base_priors * 4, 3, padding=1)

    def forward_single(self, x):
        """Forward feature map of a single scale level."""
        cls_feat = x
        reg_feat = x
        for conv in self.cls_convs:
            cls_feat = conv(cls_feat)
        for conv in self.reg_convs:
            reg_feat = conv(reg_feat)
        loc_pred = self.conv_loc(cls_feat)
        shape_pred = self.conv_shape(reg_feat)
        # Adapt features to the predicted anchor shapes before prediction.
        cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
        reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
        # At test time only predict where objectness clears the threshold.
        mask = None if self.training else loc_pred.sigmoid()[0] >= self.loc_filter_thr
        cls_score = self.retina_cls(cls_feat, mask)
        bbox_pred = self.retina_reg(reg_feat, mask)
        return cls_score, bbox_pred, shape_pred, loc_pred
|
@HEADS.register_module()
class LADHead(PAAHead):
'Label Assignment Head from the paper: `Improving Object Detection by\n Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_'
    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
    def get_label_assignment(self, cls_scores, bbox_preds, iou_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
        'Get label assignment (from teacher).\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level.\n Has shape (N, num_anchors * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W)\n iou_preds (list[Tensor]): iou_preds for each scale\n level with shape (N, num_anchors * 1, H, W)\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (list[Tensor] | None): Specify which bounding\n boxes can be ignored when are computing the loss.\n\n Returns:\n tuple: Returns a tuple containing label assignment variables.\n\n - labels (Tensor): Labels of all anchors, each with\n shape (num_anchors,).\n - labels_weight (Tensor): Label weights of all anchor.\n each with shape (num_anchors,).\n - bboxes_target (Tensor): BBox targets of all anchors.\n each with shape (num_anchors, 4).\n - bboxes_weight (Tensor): BBox weights of all anchors.\n each with shape (num_anchors, 4).\n - pos_inds_flatten (Tensor): Contains all index of positive\n sample in all anchor.\n - pos_anchors (Tensor): Positive anchors.\n - num_pos (int): Number of positive anchors.\n '
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, img_metas, device=device)
        label_channels = (self.cls_out_channels if self.use_sigmoid_cls else 1)
        # Initial (pre-reassignment) targets from the standard assigner.
        cls_reg_targets = self.get_targets(anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels)
        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, pos_gt_index) = cls_reg_targets
        # Regroup per-level predictions into per-image flattened tensors.
        cls_scores = levels_to_images(cls_scores)
        cls_scores = [item.reshape((- 1), self.cls_out_channels) for item in cls_scores]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape((- 1), 4) for item in bbox_preds]
        # Per-anchor losses drive the PAA reassignment below.
        (pos_losses_list,) = multi_apply(self.get_pos_loss, anchor_list, cls_scores, bbox_preds, labels, labels_weight, bboxes_target, bboxes_weight, pos_inds)
        with torch.no_grad():
            # Probabilistic anchor assignment: re-label positives based on
            # the pos-loss distribution; no gradients needed here.
            (reassign_labels, reassign_label_weight, reassign_bbox_weights, num_pos) = multi_apply(self.paa_reassign, pos_losses_list, labels, labels_weight, bboxes_weight, pos_inds, pos_gt_index, anchor_list)
            num_pos = sum(num_pos)
        # Flatten everything across images/levels for the loss computation.
        labels = torch.cat(reassign_labels, 0).view((- 1))
        flatten_anchors = torch.cat([torch.cat(item, 0) for item in anchor_list])
        labels_weight = torch.cat(reassign_label_weight, 0).view((- 1))
        bboxes_target = torch.cat(bboxes_target, 0).view((- 1), bboxes_target[0].size((- 1)))
        # Positives are anchors whose (reassigned) label is a foreground class.
        pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape((- 1))
        if num_pos:
            pos_anchors = flatten_anchors[pos_inds_flatten]
        else:
            pos_anchors = None
        label_assignment_results = (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, pos_anchors, num_pos)
        return label_assignment_results
def forward_train(self, x, label_assignment_results, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, **kwargs):
'Forward train with the available label assignment (student receives\n from teacher).\n\n Args:\n x (list[Tensor]): Features from FPN.\n label_assignment_results (tuple): As the outputs defined in the\n function `self.get_label_assignment`.\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes (Tensor): Ground truth bboxes of the image,\n shape (num_gts, 4).\n gt_labels (Tensor): Ground truth labels of each box,\n shape (num_gts,).\n gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n ignored, shape (num_ignored_gts, 4).\n\n Returns:\n losses: (dict[str, Tensor]): A dictionary of loss components.\n '
outs = self(x)
if (gt_labels is None):
loss_inputs = (outs + (gt_bboxes, img_metas))
else:
loss_inputs = (outs + (gt_bboxes, gt_labels, img_metas))
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore, label_assignment_results=label_assignment_results)
return losses
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
def loss(self, cls_scores, bbox_preds, iou_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None, label_assignment_results=None):
    """Compute losses of the head using a pre-computed label assignment.

    Args:
        cls_scores (list[Tensor]): Box scores for each scale level
            Has shape (N, num_anchors * num_classes, H, W)
        bbox_preds (list[Tensor]): Box energies / deltas for each scale
            level with shape (N, num_anchors * 4, H, W)
        iou_preds (list[Tensor]): iou_preds for each scale
            level with shape (N, num_anchors * 1, H, W)
        gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
            shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
        gt_labels (list[Tensor]): class indices corresponding to each box
        img_metas (list[dict]): Meta information of each image, e.g.,
            image size, scaling factor, etc.
        gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
            boxes can be ignored when are computing the loss.
        label_assignment_results (tuple): As the outputs defined in the
            function `self.get_label_assignment`.

    Returns:
        dict[str, Tensor]: A dictionary of loss components.
    """
    # Reuse the assignment produced by `self.get_label_assignment` instead
    # of re-assigning targets here.
    (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, pos_anchors, num_pos) = label_assignment_results
    # Rearrange per-level predictions into per-image tensors, then flatten
    # each image's predictions to (num_anchors_in_image, C).
    cls_scores = levels_to_images(cls_scores)
    cls_scores = [item.reshape((- 1), self.cls_out_channels) for item in cls_scores]
    bbox_preds = levels_to_images(bbox_preds)
    bbox_preds = [item.reshape((- 1), 4) for item in bbox_preds]
    iou_preds = levels_to_images(iou_preds)
    iou_preds = [item.reshape((- 1), 1) for item in iou_preds]
    # Concatenate over the batch so rows line up with the flattened targets
    # from the label assignment.
    cls_scores = torch.cat(cls_scores, 0).view((- 1), cls_scores[0].size((- 1)))
    bbox_preds = torch.cat(bbox_preds, 0).view((- 1), bbox_preds[0].size((- 1)))
    iou_preds = torch.cat(iou_preds, 0).view((- 1), iou_preds[0].size((- 1)))
    # `max(num_pos, len(img_metas))` keeps avg_factor > 0 when there are no
    # positive samples in the batch.
    losses_cls = self.loss_cls(cls_scores, labels, labels_weight, avg_factor=max(num_pos, len(img_metas)))
    if num_pos:
        # Decode positive predictions; their IoU with the targets serves as
        # the training target of the IoU/centerness branch.
        pos_bbox_pred = self.bbox_coder.decode(pos_anchors, bbox_preds[pos_inds_flatten])
        pos_bbox_target = bboxes_target[pos_inds_flatten]
        iou_target = bbox_overlaps(pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
        losses_iou = self.loss_centerness(iou_preds[pos_inds_flatten], iou_target.unsqueeze((- 1)), avg_factor=num_pos)
        losses_bbox = self.loss_bbox(pos_bbox_pred, pos_bbox_target, avg_factor=num_pos)
    else:
        # No positives: emit zero losses that still keep the graph connected.
        losses_iou = (iou_preds.sum() * 0)
        losses_bbox = (bbox_preds.sum() * 0)
    return dict(loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
|
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
    """Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.

    It is quite similar with FCOS head, except for the searched structure of
    classification branch and bbox regression branch, where a structure of
    "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
    """

    def __init__(self, *args, init_cfg=None, **kwargs):
        # Default init: Caffe2Xavier for the conv towers, Normal for the
        # three prediction layers (cls additionally gets a bias prior).
        if init_cfg is None:
            init_cfg = [
                dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
                dict(
                    type='Normal',
                    std=0.01,
                    override=[
                        dict(name='conv_reg'),
                        dict(name='conv_centerness'),
                        dict(
                            name='conv_cls',
                            type='Normal',
                            std=0.01,
                            bias_prob=0.01)
                    ]),
            ]
        super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        dconv3x3_config = dict(
            type='DCNv2',
            kernel_size=3,
            use_bias=True,
            deform_groups=2,
            padding=1)
        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
        conv1x1_config = dict(type='Conv', kernel_size=1)
        # Searched branch structure: dconv3x3 -> conv3x3 -> dconv3x3 -> conv1x1
        self.arch_config = [
            dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
        ]
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for idx, raw_cfg in enumerate(self.arch_config):
            assert isinstance(raw_cfg, dict)
            # Work on a copy so the shared config dicts stay untouched.
            cfg = copy.deepcopy(raw_cfg)
            in_chn = self.in_channels if idx == 0 else self.feat_channels
            layer = ConvModule(
                in_chn,
                self.feat_channels,
                cfg.pop('kernel_size'),
                stride=1,
                padding=cfg.pop('padding', 0),
                norm_cfg=self.norm_cfg,
                bias=cfg.pop('use_bias', False),
                conv_cfg=cfg)
            # Both towers get an independent copy of the same layer.
            self.cls_convs.append(copy.deepcopy(layer))
            self.reg_convs.append(copy.deepcopy(layer))
        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
|
@HEADS.register_module()
class PISARetinaHead(RetinaHead):
    """PISA Retinanet Head.

    The head owns the same structure with Retinanet Head, but differs in two
    aspects:
    1. Importance-based Sample Reweighting Positive (ISR-P) is applied to
       change the positive loss weights.
    2. Classification-aware regression loss is adopted as a third loss.
    """

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj, 4).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprise classification loss, regression loss and
                carl loss.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, img_metas, device=device)
        label_channels = (self.cls_out_channels if self.use_sigmoid_cls else 1)
        # `return_sampling_results=True`: ISR-P needs the sampling results to
        # locate the positive samples of each image.
        cls_reg_targets = self.get_targets(anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, return_sampling_results=True)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
        num_total_samples = ((num_total_pos + num_total_neg) if self.sampling else num_total_pos)
        # Rearrange anchors from per-image lists to per-level lists.
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(torch.cat(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors)
        num_imgs = len(img_metas)
        # Flatten predictions, anchors and targets to (num_total_anchors, C)
        # so the whole batch can be reweighted in a single pass.
        flatten_cls_scores = [cls_score.permute(0, 2, 3, 1).reshape(num_imgs, (- 1), label_channels) for cls_score in cls_scores]
        flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).reshape((- 1), flatten_cls_scores[0].size((- 1)))
        flatten_bbox_preds = [bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, (- 1), 4) for bbox_pred in bbox_preds]
        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1).view((- 1), flatten_bbox_preds[0].size((- 1)))
        flatten_labels = torch.cat(labels_list, dim=1).reshape((- 1))
        flatten_label_weights = torch.cat(label_weights_list, dim=1).reshape((- 1))
        flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape((- 1), 4)
        flatten_bbox_targets = torch.cat(bbox_targets_list, dim=1).reshape((- 1), 4)
        flatten_bbox_weights = torch.cat(bbox_weights_list, dim=1).reshape((- 1), 4)
        # ISR-P: reweight positive targets by their estimated importance.
        # Done under no_grad because only the target weights are modified.
        isr_cfg = self.train_cfg.get('isr', None)
        if (isr_cfg is not None):
            all_targets = (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights)
            with torch.no_grad():
                all_targets = isr_p(flatten_cls_scores, flatten_bbox_preds, all_targets, flatten_anchors, sampling_results_list, bbox_coder=self.bbox_coder, loss_cls=self.loss_cls, num_class=self.num_classes, **self.train_cfg.isr)
            (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) = all_targets
        losses_cls = self.loss_cls(flatten_cls_scores, flatten_labels, flatten_label_weights, avg_factor=num_total_samples)
        losses_bbox = self.loss_bbox(flatten_bbox_preds, flatten_bbox_targets, flatten_bbox_weights, avg_factor=num_total_samples)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        # CARL: classification-aware regression loss, added as a third term.
        carl_cfg = self.train_cfg.get('carl', None)
        if (carl_cfg is not None):
            loss_carl = carl_loss(flatten_cls_scores, flatten_labels, flatten_bbox_preds, flatten_bbox_targets, self.loss_bbox, **self.train_cfg.carl, avg_factor=num_total_pos, sigmoid=True, num_class=self.num_classes)
            loss_dict.update(loss_carl)
        return loss_dict
|
@HEADS.register_module()
class PISASSDHead(SSDHead):
    """SSD head with PISA components.

    Same structure as ``SSDHead``, but the loss additionally applies
    Importance-based Sample Reweighting for positives (ISR-P) and a
    classification-aware regression loss (CARL) when the corresponding
    entries ('isr', 'carl') are present in ``train_cfg``.
    """

    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj, 4).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprise classification loss regression loss and
                carl loss.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, img_metas, device=device)
        # `return_sampling_results=True`: ISR-P needs the sampling results.
        cls_reg_targets = self.get_targets(anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, unmap_outputs=False, return_sampling_results=True)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
        num_images = len(img_metas)
        # Flatten per-level predictions and targets to
        # (num_images, num_total_anchors, C).
        all_cls_scores = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, (- 1), self.cls_out_channels) for s in cls_scores], 1)
        all_labels = torch.cat(labels_list, (- 1)).view(num_images, (- 1))
        all_label_weights = torch.cat(label_weights_list, (- 1)).view(num_images, (- 1))
        all_bbox_preds = torch.cat([b.permute(0, 2, 3, 1).reshape(num_images, (- 1), 4) for b in bbox_preds], (- 2))
        all_bbox_targets = torch.cat(bbox_targets_list, (- 2)).view(num_images, (- 1), 4)
        all_bbox_weights = torch.cat(bbox_weights_list, (- 2)).view(num_images, (- 1), 4)
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))
        # ISR-P: reweight the positive targets by estimated importance.
        isr_cfg = self.train_cfg.get('isr', None)
        all_targets = (all_labels.view((- 1)), all_label_weights.view((- 1)), all_bbox_targets.view((- 1), 4), all_bbox_weights.view((- 1), 4))
        if (isr_cfg is not None):
            all_targets = isr_p(all_cls_scores.view((- 1), all_cls_scores.size((- 1))), all_bbox_preds.view((- 1), 4), all_targets, torch.cat(all_anchors), sampling_results_list, loss_cls=CrossEntropyLoss(), bbox_coder=self.bbox_coder, **self.train_cfg.isr, num_class=self.num_classes)
            (new_labels, new_label_weights, new_bbox_targets, new_bbox_weights) = all_targets
            # Restore the (num_images, ...) layout expected by loss_single.
            all_labels = new_labels.view(all_labels.shape)
            all_label_weights = new_label_weights.view(all_label_weights.shape)
            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
        # CARL: classification-aware regression loss (extra loss term).
        carl_loss_cfg = self.train_cfg.get('carl', None)
        if (carl_loss_cfg is not None):
            loss_carl = carl_loss(all_cls_scores.view((- 1), all_cls_scores.size((- 1))), all_targets[0], all_bbox_preds.view((- 1), 4), all_targets[2], SmoothL1Loss(beta=1.0), **self.train_cfg.carl, avg_factor=num_total_pos, num_class=self.num_classes)
        # Guard against diverged training before the per-image loss loop.
        assert torch.isfinite(all_cls_scores).all().item(), 'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), 'bbox predications become infinite or NaN!'
        (losses_cls, losses_bbox) = multi_apply(self.loss_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        if (carl_loss_cfg is not None):
            loss_dict.update(loss_carl)
        return loss_dict
|
@HEADS.register_module()
class RetinaHead(AnchorHead):
    """An anchor-based head used in `RetinaNet
    <https://arxiv.org/pdf/1708.02002.pdf>`_.

    The head contains two subnetworks. The first classifies anchor boxes and
    the second regresses deltas for the anchors.

    Example:
        >>> import torch
        >>> self = RetinaHead(11, 7)
        >>> x = torch.rand(1, 7, 32, 32)
        >>> cls_score, bbox_pred = self.forward_single(x)
        >>> # Each anchor predicts a score for each class except background
        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
        >>> assert cls_per_anchor == (self.num_classes)
        >>> assert box_per_anchor == 4
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     octave_base_scale=4,
                     scales_per_octave=3,
                     ratios=[0.5, 1.0, 2.0],
                     strides=[8, 16, 32, 64, 128]),
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='retina_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs):
        # Record tower configuration before the base class builds the layers.
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(RetinaHead, self).__init__(
            num_classes,
            in_channels,
            anchor_generator=anchor_generator,
            init_cfg=init_cfg,
            **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for depth in range(self.stacked_convs):
            in_chn = self.in_channels if depth == 0 else self.feat_channels
            # The two towers are structurally identical but do not share
            # weights; append one conv to each per depth.
            for tower in (self.cls_convs, self.reg_convs):
                tower.append(
                    ConvModule(
                        in_chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_base_priors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_base_priors * 4, 3, padding=1)

    def forward_single(self, x):
        """Forward feature of a single scale level.

        Args:
            x (Tensor): Features of a single scale level.

        Returns:
            tuple:
                cls_score (Tensor): Cls scores for a single scale level
                    the channels number is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level, the channels number is num_anchors * 4.
        """
        cls_feat, reg_feat = x, x
        for conv in self.cls_convs:
            cls_feat = conv(cls_feat)
        for conv in self.reg_convs:
            reg_feat = conv(reg_feat)
        return self.retina_cls(cls_feat), self.retina_reg(reg_feat)
|
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
    """"RetinaHead with separate BN.

    In RetinaHead, conv/norm layers are shared across different FPN levels,
    while in RetinaSepBNHead, conv layers are shared across different FPN
    levels, but BN layers are separated.
    """

    def __init__(self,
                 num_classes,
                 num_ins,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 **kwargs):
        assert init_cfg is None, 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.num_ins = num_ins
        super(RetinaSepBNHead, self).__init__(
            num_classes, in_channels, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        # One tower per input level so each level keeps its own norm layers.
        for _ in range(self.num_ins):
            branch_cls = nn.ModuleList()
            branch_reg = nn.ModuleList()
            for depth in range(self.stacked_convs):
                chn = self.in_channels if depth == 0 else self.feat_channels
                branch_cls.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
                branch_reg.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            self.cls_convs.append(branch_cls)
            self.reg_convs.append(branch_reg)
        # Tie the conv weights of every level to level 0; only the norm
        # layers remain level-specific.
        for depth in range(self.stacked_convs):
            for level in range(1, self.num_ins):
                self.cls_convs[level][depth].conv = self.cls_convs[0][depth].conv
                self.reg_convs[level][depth].conv = self.reg_convs[0][depth].conv
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_base_priors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_base_priors * 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        super(RetinaSepBNHead, self).init_weights()
        # Only level 0 needs explicit init: the other levels share its convs.
        for module in self.cls_convs[0]:
            normal_init(module.conv, std=0.01)
        for module in self.reg_convs[0]:
            normal_init(module.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually a tuple of classification scores and bbox prediction
                cls_scores (list[Tensor]): Classification scores for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for level, feat in enumerate(feats):
            cls_feat = feat
            reg_feat = feat
            for conv in self.cls_convs[level]:
                cls_feat = conv(cls_feat)
            for conv in self.reg_convs[level]:
                reg_feat = conv(reg_feat)
            cls_scores.append(self.retina_cls(cls_feat))
            bbox_preds.append(self.retina_reg(reg_feat))
        return (cls_scores, bbox_preds)
|
@HEADS.register_module()
class SSDHead(AnchorHead):
    """SSD head used in https://arxiv.org/abs/1512.02325.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        stacked_convs (int): Number of conv layers in cls and reg tower.
            Default: 0.
        feat_channels (int): Number of hidden channels when stacked_convs
            > 0. Default: 256.
        use_depthwise (bool): Whether to use DepthwiseSeparableConv.
            Default: False.
        conv_cfg (dict): Dictionary to construct and config conv layer.
            Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: None.
        act_cfg (dict): Dictionary to construct and config activation layer.
            Default: None.
        anchor_generator (dict): Config dict for anchor generator
        bbox_coder (dict): Config of bounding box coder.
        reg_decoded_bbox (bool): If true, the regression loss would be
            applied directly on decoded bounding boxes, converting both
            the predicted boxes and regression targets to absolute
            coordinates format. Default False. It should be `True` when
            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self, num_classes=80, in_channels=(512, 1024, 512, 256, 256, 256), stacked_convs=0, feat_channels=256, use_depthwise=False, conv_cfg=None, norm_cfg=None, act_cfg=None, anchor_generator=dict(type='SSDAnchorGenerator', scale_major=False, input_size=300, strides=[8, 16, 32, 64, 100, 300], ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), basesize_ratio_range=(0.1, 0.9)), bbox_coder=dict(type='DeltaXYWHBBoxCoder', clip_border=True, target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[1.0, 1.0, 1.0, 1.0]), reg_decoded_bbox=False, train_cfg=None, test_cfg=None, init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform', bias=0)):
        # NOTE: deliberately skips AnchorHead.__init__ (calls the grandparent)
        # because SSD configures its own layers, losses and sampler below.
        super(AnchorHead, self).__init__(init_cfg)
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.stacked_convs = stacked_convs
        self.feat_channels = feat_channels
        self.use_depthwise = use_depthwise
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # +1 for the background class (SSD classifies with softmax).
        self.cls_out_channels = (num_classes + 1)
        self.prior_generator = build_prior_generator(anchor_generator)
        # Number of base priors per location, one entry per feature level.
        self.num_base_priors = self.prior_generator.num_base_priors
        self._init_layers()
        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.reg_decoded_bbox = reg_decoded_bbox
        self.use_sigmoid_cls = False
        self.cls_focal_loss = False
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        # SSD mines hard negatives in loss_single instead of sampling.
        self.sampling = False
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            # A PseudoSampler keeps the target pipeline uniform without
            # actually subsampling anchors.
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        self.fp16_enabled = False

    @property
    def num_anchors(self):
        """
        Returns:
            list[int]: Number of base_anchors on each point of each level.
        """
        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, please use "num_base_priors" instead')
        return self.num_base_priors

    def _init_layers(self):
        """Initialize layers of the head."""
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        conv = (DepthwiseSeparableConvModule if self.use_depthwise else ConvModule)
        # Each feature level gets its own cls/reg branch with its own number
        # of base priors.
        for (channel, num_base_priors) in zip(self.in_channels, self.num_base_priors):
            cls_layers = []
            reg_layers = []
            in_channel = channel
            # Optional conv tower before the prediction layer.
            for i in range(self.stacked_convs):
                cls_layers.append(conv(in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
                reg_layers.append(conv(in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
                in_channel = self.feat_channels
            if self.use_depthwise:
                # Extra depthwise 3x3 so the final prediction conv can be 1x1.
                cls_layers.append(ConvModule(in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
                reg_layers.append(ConvModule(in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
            cls_layers.append(nn.Conv2d(in_channel, (num_base_priors * self.cls_out_channels), kernel_size=(1 if self.use_depthwise else 3), padding=(0 if self.use_depthwise else 1)))
            reg_layers.append(nn.Conv2d(in_channel, (num_base_priors * 4), kernel_size=(1 if self.use_depthwise else 3), padding=(0 if self.use_depthwise else 1)))
            self.cls_convs.append(nn.Sequential(*cls_layers))
            self.reg_convs.append(nn.Sequential(*reg_layers))

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple:
                cls_scores (list[Tensor]): Classification scores for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for (feat, reg_conv, cls_conv) in zip(feats, self.reg_convs, self.cls_convs):
            cls_scores.append(cls_conv(feat))
            bbox_preds.append(reg_conv(feat))
        return (cls_scores, bbox_preds)

    def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights, bbox_targets, bbox_weights, num_total_samples):
        """Compute loss of a single image.

        Args:
            cls_score (Tensor): Box scores for eachimage
                Has shape (num_total_anchors, num_classes).
            bbox_pred (Tensor): Box energies / deltas for each image
                level with shape (num_total_anchors, 4).
            anchor (Tensor): Box reference for each scale level with shape
                (num_total_anchors, 4).
            labels (Tensor): Labels of each anchors with shape
                (num_total_anchors,).
            label_weights (Tensor): Label weights of each anchor with shape
                (num_total_anchors,)
            bbox_targets (Tensor): BBox regression targets of each anchor
                weight shape (num_total_anchors, 4).
            bbox_weights (Tensor): BBox regression loss weights of each anchor
                with shape (num_total_anchors, 4).
            num_total_samples (int): If sampling, num total samples equal to
                the number of total anchors; Otherwise, it is the number of
                positive anchors.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        loss_cls_all = (F.cross_entropy(cls_score, labels, reduction='none') * label_weights)
        # Foreground: label in [0, num_classes); background: num_classes.
        pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(as_tuple=False).reshape((- 1))
        neg_inds = (labels == self.num_classes).nonzero(as_tuple=False).view((- 1))
        # Online hard-example mining: keep at most neg_pos_ratio negatives
        # per positive, choosing those with the largest classification loss.
        num_pos_samples = pos_inds.size(0)
        num_neg_samples = (self.train_cfg.neg_pos_ratio * num_pos_samples)
        if (num_neg_samples > neg_inds.size(0)):
            num_neg_samples = neg_inds.size(0)
        (topk_loss_cls_neg, _) = loss_cls_all[neg_inds].topk(num_neg_samples)
        loss_cls_pos = loss_cls_all[pos_inds].sum()
        loss_cls_neg = topk_loss_cls_neg.sum()
        loss_cls = ((loss_cls_pos + loss_cls_neg) / num_total_samples)
        if self.reg_decoded_bbox:
            # When the regression loss (e.g. IoULoss) works on absolute
            # coordinates, decode the prediction before computing the loss.
            bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
        loss_bbox = smooth_l1_loss(bbox_pred, bbox_targets, bbox_weights, beta=self.train_cfg.smoothl1_beta, avg_factor=num_total_samples)
        return (loss_cls[None], loss_bbox)

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, img_metas, device=device)
        cls_reg_targets = self.get_targets(anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, unmap_outputs=False)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
        num_images = len(img_metas)
        # Flatten per-level predictions/targets to per-image tensors so the
        # loss can be computed image by image via multi_apply.
        all_cls_scores = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, (- 1), self.cls_out_channels) for s in cls_scores], 1)
        all_labels = torch.cat(labels_list, (- 1)).view(num_images, (- 1))
        all_label_weights = torch.cat(label_weights_list, (- 1)).view(num_images, (- 1))
        all_bbox_preds = torch.cat([b.permute(0, 2, 3, 1).reshape(num_images, (- 1), 4) for b in bbox_preds], (- 2))
        all_bbox_targets = torch.cat(bbox_targets_list, (- 2)).view(num_images, (- 1), 4)
        all_bbox_weights = torch.cat(bbox_weights_list, (- 2)).view(num_images, (- 1), 4)
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))
        (losses_cls, losses_bbox) = multi_apply(self.loss_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos)
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
|
@DETECTORS.register_module()
class ATSS(SingleStageDetector):
    """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Everything is delegated to SingleStageDetector; this class only
        # registers the name and pairs it with an ATSS bbox head via config.
        super(ATSS, self).__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
|
@DETECTORS.register_module()
class AutoAssign(SingleStageDetector):
    """Implementation of `AutoAssign: Differentiable Label Assignment for Dense
    Object Detection <https://arxiv.org/abs/2007.03496>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        """
        Args:
            backbone (dict): Config of the backbone.
            neck (dict): Config of the neck.
            bbox_head (dict): Config of the bbox head.
            train_cfg (dict, optional): Training config.
            test_cfg (dict, optional): Testing config.
            pretrained (str, optional): Deprecated path to pretrained weights.
            init_cfg (dict, optional): Initialization config dict. Added for
                consistency with the sibling ``ATSS`` detector; defaults to
                None so existing callers are unaffected.
        """
        # Fix: previously `init_cfg` could not be passed through, unlike ATSS.
        super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg,
                                         test_cfg, pretrained, init_cfg)
|
class BaseDetector(BaseModule, metaclass=ABCMeta):
'Base class for detectors.'
def __init__(self, init_cfg=None):
    """Initialize the detector base; weight init is driven by ``init_cfg``."""
    super().__init__(init_cfg)
    # Flipped on by the fp16 machinery when mixed precision is enabled.
    self.fp16_enabled = False
@property
def with_neck(self):
    """bool: whether the detector has a neck"""
    return getattr(self, 'neck', None) is not None
@property
def with_shared_head(self):
    """bool: whether the detector has a shared head in the RoI Head"""
    if not hasattr(self, 'roi_head'):
        return False
    return self.roi_head.with_shared_head
@property
def with_bbox(self):
    """bool: whether the detector has a bbox head"""
    # Two-stage detectors carry the bbox head inside the RoI head.
    if hasattr(self, 'roi_head') and self.roi_head.with_bbox:
        return True
    # Single-stage detectors expose `bbox_head` directly.
    return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
    """bool: whether the detector has a mask head"""
    # Two-stage detectors carry the mask head inside the RoI head.
    if hasattr(self, 'roi_head') and self.roi_head.with_mask:
        return True
    # Single-stage instance-seg detectors expose `mask_head` directly.
    return hasattr(self, 'mask_head') and self.mask_head is not None
@abstractmethod
def extract_feat(self, imgs):
    """Extract features from images.

    Subclasses must implement this (typically backbone + optional neck).
    """
    pass
def extract_feats(self, imgs):
    """Extract features from multiple images.

    Args:
        imgs (list[torch.Tensor]): A list of images. The images are
            augmented from the same image but in different ways.

    Returns:
        list[torch.Tensor]: Features of different images
    """
    assert isinstance(imgs, list)
    return list(map(self.extract_feat, imgs))
def forward_train(self, imgs, img_metas, **kwargs):
    """
    Args:
        img (Tensor): of shape (N, C, H, W) encoding input images.
            Typically these should be mean centered and std scaled.
        img_metas (list[dict]): List of image info dict where each dict
            has: 'img_shape', 'scale_factor', 'flip', and may also contain
            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
            For details on the values of these keys, see
            :class:`mmdet.datasets.pipelines.Collect`.
        kwargs (keyword arguments): Specific to concrete implementation.
    """
    # Record the padded (H, W) of the whole batch on every meta dict;
    # subclasses rely on this key during training.
    padded_hw = tuple(imgs[0].size()[-2:])
    for meta in img_metas:
        meta['batch_input_shape'] = padded_hw
async def async_simple_test(self, img, img_metas, **kwargs):
    # Optional async single-image test path; concrete detectors may override.
    raise NotImplementedError
@abstractmethod
def simple_test(self, img, img_metas, **kwargs):
    """Test function without test-time augmentation; must be implemented."""
    pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
    """Test function with test time augmentation.

    Must be implemented by subclasses that support augmented inference.
    """
    pass
async def aforward_test(self, *, img, img_metas, **kwargs):
    """Async test entry point (single augmentation, batch size 1 only)."""
    if not isinstance(img, list):
        raise TypeError(f'img must be a list, but got {type(img)}')
    if not isinstance(img_metas, list):
        raise TypeError(f'img_metas must be a list, but got {type(img_metas)}')
    num_augs = len(img)
    if num_augs != len(img_metas):
        raise ValueError(f'num of augmentations ({len(img)}) != num of image metas ({len(img_metas)})')
    # TODO: remove the restriction of samples_per_gpu == 1 when supported
    samples_per_gpu = img[0].size(0)
    assert samples_per_gpu == 1
    if num_augs != 1:
        raise NotImplementedError
    return await self.async_simple_test(img[0], img_metas[0], **kwargs)
def forward_test(self, imgs, img_metas, **kwargs):
    """Test entry point dispatching to simple or augmented testing.

    Args:
        imgs (List[Tensor]): the outer list indicates test-time
            augmentations and inner Tensor should have a shape NxCxHxW,
            which contains all images in the batch.
        img_metas (List[List[dict]]): the outer list indicates test-time
            augs (multiscale, flip, etc.) and the inner list indicates
            images in a batch.
    """
    for var, name in ((imgs, 'imgs'), (img_metas, 'img_metas')):
        if not isinstance(var, list):
            raise TypeError(f'{name} must be a list, but got {type(var)}')
    num_augs = len(imgs)
    if num_augs != len(img_metas):
        raise ValueError(f'num of augmentations ({len(imgs)}) != num of image meta ({len(img_metas)})')
    # Record the padded batch input shape into every per-image meta dict.
    for img, img_meta in zip(imgs, img_metas):
        for meta in img_meta:
            meta['batch_input_shape'] = tuple(img.size()[-2:])
    if num_augs == 1:
        # Proposals (if any) are double-nested like imgs; unwrap one level.
        if 'proposals' in kwargs:
            kwargs['proposals'] = kwargs['proposals'][0]
        return self.simple_test(imgs[0], img_metas[0], **kwargs)
    assert imgs[0].size(0) == 1, f'aug test does not support inference with batch size {imgs[0].size(0)}'
    assert 'proposals' not in kwargs
    return self.aug_test(imgs, img_metas, **kwargs)
@auto_fp16(apply_to=('img',))
def forward(self, img, img_metas, return_loss=True, **kwargs):
    """Calls either :func:`forward_train` or :func:`forward_test` depending
    on whether ``return_loss`` is ``True``.

    Note this setting will change the expected inputs. When
    ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
    and List[dict]), and when ``return_loss=False``, img and img_meta
    should be double nested (i.e. List[Tensor], List[List[dict]]), with
    the outer list indicating test time augmentations.
    """
    # ONNX export tracing takes precedence over the train/test split.
    if torch.onnx.is_in_onnx_export():
        assert len(img_metas) == 1
        return self.onnx_export(img[0], img_metas[0])
    if return_loss:
        return self.forward_train(img, img_metas, **kwargs)
    return self.forward_test(img, img_metas, **kwargs)
def _parse_losses(self, losses):
'Parse the raw outputs (losses) of the network.\n\n Args:\n losses (dict): Raw output of the network, which usually contain\n losses and other necessary information.\n\n Returns:\n tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor which may be a weighted sum of all losses, log_vars contains all the variables to be sent to the logger.\n '
log_vars = OrderedDict()
for (loss_name, loss_value) in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum((_loss.mean() for _loss in loss_value))
else:
raise TypeError(f'{loss_name} is not a tensor or list of tensors')
loss = sum((_value for (_key, _value) in log_vars.items() if ('loss' in _key)))
if (dist.is_available() and dist.is_initialized()):
log_var_length = torch.tensor(len(log_vars), device=loss.device)
dist.all_reduce(log_var_length)
message = (((f'rank {dist.get_rank()}' + f' len(log_vars): {len(log_vars)}') + ' keys: ') + ','.join(log_vars.keys()))
assert (log_var_length == (len(log_vars) * dist.get_world_size())), ('loss log variables are different across GPUs!\n' + message)
log_vars['loss'] = loss
for (loss_name, loss_value) in log_vars.items():
if (dist.is_available() and dist.is_initialized()):
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return (loss, log_vars)
def train_step(self, data, optimizer):
    """The iteration step during training.

    This method defines an iteration step during training, except for the
    back propagation and optimizer updating, which are done in an optimizer
    hook.

    Args:
        data (dict): The output of dataloader.
        optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
            runner is passed to ``train_step()``. This argument is unused
            and reserved.

    Returns:
        dict: Contains ``loss`` (tensor for back propagation), ``log_vars``
            (variables to be sent to the logger) and ``num_samples``
            (per-GPU batch size, used for averaging the logs).
    """
    raw_losses = self(**data)
    loss, log_vars = self._parse_losses(raw_losses)
    # num_samples is the per-GPU batch size, taken from the meta list.
    return dict(loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
def val_step(self, data, optimizer=None):
    """The iteration step during validation.

    This method shares the same signature as :func:`train_step`, but used
    during val epochs. Note that the evaluation after training epochs is
    not implemented with this method, but an evaluation hook.
    """
    raw_losses = self(**data)
    loss, log_vars = self._parse_losses(raw_losses)
    return dict(loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
def show_result(self, img, result, score_thr=0.3, bbox_color=(72, 101, 241), text_color=(72, 101, 241), mask_color=None, thickness=2, font_size=13, win_name='', show=False, wait_time=0, out_file=None):
    """Draw `result` over `img`.

    Args:
        img (str or Tensor): The image to be displayed.
        result (Tensor or tuple): The results to draw over `img`,
            either ``bbox_result`` or ``(bbox_result, segm_result)``.
        score_thr (float, optional): Minimum score of bboxes to be shown.
            Default: 0.3.
        bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox lines
            (BGR order).
        text_color (str or tuple(int) or :obj:`Color`): Color of texts
            (BGR order).
        mask_color (None or str or tuple(int) or :obj:`Color`): Color of
            masks (BGR order). Default: None.
        thickness (int): Thickness of lines. Default: 2.
        font_size (int): Font size of texts. Default: 13.
        win_name (str): The window name. Default: ''.
        show (bool): Whether to show the image. Default: False.
        wait_time (float): Value of waitKey param. Default: 0.
        out_file (str or None): The filename to write the image.
            Default: None.

    Returns:
        img (Tensor): Only if not `show` or `out_file`.
    """
    img = mmcv.imread(img).copy()
    # Split boxes and masks; some heads return segm results as a tuple
    # whose first element holds the masks.
    if isinstance(result, tuple):
        bbox_result, segm_result = result
        if isinstance(segm_result, tuple):
            segm_result = segm_result[0]
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # bbox_result is indexed by class: build a matching label array.
    labels = np.concatenate([
        np.full(per_cls.shape[0], cls_id, dtype=np.int32)
        for cls_id, per_cls in enumerate(bbox_result)
    ])
    segms = None
    if segm_result is not None and len(labels) > 0:
        segms = mmcv.concat_list(segm_result)
        if isinstance(segms[0], torch.Tensor):
            segms = torch.stack(segms, dim=0).detach().cpu().numpy()
        else:
            segms = np.stack(segms, axis=0)
    # Writing to file suppresses the interactive window.
    if out_file is not None:
        show = False
    img = imshow_det_bboxes(img, bboxes, labels, segms, class_names=self.CLASSES, score_thr=score_thr, bbox_color=bbox_color, text_color=text_color, mask_color=mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file)
    if not (show or out_file):
        return img
def onnx_export(self, img, img_metas):
    """ONNX export is unsupported unless a subclass overrides this."""
    raise NotImplementedError(f'{self.__class__.__name__} does not support ONNX EXPORT')
|
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
    """Implementation of `Cascade R-CNN: Delving into High Quality Object
    Detection <https://arxiv.org/abs/1906.09756>`_."""

    def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        components = dict(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
        super().__init__(**components)

    def show_result(self, data, result, **kwargs):
        """Show prediction results of the detector.

        Args:
            data (str or np.ndarray): Image filename or loaded image.
            result (Tensor or tuple): The results to draw over `img`,
                either ``bbox_result`` or ``(bbox_result, segm_result)``.

        Returns:
            np.ndarray: The image with bboxes drawn on it.
        """
        # Multi-scale results come wrapped in dicts keyed by fusion
        # strategy; keep only the 'ensemble' entry before drawing.
        if self.with_mask:
            ms_bbox_result, ms_segm_result = result
            if isinstance(ms_bbox_result, dict):
                result = (ms_bbox_result['ensemble'], ms_segm_result['ensemble'])
        elif isinstance(result, dict):
            result = result['ensemble']
        return super().show_result(data, result, **kwargs)
|
@DETECTORS.register_module()
class CenterNet(SingleStageDetector):
    """Implementation of CenterNet (Objects as Points)
    <https://arxiv.org/abs/1904.07850>.
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)

    def merge_aug_results(self, aug_results, with_nms):
        """Merge augmented detection bboxes and score.

        Args:
            aug_results (list[list[Tensor]]): Det_bboxes and det_labels of
                each image.
            with_nms (bool): If True, do nms before return boxes.

        Returns:
            tuple: (out_bboxes, out_labels)
        """
        (recovered_bboxes, aug_labels) = ([], [])
        for single_result in aug_results:
            # single_result[0] is the (det_bboxes, det_labels) pair for the
            # single image in this augmentation.
            recovered_bboxes.append(single_result[0][0])
            aug_labels.append(single_result[0][1])
        bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()
        labels = torch.cat(aug_labels).contiguous()
        if with_nms:
            (out_bboxes, out_labels) = self.bbox_head._bboxes_nms(bboxes, labels, self.bbox_head.test_cfg)
        else:
            (out_bboxes, out_labels) = (bboxes, labels)
        return (out_bboxes, out_labels)

    def aug_test(self, imgs, img_metas, rescale=True):
        """Augment testing of CenterNet.

        Aug test must have flipped image pair, and unlike CornerNet, it
        averages the feature maps of the original/flipped pair instead of
        merging detected boxes.

        Args:
            imgs (list[Tensor]): Augmented images.
            img_metas (list[list[dict]]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            rescale (bool): If True, return boxes in original image space.
                Default: True.

        Note:
            ``imgs`` must include flipped image pairs.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
        """
        img_inds = list(range(len(imgs)))
        # NOTE(review): this only checks that the 'flip' flags of the first
        # two augmentations sum to a truthy value (i.e. at least one is
        # flipped); it does not strictly verify original/flipped pairing.
        assert (img_metas[0][0]['flip'] + img_metas[1][0]['flip']), 'aug test must have flipped image pair'
        aug_results = []
        # Consecutive augmentations are treated as (original, flipped) pairs.
        for (ind, flip_ind) in zip(img_inds[0::2], img_inds[1::2]):
            flip_direction = img_metas[flip_ind][0]['flip_direction']
            # Batch the pair so one forward pass yields both predictions.
            img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
            x = self.extract_feat(img_pair)
            (center_heatmap_preds, wh_preds, offset_preds) = self.bbox_head(x)
            assert (len(center_heatmap_preds) == len(wh_preds) == len(offset_preds) == 1)
            # Average original prediction with the un-flipped flipped one
            # (feature-level fusion). Index 0:1 is the original image,
            # 1:2 the flipped image within the batched pair.
            center_heatmap_preds[0] = ((center_heatmap_preds[0][0:1] + flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2)
            wh_preds[0] = ((wh_preds[0][0:1] + flip_tensor(wh_preds[0][1:2], flip_direction)) / 2)
            # Offsets from the flipped image are discarded; only the
            # original-image offsets feed get_bboxes.
            bbox_list = self.bbox_head.get_bboxes(center_heatmap_preds, wh_preds, [offset_preds[0][0:1]], img_metas[ind], rescale=rescale, with_nms=False)
            aug_results.append(bbox_list)
        # NMS is applied across augmentations only if the head's test_cfg
        # provides an 'nms_cfg' entry.
        nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)
        if (nms_cfg is None):
            with_nms = False
        else:
            with_nms = True
        bbox_list = [self.merge_aug_results(aug_results, with_nms)]
        bbox_results = [bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for (det_bboxes, det_labels) in bbox_list]
        return bbox_results
|
@DETECTORS.register_module()
class DeformableDETR(DETR):
    """Deformable DETR detector; reuses DETR's test/inference logic.

    NOTE(review): in this file ``DETR`` is defined *after* this class, so the
    base-class reference would raise ``NameError`` at import time — confirm
    the definition order.
    """

    def __init__(self, *args, **kwargs):
        # Intentionally skips DETR.__init__ (whose signature differs) and
        # dispatches straight to the next class in the MRO after DETR.
        super(DETR, self).__init__(*args, **kwargs)
|
@DETECTORS.register_module()
class DETR(SingleStageDetector):
    """Implementation of `DETR: End-to-End Object Detection with
    Transformers <https://arxiv.org/pdf/2005.12872>`_."""

    def __init__(self, backbone, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # DETR has no neck: pass None explicitly in the neck slot.
        super().__init__(backbone, None, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`.
        """
        warnings.warn('Warning! MultiheadAttention in DETR does not support flops computation! Do not use the results in your papers!')
        batch_size, _, height, width = img.shape
        # The transformer head needs image metas even for a dummy pass.
        dummy_img_metas = [
            dict(batch_input_shape=(height, width), img_shape=(height, width, 3))
            for _ in range(batch_size)
        ]
        feats = self.extract_feat(img)
        return self.bbox_head(feats, dummy_img_metas)

    def onnx_export(self, img, img_metas):
        """Test function for exporting to ONNX, without test time
        augmentation.

        Args:
            img (torch.Tensor): input images.
            img_metas (list[dict]): List of image information.

        Returns:
            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
                and class labels of shape [N, num_det].
        """
        feats = self.extract_feat(img)
        outs = self.bbox_head.forward_onnx(feats, img_metas)
        # Dynamic-shape export: keep the (H, W) as a traced tensor.
        img_metas[0]['img_shape_for_onnx'] = torch._shape_as_tensor(img)[2:]
        det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
        return det_bboxes, det_labels
|
@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
    """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_."""

    def __init__(self, backbone, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        super(FastRCNN, self).__init__(backbone=backbone, neck=neck, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """Test entry point; Fast R-CNN requires precomputed proposals.

        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
            proposals (List[List[Tensor]]): the outer list indicates
                test-time augs (multiscale, flip, etc.) and the inner list
                indicates images in a batch. The Tensor should have a shape
                Px4, where P is the number of proposals.

        Raises:
            TypeError: If ``imgs`` or ``img_metas`` is not a list.
            ValueError: If the number of augmentations mismatches.
            NotImplementedError: For multi-augmentation (aug) testing.
        """
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) != num of image meta ({len(img_metas)})')
        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], proposals[0], **kwargs)
        # BUG FIX: the original `assert NotImplementedError` always passed
        # (a class object is truthy) and silently returned None for the
        # multi-aug path; raise explicitly instead.
        raise NotImplementedError('aug test is not supported by FastRCNN')
|
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
    """Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_."""

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        components = dict(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
        super().__init__(**components)
|
@DETECTORS.register_module()
class FCOS(SingleStageDetector):
    """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: all FCOS-specific behavior lives in the bbox_head.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
|
@DETECTORS.register_module()
class FOVEA(SingleStageDetector):
    """Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: all FoveaBox-specific behavior lives in the bbox_head.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
|
@DETECTORS.register_module()
class FSAF(SingleStageDetector):
    """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: all FSAF-specific behavior lives in the bbox_head.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
|
@DETECTORS.register_module()
class GFL(SingleStageDetector):
    """Single-stage detector using a Generalized Focal Loss head."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: all GFL-specific behavior lives in the bbox_head.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
|
@DETECTORS.register_module()
class GridRCNN(TwoStageDetector):
    """Grid R-CNN.

    This detector is the implementation of:
    - Grid R-CNN (https://arxiv.org/abs/1811.12030)
    - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
    """

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        components = dict(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
        super().__init__(**components)
|
@DETECTORS.register_module()
class HybridTaskCascade(CascadeRCNN):
    """Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def with_semantic(self):
        """bool: whether the detector has a semantic head."""
        return self.roi_head.with_semantic
|
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
    """Implementation of `Distilling the Knowledge in a Neural Network.
    <https://arxiv.org/abs/1503.02531>`_.

    Args:
        teacher_config (str | dict): Config file path
            or the config object of teacher model.
        teacher_ckpt (str, optional): Checkpoint path of teacher model.
            If left as None, the model will not load any weights.
    """

    def __init__(self, backbone, neck, bbox_head, teacher_config, teacher_ckpt=None, eval_teacher=True, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
        # When True, the teacher is pinned to eval mode regardless of the
        # student's train/eval mode (see ``train`` below).
        self.eval_teacher = eval_teacher
        # Accept either a config file path or an already-parsed config.
        if isinstance(teacher_config, str):
            teacher_config = mmcv.Config.fromfile(teacher_config)
        self.teacher_model = build_detector(teacher_config['model'])
        if (teacher_ckpt is not None):
            load_checkpoint(self.teacher_model, teacher_ckpt, map_location='cpu')

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
        """Compute student losses with teacher head outputs as targets.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also
                contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.extract_feat(img)
        # Teacher forward runs without gradients; only its raw head outputs
        # are handed to the student's head for distillation.
        with torch.no_grad():
            teacher_x = self.teacher_model.extract_feat(img)
            out_teacher = self.teacher_model.bbox_head(teacher_x)
        losses = self.bbox_head.forward_train(x, out_teacher, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore)
        return losses

    def cuda(self, device=None):
        """Since teacher_model is registered as a plain object, it is necessary
        to put the teacher model to cuda when calling cuda function."""
        self.teacher_model.cuda(device=device)
        return super().cuda(device=device)

    def train(self, mode=True):
        """Set the same train mode for teacher and student model."""
        # With eval_teacher, the teacher never enters train mode, keeping
        # e.g. its normalization statistics frozen.
        if self.eval_teacher:
            self.teacher_model.train(False)
        else:
            self.teacher_model.train(mode)
        super().train(mode)

    def __setattr__(self, name, value):
        """Set attribute, i.e. self.name = value

        This reloading prevent the teacher model from being registered as a
        nn.Module. The teacher module is registered as a plain object, so that
        the teacher parameters will not show up when calling
        ``self.parameters``, ``self.modules``, ``self.children`` methods.
        """
        if (name == 'teacher_model'):
            # Bypass nn.Module.__setattr__ to avoid submodule registration.
            object.__setattr__(self, name, value)
        else:
            super().__setattr__(name, value)
|
@DETECTORS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
    """Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""

    def __init__(self, backbone, neck, bbox_head, teacher_backbone, teacher_neck, teacher_bbox_head, teacher_ckpt, eval_teacher=True, train_cfg=None, test_cfg=None, pretrained=None):
        # Intentionally skips KnowledgeDistillationSingleStageDetector's
        # __init__ (which builds the teacher from a config file) and calls
        # SingleStageDetector.__init__ directly; the teacher here is built
        # from explicit component configs instead.
        super(KnowledgeDistillationSingleStageDetector, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
        self.eval_teacher = eval_teacher
        # A bare nn.Module container: assigned via the parent's __setattr__
        # override, so the teacher is not registered as a submodule.
        self.teacher_model = nn.Module()
        self.teacher_model.backbone = build_backbone(teacher_backbone)
        if (teacher_neck is not None):
            self.teacher_model.neck = build_neck(teacher_neck)
        # The teacher head shares the student's train/test configs.
        teacher_bbox_head.update(train_cfg=train_cfg)
        teacher_bbox_head.update(test_cfg=test_cfg)
        self.teacher_model.bbox_head = build_head(teacher_bbox_head)
        if (teacher_ckpt is not None):
            load_checkpoint(self.teacher_model, teacher_ckpt, map_location='cpu')

    @property
    def with_teacher_neck(self):
        """bool: whether the detector has a teacher_neck"""
        return (hasattr(self.teacher_model, 'neck') and (self.teacher_model.neck is not None))

    def extract_teacher_feat(self, img):
        """Directly extract teacher features from the backbone+neck."""
        x = self.teacher_model.backbone(img)
        if self.with_teacher_neck:
            x = self.teacher_model.neck(x)
        return x

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
        """Train the student with label assignments computed by the teacher.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also
                contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # The teacher only provides label assignments (no gradients); the
        # student's own predictions are supervised under that assignment.
        with torch.no_grad():
            x_teacher = self.extract_teacher_feat(img)
            outs_teacher = self.teacher_model.bbox_head(x_teacher)
            label_assignment_results = self.teacher_model.bbox_head.get_label_assignment(*outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
        x = self.extract_feat(img)
        losses = self.bbox_head.forward_train(x, label_assignment_results, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore)
        return losses
|
@DETECTORS.register_module()
class MaskRCNN(TwoStageDetector):
    """Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_."""

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        components = dict(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
        super().__init__(**components)
|
@DETECTORS.register_module()
class MaskScoringRCNN(TwoStageDetector):
    """Mask Scoring RCNN.

    https://arxiv.org/abs/1903.00241
    """

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        components = dict(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
        super().__init__(**components)
|
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
    """NAS-FCOS: Fast Neural Architecture Search for Object Detection.

    https://arxiv.org/abs/1906.0442
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: NAS-FCOS specifics live in the searched neck/head.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
|
@DETECTORS.register_module()
class PAA(SingleStageDetector):
    """Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: all PAA-specific behavior lives in the bbox_head.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
|
@DETECTORS.register_module()
class PanopticFPN(TwoStagePanopticSegmentor):
    """Implementation of `Panoptic feature pyramid
    networks <https://arxiv.org/pdf/1901.02446>`_."""

    def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None, semantic_head=None, panoptic_fusion_head=None):
        components = dict(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg, semantic_head=semantic_head, panoptic_fusion_head=panoptic_fusion_head)
        super().__init__(**components)
|
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
    """PointRend: Image Segmentation as Rendering.

    This detector is the implementation of
    `PointRend <https://arxiv.org/abs/1912.08193>`_.
    """

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        components = dict(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
        super().__init__(**components)
|
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
    """Implementation of
    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_."""

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        components = dict(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
        super().__init__(**components)
|
@DETECTORS.register_module()
class RepPointsDetector(SingleStageDetector):
    """RepPoints: Point Set Representation for Object Detection.

    This detector is the implementation of:
    - RepPoints detector (https://arxiv.org/pdf/1904.11490)
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: RepPoints-specific behavior lives in the bbox_head.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
|
@DETECTORS.register_module()
class RetinaNet(SingleStageDetector):
    """Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: RetinaNet-specific behavior lives in the bbox_head.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
|
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
    """Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
|
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
    """Base class for single-stage detectors.

    Single-stage detectors directly and densely predict bounding boxes on
    the output features of the backbone+neck.
    """

    def __init__(self, backbone, neck=None, bbox_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        super().__init__(init_cfg)
        if pretrained:
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            # Legacy path: forward the checkpoint path to the backbone cfg.
            backbone.pretrained = pretrained
        self.backbone = build_backbone(backbone)
        if neck is not None:
            self.neck = build_neck(neck)
        # The head receives both cfgs so it can build assigners/samplers.
        bbox_head.update(train_cfg=train_cfg)
        bbox_head.update(test_cfg=test_cfg)
        self.bbox_head = build_head(bbox_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        feats = self.backbone(img)
        if self.with_neck:
            feats = self.neck(feats)
        return feats

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`.
        """
        feats = self.extract_feat(img)
        return self.bbox_head(feats)

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
        """Forward training pass.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also
                contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # Base class records batch_input_shape into the metas.
        super().forward_train(img, img_metas)
        feats = self.extract_feat(img)
        return self.bbox_head.forward_train(feats, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore)

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test-time augmentation.

        Args:
            img (torch.Tensor): Images with shape (N, C, H, W).
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
        """
        feat = self.extract_feat(img)
        results_list = self.bbox_head.simple_test(feat, img_metas, rescale=rescale)
        return [
            bbox2result(boxes, classes, self.bbox_head.num_classes)
            for boxes, classes in results_list
        ]

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            imgs (list[Tensor]): the outer list indicates test-time
                augmentations; inner Tensors are NxCxHxW batches.
            img_metas (list[list[dict]]): per-augmentation, per-image info.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
        """
        assert hasattr(self.bbox_head, 'aug_test'), f'{self.bbox_head.__class__.__name__} does not support test-time augmentation'
        feats = self.extract_feats(imgs)
        results_list = self.bbox_head.aug_test(feats, img_metas, rescale=rescale)
        return [
            bbox2result(boxes, classes, self.bbox_head.num_classes)
            for boxes, classes in results_list
        ]

    def onnx_export(self, img, img_metas, with_nms=True):
        """Test function without test time augmentation, for ONNX export.

        Args:
            img (torch.Tensor): input images.
            img_metas (list[dict]): List of image information.

        Returns:
            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
                and class labels of shape [N, num_det].
        """
        feats = self.extract_feat(img)
        outs = self.bbox_head(feats)
        # Keep shapes as traced tensors so the exported graph stays dynamic.
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
        img_metas[0]['pad_shape_for_onnx'] = img_shape
        if len(outs) == 2:
            # Heads without score factors: pad with a placeholder so the
            # head's onnx_export signature is satisfied.
            outs = (*outs, None)
        det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas, with_nms=with_nms)
        return det_bboxes, det_labels
|
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
    """`SOLO: Segmenting Objects by Locations
    <https://arxiv.org/abs/1912.04488>`_."""

    def __init__(self, backbone, neck=None, bbox_head=None, mask_head=None, train_cfg=None, test_cfg=None, init_cfg=None, pretrained=None):
        components = dict(backbone=backbone, neck=neck, bbox_head=bbox_head, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg, pretrained=pretrained)
        super().__init__(**components)
|
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
    """Implementation of `Sparse R-CNN: End-to-End Object Detection with
    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""

    def __init__(self, *args, **kwargs):
        super(SparseRCNN, self).__init__(*args, **kwargs)
        # Proposals are learned embeddings produced by the "rpn" head.
        assert self.with_rpn, 'Sparse R-CNN and QueryInst do not support external proposals'

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs):
        """Forward function of SparseR-CNN and QueryInst in train stage.

        Args:
            img (Tensor): Input images of shape (N, C, H, W), typically
                mean-centered and std-scaled.
            img_metas (list[dict]): Image info dicts ('img_shape',
                'scale_factor', 'flip', ...). See
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Ground truth boxes per image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices for each box.
            gt_bboxes_ignore (None | list[Tensor]): Boxes that may be
                ignored when computing the loss.
            gt_masks (list[Tensor], optional): Segmentation masks; required
                to train QueryInst.
            proposals (list[Tensor], optional): Must be None here; learnable
                proposals come from the embedding head.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert proposals is None, 'Sparse R-CNN and QueryInst do not support external proposals'
        feats = self.extract_feat(img)
        proposal_boxes, proposal_feats, imgs_whwh = self.rpn_head.forward_train(feats, img_metas)
        return self.roi_head.forward_train(
            feats,
            proposal_boxes,
            proposal_feats,
            img_metas,
            gt_bboxes,
            gt_labels,
            gt_bboxes_ignore=gt_bboxes_ignore,
            gt_masks=gt_masks,
            imgs_whwh=imgs_whwh)

    def simple_test(self, img, img_metas, rescale=False):
        """Test without test-time augmentation.

        Args:
            img (Tensor): Input images.
            img_metas (list[dict]): List of image information.
            rescale (bool): Whether to rescale the results to the original
                image scale. Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results; outer list per image,
                inner list per class.
        """
        feats = self.extract_feat(img)
        proposal_boxes, proposal_feats, imgs_whwh = self.rpn_head.simple_test_rpn(feats, img_metas)
        return self.roi_head.simple_test(
            feats,
            proposal_boxes,
            proposal_feats,
            img_metas,
            imgs_whwh=imgs_whwh,
            rescale=rescale)

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`.
        """
        feats = self.extract_feat(img)
        # A plausible fixed shape is enough for FLOPs accounting.
        dummy_img_metas = [dict(img_shape=(800, 1333, 3)) for _ in range(len(img))]
        proposal_boxes, proposal_feats, _ = self.rpn_head.simple_test_rpn(feats, dummy_img_metas)
        return self.roi_head.forward_dummy(feats, proposal_boxes, proposal_feats, dummy_img_metas)
|
@DETECTORS.register_module()
class TOOD(SingleStageDetector):
    """Implementation of `TOOD: Task-aligned One-stage Object Detection.
    <https://arxiv.org/abs/2108.07755>`_."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg,
                                   test_cfg, pretrained, init_cfg)

    def set_epoch(self, epoch):
        # The task-aligned head changes behaviour over training time, so a
        # training hook forwards the current epoch to it here.
        self.bbox_head.epoch = epoch
|
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
    """Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        super(TridentFasterRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
        # Backbone and RoI head must agree on the trident configuration.
        assert self.backbone.num_branch == self.roi_head.num_branch
        assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
        self.num_branch = self.backbone.num_branch
        self.test_branch_idx = self.backbone.test_branch_idx

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if proposals is None:
            # With test_branch_idx == -1 every branch is evaluated, so the
            # metas are replicated once per branch.
            num_branch = self.num_branch if self.test_branch_idx == -1 else 1
            trident_img_metas = img_metas * num_branch
            proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
        else:
            proposal_list = proposals
            # BUGFIX: `trident_img_metas` was previously assigned only in the
            # `proposals is None` branch, so supplying external proposals
            # raised a NameError on the return line below.
            trident_img_metas = img_metas
        return self.roi_head.simple_test(x, proposal_list, trident_img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        num_branch = self.num_branch if self.test_branch_idx == -1 else 1
        # Replicate each augmentation's metas per branch (renamed loop
        # variable to avoid shadowing the outer `img_metas`).
        trident_img_metas = [meta * num_branch for meta in img_metas]
        proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
        return self.roi_head.aug_test(x, proposal_list, img_metas, rescale=rescale)

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
        """Make copies of img and gts to fit multi-branch."""
        trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
        trident_gt_labels = tuple(gt_labels * self.num_branch)
        trident_img_metas = tuple(img_metas * self.num_branch)
        # NOTE(review): extra kwargs (e.g. gt_bboxes_ignore) are intentionally
        # not forwarded — they would need the same per-branch replication.
        return super(TridentFasterRCNN, self).forward_train(img, trident_img_metas, trident_gt_bboxes, trident_gt_labels)
|
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
    """Base class for two-stage detectors.

    Two-stage detectors typically consisting of a region proposal network and a
    task-specific regression head.
    """

    def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        super(TwoStageDetector, self).__init__(init_cfg)
        if pretrained:
            # `pretrained` is deprecated; it is folded into the backbone
            # config (and copied onto `roi_head` further down).
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            backbone.pretrained = pretrained
        self.backbone = build_backbone(backbone)
        if (neck is not None):
            self.neck = build_neck(neck)
        if (rpn_head is not None):
            # The RPN head trains with `train_cfg.rpn` and tests with
            # `test_cfg.rpn`; the config dict is copied so the caller's
            # dict is not mutated.
            rpn_train_cfg = (train_cfg.rpn if (train_cfg is not None) else None)
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)
        if (roi_head is not None):
            # NOTE(review): unlike `rpn_head`, `roi_head` is updated in
            # place, so the caller's config dict is mutated here.
            rcnn_train_cfg = (train_cfg.rcnn if (train_cfg is not None) else None)
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            roi_head.pretrained = pretrained
            self.roi_head = build_head(roi_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    @property
    def with_rpn(self):
        """bool: whether the detector has RPN"""
        return (hasattr(self, 'rpn_head') and (self.rpn_head is not None))

    @property
    def with_roi_head(self):
        """bool: whether the detector has a RoI head"""
        return (hasattr(self, 'roi_head') and (self.roi_head is not None))

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        outs = ()
        x = self.extract_feat(img)
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = (outs + (rpn_outs,))
        # 1000 random boxes stand in for real proposals when profiling.
        proposals = torch.randn(1000, 4).to(img.device)
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = (outs + (roi_outs,))
        return outs

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs):
        """Train-time forward: RPN losses + RoI head losses.

        Args:
            img (Tensor): Input images of shape (N, C, H, W), typically
                mean-centered and std-scaled.
            img_metas (list[dict]): Image info dicts ('img_shape',
                'scale_factor', 'flip', ...). For the values of these keys
                see `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): Ground truth boxes per image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices for each box.
            gt_bboxes_ignore (None | list[Tensor]): Boxes that may be
                ignored when computing the loss.
            gt_masks (None | Tensor): True segmentation masks for each box,
                used if the architecture supports a segmentation task.
            proposals: Overrides RPN proposals with custom proposals; use
                when `with_rpn` is False.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        x = self.extract_feat(img)
        losses = dict()
        if self.with_rpn:
            proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn)
            # The RPN is class-agnostic, hence gt_labels=None.
            (rpn_losses, proposal_list) = self.rpn_head.forward_train(x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore, proposal_cfg=proposal_cfg, **kwargs)
            losses.update(rpn_losses)
        else:
            proposal_list = proposals
        roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks, **kwargs)
        losses.update(roi_losses)
        return losses

    async def async_simple_test(self, img, img_meta, proposals=None, rescale=False):
        """Async test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            proposal_list = (await self.rpn_head.async_simple_test_rpn(x, img_meta))
        else:
            proposal_list = proposals
        return (await self.roi_head.async_simple_test(x, proposal_list, img_meta, rescale=rescale))

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals
        return self.roi_head.simple_test(x, proposal_list, img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
        return self.roi_head.aug_test(x, proposal_list, img_metas, rescale=rescale)

    def onnx_export(self, img, img_metas):
        """Export-mode forward; delegates box decoding to the RoI head."""
        # Dynamic input shape recorded for in-graph box clipping.
        img_shape = torch._shape_as_tensor(img)[2:]
        img_metas[0]['img_shape_for_onnx'] = img_shape
        x = self.extract_feat(img)
        proposals = self.rpn_head.onnx_export(x, img_metas)
        if hasattr(self.roi_head, 'onnx_export'):
            return self.roi_head.onnx_export(x, proposals, img_metas)
        else:
            raise NotImplementedError(f'{self.__class__.__name__} can not be exported to ONNX. Please refer to the list of supported models,https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx')
|
@DETECTORS.register_module()
class VFNet(SingleStageDetector):
    """Implementation of `VarifocalNet
    (VFNet).<https://arxiv.org/abs/2008.13367>`_"""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Pure configuration subclass: all behaviour lives in the generic
        # single-stage detector and the VFNet head.
        super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
                                    test_cfg, pretrained, init_cfg)
|
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
    """Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""

    def __init__(self, backbone, neck, bbox_head, segm_head, mask_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
        # Heads beyond the generic single-stage detector: a semantic
        # segmentation head (auxiliary, train-time) and the prototype/mask
        # head that assembles instance masks from coefficients.
        self.segm_head = build_head(segm_head)
        self.mask_head = build_head(mask_head)

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`.
        """
        feats = self.extract_feat(img)
        bbox_outs = self.bbox_head(feats)
        prototypes = self.mask_head.forward_dummy(feats[0])
        return bbox_outs, prototypes

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None):
        """Train-time forward pass.

        Args:
            img (Tensor): Input images of shape (N, C, H, W), typically
                mean-centered and std-scaled.
            img_metas (list[dict]): Image info dicts ('img_shape',
                'scale_factor', 'flip', ...). See
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): Ground truth boxes per image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices for each box.
            gt_bboxes_ignore (None | list[Tensor]): Boxes that may be
                ignored when computing the loss.
            gt_masks (None | Tensor): True segmentation masks for each box.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # gt_masks arrive as mask containers; move them to tensors on the
        # input device before any head touches them.
        gt_masks = [mask.to_tensor(dtype=torch.uint8, device=img.device) for mask in gt_masks]
        feats = self.extract_feat(img)
        cls_scores, bbox_preds, coeff_preds = self.bbox_head(feats)
        losses, sampling_results = self.bbox_head.loss(
            cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas,
            gt_bboxes_ignore=gt_bboxes_ignore)
        segm_outs = self.segm_head(feats[0])
        losses.update(self.segm_head.loss(segm_outs, gt_masks, gt_labels))
        mask_preds = self.mask_head(feats[0], coeff_preds, gt_bboxes, img_metas, sampling_results)
        losses.update(self.mask_head.loss(mask_preds, gt_masks, gt_bboxes, img_metas, sampling_results))
        # Fail fast if any loss component diverged.
        for loss_name, loss_value in losses.items():
            assert torch.isfinite(torch.stack(loss_value)).all().item(), f'{loss_name} becomes infinite or NaN!'
        return losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test-time augmentation."""
        feats = self.extract_feat(img)
        det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(feats, img_metas, rescale=rescale)
        bbox_results = [
            bbox2result(bboxes, labels, self.bbox_head.num_classes)
            for bboxes, labels in zip(det_bboxes, det_labels)
        ]
        segm_results = self.mask_head.simple_test(feats, det_bboxes, det_labels, det_coeffs, img_metas, rescale=rescale)
        return list(zip(bbox_results, segm_results))

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations."""
        raise NotImplementedError('YOLACT does not support test-time augmentation')
|
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
    """YOLOv3 detector: plain single-stage detector plus ONNX export."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
                                     test_cfg, pretrained, init_cfg)

    def onnx_export(self, img, img_metas):
        """Test function for exporting to ONNX, without test time augmentation.

        Args:
            img (torch.Tensor): Input images.
            img_metas (list[dict]): List of image information.

        Returns:
            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
                and class labels of shape [N, num_det].
        """
        feats = self.extract_feat(img)
        head_outs = self.bbox_head.forward(feats)
        # Record the dynamic input shape for in-graph box clipping.
        img_metas[0]['img_shape_for_onnx'] = torch._shape_as_tensor(img)[2:]
        det_bboxes, det_labels = self.bbox_head.onnx_export(*head_outs, img_metas)
        return det_bboxes, det_labels
|
@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
    r"""Implementation of `You Only Look One-level Feature
    <https://arxiv.org/abs/2103.09460>`_"""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # CONSISTENCY FIX: accept and forward `init_cfg` like every other
        # detector in this module (TOOD, VFNet, YOLOV3, ...). The new
        # parameter defaults to None, so existing callers are unaffected.
        super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
                                    test_cfg, pretrained, init_cfg)
|
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
    """Calculate accuracy according to the prediction and target.

    Args:
        pred (torch.Tensor): The model prediction, shape (N, num_class).
        target (torch.Tensor): The target of each prediction, shape (N, ).
        topk (int | tuple[int], optional): If the prediction within ``topk``
            matches the target, it is regarded as correct. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Defaults to None.

    Returns:
        float | tuple[float]: A single accuracy when ``topk`` is an int, or
            a tuple of accuracies, one per entry of ``topk``.
    """
    assert isinstance(topk, (int, tuple))
    return_single = isinstance(topk, int)
    if return_single:
        topk = (topk, )
    maxk = max(topk)
    if pred.size(0) == 0:
        # No samples: report zero accuracy for every requested k.
        zeros = [pred.new_tensor(0.) for _ in topk]
        return zeros[0] if return_single else zeros
    assert pred.ndim == 2 and target.ndim == 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
    scores, labels = pred.topk(maxk, dim=1)
    labels = labels.t()  # (maxk, N) so each row is one rank
    hits = labels.eq(target.view(1, -1).expand_as(labels))
    if thresh is not None:
        # Predictions whose score is not above `thresh` never count.
        hits = hits & (scores > thresh).t()
    res = []
    for k in topk:
        correct_k = hits[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / pred.size(0)))
    return res[0] if return_single else res
|
class Accuracy(nn.Module):
    """Module wrapper around :func:`accuracy`.

    Args:
        topk (tuple, optional): The criterion used to calculate the
            accuracy. Defaults to (1, ).
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Defaults to None.
    """

    def __init__(self, topk=(1, ), thresh=None):
        super().__init__()
        self.topk = topk
        self.thresh = thresh

    def forward(self, pred, target):
        """Compute the accuracy of ``pred`` with respect to ``target``.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            tuple[float]: The accuracies under different topk criterions.
        """
        return accuracy(pred, target, self.topk, self.thresh)
|
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'):
    """Calculate balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target, same shape as ``pred``.
        beta (float): Threshold between the small-error (log) branch and the
            large-error (linear) branch. Defaults to 1.0.
        alpha (float): The denominator ``alpha``. Defaults to 0.5.
        gamma (float): The ``gamma`` of the loss. Defaults to 1.5.
        reduction (str, optional): "none", "mean" or "sum" (consumed by the
            ``weighted_loss`` wrapper).

    Returns:
        torch.Tensor: The calculated element-wise loss.
    """
    assert beta > 0
    if target.numel() == 0:
        # Keep the graph connected even with no targets.
        return pred.sum() * 0
    assert pred.size() == target.size()
    diff = torch.abs(pred - target)
    # `b` makes the two branches meet continuously at diff == beta.
    b = np.e**(gamma / alpha) - 1
    small_branch = (alpha / b) * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff
    large_branch = gamma * diff + gamma / b - alpha * beta
    return torch.where(diff < beta, small_branch, large_branch)
|
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
        beta (float, optional): Threshold for the piecewise switch between
            the two branches of the loss. Defaults to 1.0.
        reduction (str, optional): "none", "mean" or "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0.
    """

    def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, 4).
            target (torch.Tensor): The learning target, same shape as pred.
            weight (torch.Tensor, optional): Sample-wise weight, shape (N, ).
            avg_factor (int, optional): Normalizer for averaging the loss.
            reduction_override (str, optional): Overrides the configured
                reduction; one of None, "none", "mean", "sum".

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        raw_loss = balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return self.loss_weight * raw_loss
|
def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=-100):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): Prediction of shape (N, C), C = num classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Normalizer for averaging the loss.
            Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored; None maps
            to the default -100.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if ignore_index is None:
        # Fall back to PyTorch's default ignore index.
        ignore_index = -100
    # Reduce manually below so sample weights can be applied first.
    loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none', ignore_index=ignore_index)
    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
|
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
'Expand onehot labels to match the size of prediction.'
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = ((labels >= 0) & (labels != ignore_index))
inds = torch.nonzero((valid_mask & (labels < label_channels)), as_tuple=False)
if (inds.numel() > 0):
bin_labels[(inds, labels[inds])] = 1
valid_mask = valid_mask.view((- 1), 1).expand(labels.size(0), label_channels).float()
if (label_weights is None):
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view((- 1), 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return (bin_labels, bin_label_weights)
|
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=-100):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): "none", "mean" or "sum".
        avg_factor (int, optional): Normalizer for averaging the loss.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored; None maps
            to the default -100.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if ignore_index is None:
        ignore_index = -100
    if pred.dim() != label.dim():
        # Index labels: expand to one-hot and fold `ignore_index` into the
        # per-element weights.
        label, weight = _expand_onehot_labels(label, weight, pred.size(-1), ignore_index)
    if weight is not None:
        # Element-wise weights must be float for the weighted reduction.
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(pred, label.float(), pos_weight=class_weight, reduction='none')
    return weight_reduce_loss(loss, weight, reduction=reduction, avg_factor=avg_factor)
|
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None, class_weight=None, ignore_index=None):
    """Calculate the CrossEntropy loss for masks.

    For every RoI, picks the mask channel of its ground-truth class and
    applies binary cross entropy (with logits) against ``target``.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C, *), C is the
            number of classes; the trailing * is an arbitrary shape.
        target (torch.Tensor): The learning target of the prediction.
        label (torch.Tensor): Class label of each mask's object, used to
            select the channel when the prediction is not class-agnostic.
        reduction (str, optional): Must be 'mean'.
        avg_factor (int, optional): Must be None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (None): Placeholder to stay consistent with the other
            losses; must be None.

    Returns:
        torch.Tensor: The calculated loss, shape (1,).
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    assert (reduction == 'mean') and (avg_factor is None)
    num_rois = pred.size()[0]
    roi_inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    class_pred = pred[roi_inds, label].squeeze(1)
    # `[None]` keeps a 1-element batch dim so callers can stack losses.
    return F.binary_cross_entropy_with_logits(class_pred, target, weight=class_weight, reduction='mean')[None]
|
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
    """Cross-entropy loss supporting softmax, sigmoid and mask variants.

    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            instead of softmax. Defaults to False.
        use_mask (bool, optional): Whether to use mask cross entropy loss.
            Defaults to False.
        reduction (str, optional): "none", "mean" or "sum". Defaults to
            'mean'.
        class_weight (list[float], optional): Weight of each class.
            Defaults to None.
        ignore_index (int | None): The label index to be ignored.
            Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
    """

    def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean', class_weight=None, ignore_index=None, loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        # The sigmoid and mask variants are mutually exclusive.
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.ignore_index = ignore_index
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, ignore_index=None, **kwargs):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Normalizer for averaging the loss.
            reduction_override (str, optional): Overrides the configured
                reduction; one of None, "none", "mean", "sum".
            ignore_index (int | None): Overrides the configured ignore
                index when not None. Defaults to None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        if ignore_index is None:
            ignore_index = self.ignore_index
        class_weight = None
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(self.class_weight, device=cls_score.device)
        raw_loss = self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            ignore_index=ignore_index,
            **kwargs)
        return self.loss_weight * raw_loss
|
def dice_loss(pred, target, weight=None, eps=0.001, reduction='mean', naive_dice=False, avg_factor=None):
    """Calculate dice loss; two forms are supported:

    - the one proposed in `V-Net <https://arxiv.org/abs/1606.04797>`_
      (squared terms in the denominator);
    - the "naive" dice loss where the denominator uses first powers.

    Args:
        pred (torch.Tensor): The prediction, shape (n, *).
        target (torch.Tensor): The learning label, same shape as ``pred``.
        weight (torch.Tensor, optional): Per-prediction loss weight,
            shape (n,). Defaults to None.
        eps (float): Avoid dividing by zero. Defaults to 1e-3.
        reduction (str, optional): "none", "mean" or "sum".
        naive_dice (bool, optional): Select the naive form when True.
            Defaults to False.
        avg_factor (int, optional): Normalizer for averaging the loss.
    """
    flat_pred = pred.flatten(1)
    flat_target = target.flatten(1).float()
    intersection = torch.sum(flat_pred * flat_target, 1)
    if naive_dice:
        # First-power denominator.
        b = torch.sum(flat_pred, 1)
        c = torch.sum(flat_target, 1)
        d = (2 * intersection + eps) / (b + c + eps)
    else:
        # Squared denominator as in the V-Net paper.
        b = torch.sum(flat_pred * flat_pred, 1) + eps
        c = torch.sum(flat_target * flat_target, 1) + eps
        d = (2 * intersection) / (b + c)
    loss = 1 - d
    if weight is not None:
        assert weight.ndim == loss.ndim
        assert len(weight) == len(pred)
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
|
@LOSSES.register_module()
class DiceLoss(nn.Module):
    """Dice loss module wrapping :func:`dice_loss`.

    Args:
        use_sigmoid (bool, optional): Whether the prediction is activated
            with sigmoid (softmax is not implemented). Defaults to True.
        activate (bool): Whether to activate the predictions inside the
            loss; False disables the internal sigmoid. Defaults to True.
        reduction (str, optional): "none", "mean" or "sum". Defaults to
            'mean'.
        naive_dice (bool, optional): If False, use the V-Net form; if True,
            use the naive form with first-power denominator. Defaults to
            False.
        loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        eps (float): Avoid dividing by zero. Defaults to 1e-3.
    """

    def __init__(self, use_sigmoid=True, activate=True, reduction='mean', naive_dice=False, loss_weight=1.0, eps=0.001):
        super(DiceLoss, self).__init__()
        self.use_sigmoid = use_sigmoid
        self.reduction = reduction
        self.naive_dice = naive_dice
        self.loss_weight = loss_weight
        self.eps = eps
        self.activate = activate

    def forward(self, pred, target, weight=None, reduction_override=None, avg_factor=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction, shape (n, *).
            target (torch.Tensor): The label, same shape as ``pred``.
            weight (torch.Tensor, optional): Per-prediction loss weight,
                shape (n,). Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction; one of None, "none", "mean", "sum".
            avg_factor (int, optional): Normalizer for averaging the loss.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        if self.activate:
            if not self.use_sigmoid:
                # Only sigmoid activation is supported so far.
                raise NotImplementedError
            pred = pred.sigmoid()
        raw_loss = dice_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            naive_dice=self.naive_dice,
            avg_factor=avg_factor)
        return self.loss_weight * raw_loss
|
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the
            prediction in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    # Small constant guarding the logarithms against log(0).
    eps = 1e-12
    # Only exact gaussian peaks (value == 1) count as positives; all other
    # locations are negatives, down-weighted by their closeness to a peak.
    pos_mask = gaussian_target.eq(1)
    neg_mask = (1 - gaussian_target).pow(gamma)
    loss_pos = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_mask
    loss_neg = -(1 - pred + eps).log() * pred.pow(alpha) * neg_mask
    return loss_pos + loss_neg
|
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a gaussian
    heatmap, not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean',
                 loss_weight=1.0):
        super().__init__()
        self.loss_weight = loss_weight
        self.reduction = reduction
        self.gamma = gamma
        self.alpha = alpha

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction
                in gaussian distribution.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used
                to override the original reduction method of the loss.
                Defaults to None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Fall back to the reduction configured at construction time.
        reduction = reduction_override or self.reduction
        return self.loss_weight * gaussian_focal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            reduction=reduction,
            avg_factor=avg_factor)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.