code
stringlengths
17
6.64M
@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
    """PanopticFPNHead used in Panoptic FPN.

    In this head, the number of output channels is ``num_stuff_classes
    + 1``, including all stuff classes and one thing class. The stuff
    classes will be reset from ``0`` to ``num_stuff_classes - 1``, the
    thing classes will be merged to the ``num_stuff_classes``-th channel.

    Args:
        num_things_classes (int): Number of thing classes. Default: 80.
        num_stuff_classes (int): Number of stuff classes. Default: 53.
        num_classes (int): Number of classes, including all stuff
            classes and one thing class. This argument is deprecated,
            please use ``num_things_classes`` and ``num_stuff_classes``.
            The module will automatically infer the num_classes by
            ``num_stuff_classes + 1``.
        in_channels (int): Number of channels in the input feature
            map.
        inner_channels (int): Number of channels in inner features.
        start_level (int): The start level of the input features
            used in PanopticFPN.
        end_level (int): The end level of the used features, the
            ``end_level``-th layer will not be used.
        fg_range (tuple): Range of the foreground classes. It starts
            from ``0`` to ``num_things_classes-1``. Deprecated, please use
            ``num_things_classes`` directly.
        bg_range (tuple): Range of the background classes. It starts
            from ``num_things_classes`` to ``num_things_classes +
            num_stuff_classes - 1``. Deprecated, please use
            ``num_stuff_classes`` and ``num_things_classes`` directly.
        conv_cfg (dict): Dictionary to construct and config
            conv layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Use ``GN`` by default.
        init_cfg (dict or list[dict], optional): Initialization config dict.
        loss_seg (dict): the loss of the semantic head.
    """

    def __init__(self,
                 num_things_classes=80,
                 num_stuff_classes=53,
                 num_classes=None,
                 in_channels=256,
                 inner_channels=128,
                 start_level=0,
                 end_level=4,
                 fg_range=None,
                 bg_range=None,
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 init_cfg=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss',
                     ignore_index=(-1),
                     loss_weight=1.0)):
        if (num_classes is not None):
            # ``num_classes`` is deprecated; it must agree with the value
            # inferred from ``num_stuff_classes + 1``.
            warnings.warn('`num_classes` is deprecated now, please set `num_stuff_classes` directly, the `num_classes` will be set to `num_stuff_classes + 1`')
            assert (num_classes == (num_stuff_classes + 1))
        super(PanopticFPNHead, self).__init__((num_stuff_classes + 1),
                                              init_cfg, loss_seg)
        self.num_things_classes = num_things_classes
        self.num_stuff_classes = num_stuff_classes
        if ((fg_range is not None) and (bg_range is not None)):
            # Deprecated range-style configuration: derive the class counts
            # from the inclusive label ranges instead.
            self.fg_range = fg_range
            self.bg_range = bg_range
            self.num_things_classes = ((fg_range[1] - fg_range[0]) + 1)
            self.num_stuff_classes = ((bg_range[1] - bg_range[0]) + 1)
            warnings.warn(f'`fg_range` and `bg_range` are deprecated now, please use `num_things_classes`={self.num_things_classes} and `num_stuff_classes`={self.num_stuff_classes} instead.')
        self.start_level = start_level
        self.end_level = end_level
        self.num_stages = (end_level - start_level)
        self.inner_channels = inner_channels
        self.conv_upsample_layers = ModuleList()
        for i in range(start_level, end_level):
            # Level ``i`` needs ``i`` conv+upsample steps to reach the
            # common output resolution (level 0 gets one conv, no upsample).
            self.conv_upsample_layers.append(
                ConvUpsample(in_channels,
                             inner_channels,
                             num_layers=(i if (i > 0) else 1),
                             num_upsample=(i if (i > 0) else 0),
                             conv_cfg=conv_cfg,
                             norm_cfg=norm_cfg))
        self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1)

    def _set_things_to_void(self, gt_semantic_seg):
        """Merge thing classes to one class.

        In PanopticFPN, the background labels will be reset from `0` to
        `self.num_stuff_classes-1`, the foreground labels will be merged to
        `self.num_stuff_classes`-th channel.
        """
        gt_semantic_seg = gt_semantic_seg.int()
        # Things occupy labels [0, num_things_classes).
        fg_mask = (gt_semantic_seg < self.num_things_classes)
        # Stuff occupies labels [num_things_classes,
        # num_things_classes + num_stuff_classes).
        bg_mask = ((gt_semantic_seg >= self.num_things_classes) *
                   (gt_semantic_seg <
                    (self.num_things_classes + self.num_stuff_classes)))
        new_gt_seg = torch.clone(gt_semantic_seg)
        # Shift stuff labels down so they start at 0.
        new_gt_seg = torch.where(bg_mask,
                                 (gt_semantic_seg - self.num_things_classes),
                                 new_gt_seg)
        # Collapse every thing label into the single ``num_stuff_classes`` id.
        new_gt_seg = torch.where(fg_mask,
                                 (fg_mask.int() * self.num_stuff_classes),
                                 new_gt_seg)
        return new_gt_seg

    def loss(self, seg_preds, gt_semantic_seg):
        """The loss of PanopticFPN head.

        Things classes will be merged to one class in PanopticFPN.
        """
        gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)
        return super().loss(seg_preds, gt_semantic_seg)

    def init_weights(self):
        """Initialize the logits conv with a small normal and zero bias."""
        super().init_weights()
        nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
        self.conv_logits.bias.data.zero_()

    def forward(self, x):
        """Fuse multi-level features and predict per-pixel class logits."""
        assert (self.num_stages <= len(x))
        feats = []
        for (i, layer) in enumerate(self.conv_upsample_layers):
            f = layer(x[(self.start_level + i)])
            feats.append(f)
        # Element-wise sum of all (now same-resolution) levels.
        feats = torch.sum(torch.stack(feats, dim=0), dim=0)
        seg_preds = self.conv_logits(feats)
        out = dict(seg_preds=seg_preds, feats=feats)
        return out
class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
    """Base class for panoptic heads."""

    def __init__(self,
                 num_things_classes=80,
                 num_stuff_classes=53,
                 test_cfg=None,
                 loss_panoptic=None,
                 init_cfg=None,
                 **kwargs):
        super(BasePanopticFusionHead, self).__init__(init_cfg)
        self.num_things_classes = num_things_classes
        self.num_stuff_classes = num_stuff_classes
        # The total class count covers both things and stuff.
        self.num_classes = (num_things_classes + num_stuff_classes)
        self.test_cfg = test_cfg
        # Only build a loss when one is configured.
        self.loss_panoptic = build_loss(loss_panoptic) if loss_panoptic else None

    @property
    def with_loss(self):
        """bool: whether the panoptic head contains loss function."""
        return self.loss_panoptic is not None

    @abstractmethod
    def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
        """Forward function during training."""

    @abstractmethod
    def simple_test(self,
                    img_metas,
                    det_labels,
                    mask_preds,
                    seg_preds,
                    det_bboxes,
                    cfg=None,
                    **kwargs):
        """Test without augmentation."""
def adaptive_avg_pool2d(input, output_size):
    """Handle empty batch dimension to adaptive_avg_pool2d.

    Args:
        input (tensor): 4D tensor.
        output_size (int, tuple[int,int]): the target output size.
    """
    if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
        # Older torch cannot pool an empty batch; return an empty tensor of
        # the expected output shape instead.
        if isinstance(output_size, int):
            output_size = [output_size, output_size]
        target_shape = [*input.shape[:2], *output_size]
        return NewEmptyTensorOp.apply(input, target_shape)
    return F.adaptive_avg_pool2d(input, output_size)
class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
    """Handle empty batch dimension to AdaptiveAvgPool2d."""

    def forward(self, x):
        """Pool ``x``; for empty input on old torch, emit an empty tensor."""
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            size = self.output_size
            if isinstance(size, int):
                size = [size, size]
            else:
                # ``None`` entries inherit the matching input dimension.
                size = [
                    dim if target is None else target
                    for target, dim in zip(size, x.size()[-2:])
                ]
            return NewEmptyTensorOp.apply(x, [*x.shape[:2], *size])
        return super().forward(x)
def build_transformer(cfg, default_args=None):
    """Builder for Transformer.

    Args:
        cfg (dict): Config dict whose ``type`` key is registered in the
            ``TRANSFORMER`` registry.
        default_args (dict, optional): Default arguments merged into the
            config before instantiation. Defaults to None.

    Returns:
        The transformer module built from ``cfg``.
    """
    return build_from_cfg(cfg, TRANSFORMER, default_args)
def build_linear_layer(cfg, *args, **kwargs):
    """Build linear layer.

    Args:
        cfg (None or dict): The linear layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate an linear layer.
        args (argument list): Arguments passed to the `__init__`
            method of the corresponding linear layer.
        kwargs (keyword arguments): Keyword arguments passed to the `__init__`
            method of the corresponding linear layer.

    Returns:
        nn.Module: Created linear layer.
    """
    # Validate the config with guard clauses; ``None`` means a plain Linear.
    if cfg is None:
        cfg_ = dict(type='Linear')
    elif not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    elif 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    else:
        cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in LINEAR_LAYERS:
        raise KeyError(f'Unrecognized linear type {layer_type}')
    linear_layer = LINEAR_LAYERS.get(layer_type)
    # Remaining cfg entries become constructor keyword arguments.
    return linear_layer(*args, **kwargs, **cfg_)
class ConvUpsample(BaseModule):
    """ConvUpsample performs 2x upsampling after Conv.

    There are several `ConvModule` layers. In the first few layers, upsampling
    will be applied after each layer of convolution. The number of upsampling
    must be no more than the number of ConvModule layers.

    Args:
        in_channels (int): Number of channels in the input feature map.
        inner_channels (int): Number of channels produced by the convolution.
        num_layers (int): Number of convolution layers.
        num_upsample (int | optional): Number of upsampling layer. Must be no
            more than num_layers. Upsampling will be applied after the first
            ``num_upsample`` layers of convolution. Default: ``num_layers``.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        init_cfg (dict): Config dict for initialization. Default: None.
        kwargs (key word augments): Other augments used in ConvModule.
    """

    def __init__(self,
                 in_channels,
                 inner_channels,
                 num_layers=1,
                 num_upsample=None,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 **kwargs):
        super(ConvUpsample, self).__init__(init_cfg)
        if num_upsample is None:
            num_upsample = num_layers
        # Fix: the original message ran the closing parenthesis into the next
        # word ("...)must be").
        assert num_upsample <= num_layers, \
            f'num_upsample({num_upsample}) must be no more than ' \
            f'num_layers({num_layers})'
        self.num_layers = num_layers
        self.num_upsample = num_upsample
        self.conv = ModuleList()
        for i in range(num_layers):
            self.conv.append(
                ConvModule(
                    in_channels,
                    inner_channels,
                    3,
                    padding=1,
                    stride=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
            # Every layer after the first consumes ``inner_channels``.
            in_channels = inner_channels

    def forward(self, x):
        """Apply the conv stack, upsampling 2x after each of the first
        ``num_upsample`` convolutions."""
        num_upsample = self.num_upsample
        for i in range(self.num_layers):
            x = self.conv[i](x)
            if num_upsample > 0:
                num_upsample -= 1
                x = F.interpolate(
                    x, scale_factor=2, mode='bilinear', align_corners=False)
        return x
class DarknetBottleneck(BaseModule):
    """The basic bottleneck block used in Darknet.

    Each ResBlock consists of two ConvModules and the input is added to the
    final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
    The first convLayer has filter size of 1x1 and the second one has the
    filter size of 3x3.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        expansion (float): Ratio of the hidden channels to ``out_channels``
            (``hidden_channels = int(out_channels * expansion)``).
            Default: 0.5
        add_identity (bool): Whether to add identity to the out.
            Default: True
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Default: False
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish').
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 expansion=0.5,
                 add_identity=True,
                 use_depthwise=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=None):
        super().__init__(init_cfg)
        hidden_channels = int((out_channels * expansion))
        conv = (DepthwiseSeparableConvModule if use_depthwise else ConvModule)
        # 1x1 reduce followed by 3x3 expand.
        self.conv1 = ConvModule(in_channels,
                                hidden_channels,
                                1,
                                conv_cfg=conv_cfg,
                                norm_cfg=norm_cfg,
                                act_cfg=act_cfg)
        self.conv2 = conv(hidden_channels,
                          out_channels,
                          3,
                          stride=1,
                          padding=1,
                          conv_cfg=conv_cfg,
                          norm_cfg=norm_cfg,
                          act_cfg=act_cfg)
        # Identity shortcut is only valid when channel counts match.
        self.add_identity = (add_identity and (in_channels == out_channels))

    def forward(self, x):
        """Two convs with an optional identity shortcut."""
        identity = x
        out = self.conv1(x)
        out = self.conv2(out)
        if self.add_identity:
            return (out + identity)
        else:
            return out
class CSPLayer(BaseModule):
    """Cross Stage Partial Layer.

    Args:
        in_channels (int): The input channels of the CSP layer.
        out_channels (int): The output channels of the CSP layer.
        expand_ratio (float): Ratio to adjust the number of channels of the
            hidden layer. Default: 0.5
        num_blocks (int): Number of blocks. Default: 1
        add_identity (bool): Whether to add identity in blocks.
            Default: True
        use_depthwise (bool): Whether to depthwise separable convolution in
            blocks. Default: False
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN')
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish')
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 expand_ratio=0.5,
                 num_blocks=1,
                 add_identity=True,
                 use_depthwise=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=None):
        super().__init__(init_cfg)
        mid_channels = int(out_channels * expand_ratio)
        # 1x1 convs split the input into the main and shortcut branches.
        self.main_conv = ConvModule(
            in_channels,
            mid_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.short_conv = ConvModule(
            in_channels,
            mid_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # 1x1 conv fusing the concatenated branches back to out_channels.
        self.final_conv = ConvModule(
            2 * mid_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        bottlenecks = [
            DarknetBottleneck(
                mid_channels,
                mid_channels,
                1.0,
                add_identity,
                use_depthwise,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for _ in range(num_blocks)
        ]
        self.blocks = nn.Sequential(*bottlenecks)

    def forward(self, x):
        """Run both branches, concatenate, and fuse with the final conv."""
        shortcut = self.short_conv(x)
        main = self.blocks(self.main_conv(x))
        fused = torch.cat((main, shortcut), dim=1)
        return self.final_conv(fused)
class InvertedResidual(BaseModule):
    """Inverted Residual Block.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
            Default: 3.
        stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
            se layer.
        with_expand_conv (bool): Use expand conv or not. If set False,
            mid_channels must be the same with in_channels.
            Default: True.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 kernel_size=3,
                 stride=1,
                 se_cfg=None,
                 with_expand_conv=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 with_cp=False,
                 init_cfg=None):
        super(InvertedResidual, self).__init__(init_cfg)
        # Residual shortcut is only valid when spatial size and channel
        # count are both preserved.
        self.with_res_shortcut = ((stride == 1) and (in_channels == out_channels))
        assert (stride in [1, 2]), f'stride must in [1, 2]. But received {stride}.'
        self.with_cp = with_cp
        self.with_se = (se_cfg is not None)
        self.with_expand_conv = with_expand_conv
        if self.with_se:
            assert isinstance(se_cfg, dict)
        if (not self.with_expand_conv):
            # Without the expand conv, the depthwise conv consumes the input
            # directly, so channel counts must match.
            assert (mid_channels == in_channels)
        if self.with_expand_conv:
            # 1x1 pointwise conv expanding in_channels -> mid_channels.
            self.expand_conv = ConvModule(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        # Depthwise conv: groups == channels.
        self.depthwise_conv = ConvModule(
            in_channels=mid_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size // 2),
            groups=mid_channels,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        if self.with_se:
            self.se = SELayer(**se_cfg)
        # Linear (no activation) 1x1 projection back to out_channels.
        self.linear_conv = ConvModule(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    def forward(self, x):
        """Forward with optional gradient checkpointing."""

        def _inner_forward(x):
            out = x
            if self.with_expand_conv:
                out = self.expand_conv(out)
            out = self.depthwise_conv(out)
            if self.with_se:
                out = self.se(out)
            out = self.linear_conv(out)
            if self.with_res_shortcut:
                return (x + out)
            else:
                return out

        if (self.with_cp and x.requires_grad):
            # Trade compute for memory during training.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        return out
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Make divisible function.

    This function rounds the channel number to the nearest value that can be
    divisible by the divisor. It is taken from the original tf repo. It ensures
    that all layers have a channel number that is divisible by divisor. It can
    be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa

    Args:
        value (int): The original channel number.
        divisor (int): The divisor to fully divide the channel number.
        min_value (int): The minimum value of the output channel.
            Default: None, means that the minimum value equal to the divisor.
        min_ratio (float): The minimum ratio of the rounded channel number to
            the original channel number. Default: 0.9.

    Returns:
        int: The modified output channel number.
    """
    if min_value is None:
        min_value = divisor
    # Round to the nearest multiple of ``divisor``, floored at ``min_value``.
    rounded = int(value + divisor / 2) // divisor * divisor
    new_value = max(min_value, rounded)
    # Rounding down must not shrink the channel count below ``min_ratio`` of
    # the original; bump up one divisor step if it would.
    if new_value < min_ratio * value:
        new_value += divisor
    return new_value
@LINEAR_LAYERS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
    """Normalized Linear Layer.

    Args:
        tempeature (float, optional): Tempeature term. Default to 20.
        power (int, optional): Power term. Default to 1.0.
        eps (float, optional): The minimal value of divisor to
            keep numerical stability. Default to 1e-6.
    """

    def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06, **kwargs):
        super(NormedLinear, self).__init__(*args, **kwargs)
        # NOTE: the attribute keeps the historical misspelling
        # ``tempearture`` for config/checkpoint compatibility.
        self.tempearture = tempearture
        self.power = power
        self.eps = eps
        self.init_weights()

    def init_weights(self):
        """Small-normal weight init with zero bias."""
        nn.init.normal_(self.weight, mean=0, std=0.01)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0)

    def forward(self, x):
        """Cosine-style linear mapping scaled by the temperature."""
        weight_norm = self.weight.norm(dim=1, keepdim=True).pow(self.power)
        weight_ = self.weight / (weight_norm + self.eps)
        x_norm = x.norm(dim=1, keepdim=True).pow(self.power)
        x_ = (x / (x_norm + self.eps)) * self.tempearture
        return F.linear(x_, weight_, self.bias)
def _torch_version_tuple():
    """Parse ``torch.__version__`` into a comparable ``(major, minor)`` tuple.

    Non-numeric version strings (e.g. custom builds) fall back to a large
    version, matching the behavior the original string comparison gave them.
    """
    try:
        return tuple(
            int(p) for p in torch.__version__.split('+')[0].split('.')[:2])
    except ValueError:
        return (999, 0)


@CONV_LAYERS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
    """Normalized Conv2d Layer.

    Args:
        tempeature (float, optional): Tempeature term. Default to 20.
        power (int, optional): Power term. Default to 1.0.
        eps (float, optional): The minimal value of divisor to
            keep numerical stability. Default to 1e-6.
        norm_over_kernel (bool, optional): Normalize over kernel.
            Default to False.
    """

    def __init__(self,
                 *args,
                 tempearture=20,
                 power=1.0,
                 eps=1e-06,
                 norm_over_kernel=False,
                 **kwargs):
        super(NormedConv2d, self).__init__(*args, **kwargs)
        # NOTE: the attribute keeps the historical misspelling
        # ``tempearture`` for config/checkpoint compatibility.
        self.tempearture = tempearture
        self.power = power
        self.norm_over_kernel = norm_over_kernel
        self.eps = eps

    def forward(self, x):
        """Normalize weight and input, scale by temperature, then convolve."""
        if not self.norm_over_kernel:
            # Normalize each output channel over the input-channel dim only.
            weight_ = self.weight / (
                self.weight.norm(dim=1, keepdim=True).pow(self.power) +
                self.eps)
        else:
            # Normalize each output channel over the full flattened kernel.
            weight_ = self.weight / (
                self.weight.view(self.weight.size(0), -1).norm(
                    dim=1, keepdim=True).pow(self.power)[..., None, None] +
                self.eps)
        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
        x_ = x_ * self.tempearture
        if hasattr(self, 'conv2d_forward'):
            # torch < 1.8 exposes ``conv2d_forward``.
            x_ = self.conv2d_forward(x_, weight_)
        elif _torch_version_tuple() >= (1, 8):
            # Fix: the original compared version *strings* lexicographically,
            # so '1.10' < '1.8' routed torch >= 1.10 into the obsolete 2-arg
            # ``_conv_forward`` branch below and crashed.
            x_ = self._conv_forward(x_, weight_, self.bias)
        else:
            x_ = self._conv_forward(x_, weight_)
        return x_
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
                           num_stuff):
    """Preprocess the ground truth for a image.

    Args:
        gt_labels (Tensor): Ground truth labels of each bbox,
            with shape (num_gts, ).
        gt_masks (BitmapMasks): Ground truth masks of each instances
            of a image, shape (num_gts, h, w).
        gt_semantic_seg (Tensor): Ground truth of semantic
            segmentation with the shape (1, h, w).
            [0, num_thing_class - 1] means things,
            [num_thing_class, num_class-1] means stuff,
            255 means VOID.
        num_things (int): Number of thing classes.
        num_stuff (int): Number of stuff classes.

    Returns:
        tuple: a tuple containing the following targets.

            - labels (Tensor): Ground truth class indices for a
                image, with shape (n, ), n is the sum of number
                of stuff type and number of instance in a image.
            - masks (Tensor): Ground truth mask for a image, with
                shape (n, h, w).
    """
    num_classes = (num_things + num_stuff)
    things_labels = gt_labels
    gt_semantic_seg = gt_semantic_seg.squeeze(0)
    # Pad instance masks to the semantic map's size and move to the same
    # device as the labels.
    things_masks = gt_masks.pad(gt_semantic_seg.shape[(-2):], pad_val=0).to_tensor(dtype=torch.bool, device=gt_labels.device)
    semantic_labels = torch.unique(gt_semantic_seg, sorted=False, return_inverse=False, return_counts=False)
    stuff_masks_list = []
    stuff_labels_list = []
    for label in semantic_labels:
        # Skip thing labels and out-of-range labels (e.g. 255 VOID).
        if ((label < num_things) or (label >= num_classes)):
            continue
        stuff_mask = (gt_semantic_seg == label)
        stuff_masks_list.append(stuff_mask)
        stuff_labels_list.append(label)
    if (len(stuff_masks_list) > 0):
        stuff_masks = torch.stack(stuff_masks_list, dim=0)
        stuff_labels = torch.stack(stuff_labels_list, dim=0)
        labels = torch.cat([things_labels, stuff_labels], dim=0)
        masks = torch.cat([things_masks, stuff_masks], dim=0)
    else:
        labels = things_labels
        masks = things_masks
    masks = masks.long()
    return (labels, masks)
class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 downsample_first=True,
                 **kwargs):
        self.block = block
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            # A downsample path is needed whenever the residual shortcut
            # would mismatch in spatial size or channel count.
            downsample = []
            conv_stride = stride
            if avg_down:
                # Downsample with AvgPool and keep the conv at stride 1.
                conv_stride = 1
                downsample.append(
                    nn.AvgPool2d(kernel_size=stride,
                                 stride=stride,
                                 ceil_mode=True,
                                 count_include_pad=False))
            downsample.extend([
                build_conv_layer(conv_cfg,
                                 inplanes,
                                 (planes * block.expansion),
                                 kernel_size=1,
                                 stride=conv_stride,
                                 bias=False),
                build_norm_layer(norm_cfg, (planes * block.expansion))[1]
            ])
            downsample = nn.Sequential(*downsample)
        layers = []
        if downsample_first:
            # ResNet style: the first block strides/downsamples.
            layers.append(
                block(inplanes=inplanes,
                      planes=planes,
                      stride=stride,
                      downsample=downsample,
                      conv_cfg=conv_cfg,
                      norm_cfg=norm_cfg,
                      **kwargs))
            inplanes = (planes * block.expansion)
            for _ in range(1, num_blocks):
                layers.append(
                    block(inplanes=inplanes,
                          planes=planes,
                          stride=1,
                          conv_cfg=conv_cfg,
                          norm_cfg=norm_cfg,
                          **kwargs))
        else:
            # Hourglass style: the last block strides/downsamples.
            for _ in range((num_blocks - 1)):
                layers.append(
                    block(inplanes=inplanes,
                          planes=inplanes,
                          stride=1,
                          conv_cfg=conv_cfg,
                          norm_cfg=norm_cfg,
                          **kwargs))
            layers.append(
                block(inplanes=inplanes,
                      planes=planes,
                      stride=stride,
                      downsample=downsample,
                      conv_cfg=conv_cfg,
                      norm_cfg=norm_cfg,
                      **kwargs))
        super(ResLayer, self).__init__(*layers)
class SimplifiedBasicBlock(BaseModule):
    """Simplified version of original basic residual block. This is used in
    `SCNet <https://arxiv.org/abs/2012.10150>`_.

    - Norm layer is now optional
    - Last ReLU in forward function is removed
    """
    # Channel expansion factor of this block type.
    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None,
                 init_fg=None):
        # NOTE(review): ``init_fg`` looks like a typo for ``init_cfg``; the
        # name is kept since it is part of the public signature.
        super(SimplifiedBasicBlock, self).__init__(init_fg)
        assert (dcn is None), 'Not implemented yet.'
        assert (plugins is None), 'Not implemented yet.'
        assert (not with_cp), 'Not implemented yet.'
        self.with_norm = (norm_cfg is not None)
        # Convs carry a bias only when no norm layer follows them.
        with_bias = (True if (norm_cfg is None) else False)
        self.conv1 = build_conv_layer(conv_cfg,
                                      inplanes,
                                      planes,
                                      3,
                                      stride=stride,
                                      padding=dilation,
                                      dilation=dilation,
                                      bias=with_bias)
        if self.with_norm:
            (self.norm1_name, norm1) = build_norm_layer(norm_cfg, planes, postfix=1)
            self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg,
                                      planes,
                                      planes,
                                      3,
                                      padding=1,
                                      bias=with_bias)
        if self.with_norm:
            (self.norm2_name, norm2) = build_norm_layer(norm_cfg, planes, postfix=2)
            self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return (getattr(self, self.norm1_name) if self.with_norm else None)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return (getattr(self, self.norm2_name) if self.with_norm else None)

    def forward(self, x):
        """Forward function."""
        identity = x
        out = self.conv1(x)
        if self.with_norm:
            out = self.norm1(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.with_norm:
            out = self.norm2(out)
        if (self.downsample is not None):
            identity = self.downsample(x)
        # No final ReLU (simplified block).
        out += identity
        return out
class SELayer(BaseModule):
    """Squeeze-and-Excitation Module.

    Args:
        channels (int): The input (and output) channels of the SE layer.
        ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
            ``int(channels/ratio)``. Default: 16.
        conv_cfg (None or dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configurated
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configurated by the first dict and the
            second activation layer will be configurated by the second dict.
            Default: (dict(type='ReLU'), dict(type='Sigmoid'))
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 channels,
                 ratio=16,
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
                 init_cfg=None):
        super(SELayer, self).__init__(init_cfg)
        if isinstance(act_cfg, dict):
            # A single dict configures both activation layers.
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        squeezed_channels = int(channels / ratio)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=squeezed_channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        self.conv2 = ConvModule(
            in_channels=squeezed_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])

    def forward(self, x):
        """Scale ``x`` channel-wise by the squeeze-excitation gate."""
        gate = self.global_avgpool(x)
        gate = self.conv2(self.conv1(gate))
        return x * gate
class DyReLU(BaseModule):
    """Dynamic ReLU (DyReLU) module.

    See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.
    Current implementation is specialized for task-aware attention in DyHead.
    HSigmoid arguments in default act_cfg follow DyHead official code.
    https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py

    Args:
        channels (int): The input (and output) channels of DyReLU module.
        ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module,
            the intermediate channel will be ``int(channels/ratio)``.
            Default: 4.
        conv_cfg (None or dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configurated
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configurated by the first dict and the
            second activation layer will be configurated by the second dict.
            Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
            divisor=6.0))
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 channels,
                 ratio=4,
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'),
                          dict(type='HSigmoid', bias=3.0, divisor=6.0)),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert (len(act_cfg) == 2)
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.channels = channels
        # 4 coefficients per channel: two slopes (a1, a2) and two
        # intercepts (b1, b2).
        self.expansion = 4
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=int((channels / ratio)),
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        self.conv2 = ConvModule(
            in_channels=int((channels / ratio)),
            out_channels=(channels * self.expansion),
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])

    def forward(self, x):
        """Forward function."""
        coeffs = self.global_avgpool(x)
        coeffs = self.conv1(coeffs)
        # HSigmoid output lies in [0, 1]; shift to [-0.5, 0.5].
        coeffs = (self.conv2(coeffs) - 0.5)
        # Split 4*C coefficients into two slope/intercept pairs per channel.
        (a1, b1, a2, b2) = torch.split(coeffs, self.channels, dim=1)
        a1 = ((a1 * 2.0) + 1.0)  # slope in [0, 2], centered at identity (1.0)
        a2 = (a2 * 2.0)  # slope in [-1, 1], centered at 0
        # Piecewise-linear activation: max of the two learned lines.
        out = torch.max(((x * a1) + b1), ((x * a2) + b2))
        return out
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: Base environment info from mmcv, extended with the
        MMDetection version string (e.g. ``'2.x.y+abc1234'``, where the
        suffix is the short git hash).
    """
    env_info = collect_base_env()
    env_info['MMDetection'] = ((mmdet.__version__ + '+') + get_git_hash()[:7])
    return env_info
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get root logger.

    Thin wrapper that returns the package-wide ``'mmdet'`` logger, optionally
    attaching a file handler.

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to logging.INFO.

    Returns:
        :obj:`logging.Logger`: The obtained logger
    """
    logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level)
    return logger
def find_latest_checkpoint(path, suffix='pth'):
    """Find the latest checkpoint from the working directory.

    Args:
        path (str): The path to find checkpoints.
        suffix (str): File extension. Defaults to pth.

    Returns:
        latest_path (str | None): File path of the latest checkpoint.

    References:
        .. [1] https://github.com/microsoft/SoftTeacher
                  /blob/main/ssod/utils/patch.py
    """
    if not osp.exists(path):
        warnings.warn('The path of checkpoints does not exist.')
        return None
    # An explicit ``latest.<suffix>`` file/symlink always wins.
    if osp.exists(osp.join(path, f'latest.{suffix}')):
        return osp.join(path, f'latest.{suffix}')

    checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
    if len(checkpoints) == 0:
        warnings.warn('There are no checkpoints in the path.')
        return None
    latest = -1
    latest_path = None
    for checkpoint in checkpoints:
        # Checkpoints are conventionally named ``<prefix>_<count>.<suffix>``
        # (e.g. ``epoch_12.pth``). Fix: skip files that do not follow the
        # pattern (e.g. ``best_model.pth``) instead of crashing on ``int()``.
        stem = osp.basename(checkpoint).split('_')[-1].split('.')[0]
        if not stem.isdigit():
            continue
        count = int(stem)
        if count > latest:
            latest = count
            latest_path = checkpoint
    return latest_path
def setup_multi_processes(cfg):
    """Setup multi-processing environment variables.

    Side effects (process-global): may change the multiprocessing start
    method, cap OpenCV's thread pool, and set ``OMP_NUM_THREADS`` /
    ``MKL_NUM_THREADS`` when workers are used and the variables are unset.
    """
    # ``fork`` is unavailable on Windows, so only configure it elsewhere.
    if (platform.system() != 'Windows'):
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if ((current_method is not None) and (current_method != mp_start_method)):
            warnings.warn(f'Multi-processing start method `{mp_start_method}` is different from the previous setting `{current_method}`.It will be force set to `{mp_start_method}`. You can change this behavior by changing `mp_start_method` in your config.')
        mp.set_start_method(mp_start_method, force=True)
    # Limit OpenCV threads to avoid oversubscription with dataloader workers.
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)
    # Only set OMP/MKL thread counts when the user has not already done so.
    if (('OMP_NUM_THREADS' not in os.environ) and (cfg.data.workers_per_gpu > 1)):
        omp_num_threads = 1
        warnings.warn(f'Setting OMP_NUM_THREADS environment variable for each process to be {omp_num_threads} in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
    if (('MKL_NUM_THREADS' not in os.environ) and (cfg.data.workers_per_gpu > 1)):
        mkl_num_threads = 1
        warnings.warn(f'Setting MKL_NUM_THREADS environment variable for each process to be {mkl_num_threads} in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
class NiceRepr:
    """Mixin giving "nice" ``__str__``/``__repr__`` based on ``__nice__``.

    Subclasses redefine ``__nice__`` to return a short summary string; if
    a subclass only defines ``__len__``, the default ``__nice__`` falls
    back to the length.

    Example:
        >>> class Foo(NiceRepr):
        ...     def __nice__(self):
        ...         return 'info'
        >>> foo = Foo()
        >>> assert str(foo) == '<Foo(info)>'
        >>> assert repr(foo).startswith('<Foo(info) at ')

    Example:
        >>> class Baz(NiceRepr):
        ...     def __len__(self):
        ...         return 5
        >>> baz = Baz()
        >>> assert str(baz) == '<Baz(5)>'
    """

    def __nice__(self):
        """str: a "nice" summary string describing this module"""
        if not hasattr(self, '__len__'):
            raise NotImplementedError(
                f'Define the __nice__ method for {self.__class__!r}')
        return str(len(self))

    def __repr__(self):
        """str: the string of the module"""
        try:
            summary = self.__nice__()
        except NotImplementedError as ex:
            # Fall back to the default repr instead of raising.
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
        return f'<{type(self).__name__}({summary}) at {hex(id(self))}>'

    def __str__(self):
        """str: the string of the module"""
        try:
            summary = self.__nice__()
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
        return f'<{type(self).__name__}({summary})>'
def ensure_rng(rng=None):
    """Coerce the input into a ``numpy.random.RandomState``.

    Adapted from [1]_.

    Args:
        rng (int | numpy.random.RandomState | None): ``None`` selects the
            global numpy random state; an int seeds a fresh RandomState;
            anything else is returned unchanged.

    Returns:
        numpy.random.RandomState: a numpy random number generator

    References:
        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501
    """
    if rng is None:
        # Reuse numpy's global state so behavior follows np.random.seed().
        return np.random.mtrand._rand
    if isinstance(rng, int):
        return np.random.RandomState(rng)
    return rng
def parse_version_info(version_str):
    """Parse a version string into a comparable tuple.

    '1.3.17' -> (1, 3, 17); '1.4.0rc2' -> (1, 4, 0, 'rc2'). Components
    that are neither plain integers nor contain 'rc' are dropped.
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            parts.append(int(token))
        elif 'rc' in token:
            # Release candidates split into the numeric part plus an
            # 'rcN' string marker.
            numeric, _, rc = token.partition('rc')
            parts.append(int(numeric))
            parts.append(f'rc{rc}')
    return tuple(parts)
def check_installation():
    """Check whether mmcv-full has been installed successfully."""
    np_boxes1 = np.asarray(
        [[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6],
         [7.0, 7.0, 8.0, 8.0, 0.4]],
        dtype=np.float32)
    np_boxes2 = np.asarray(
        [[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5],
         [5.0, 5.0, 6.0, 7.0, 0.4]],
        dtype=np.float32)
    boxes1 = torch.from_numpy(np_boxes1)
    boxes2 = torch.from_numpy(np_boxes2)

    # Running a compiled op end-to-end proves the CPU extensions built.
    box_iou_rotated(boxes1, boxes2)
    print('CPU ops were compiled successfully.')

    if not torch.cuda.is_available():
        print('No CUDA runtime is found, skipping the checking of CUDA ops.')
        return
    # Repeat on GPU tensors to exercise the CUDA kernels.
    box_iou_rotated(boxes1.cuda(), boxes2.cuda())
    print('CUDA ops were compiled successfully.')
class Model(nn.Module):
    """A small LeNet-style CNN producing 10-way logits from 3x32x32 input."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        """Compute class logits for a batch of images."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten the conv features to (N, 400) for the fully-connected head.
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

    def train_step(self, data, optimizer):
        """Run one training step and return the loss dict.

        Args:
            data (tuple): ``(images, labels)`` batch.
            optimizer: unused here; kept for the runner's call signature.

        Returns:
            dict: ``{'loss': Tensor}``.
        """
        images, labels = data
        logits = self(images)
        return {'loss': self.loss_fn(logits, labels)}
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
    """Quantize an array of (-inf, inf) to integer bins [0, levels-1].

    Args:
        arr (ndarray): Input array.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the quantized array.

    Returns:
        ndarray: Quantized array.

    Raises:
        ValueError: if ``levels`` is not an int > 1 or
            ``min_val >= max_val``.
    """
    if not (isinstance(levels, int) and levels > 1):
        raise ValueError(
            f'levels must be a positive integer, but got {levels}')
    if min_val >= max_val:
        raise ValueError(
            f'min_val ({min_val}) must be smaller than max_val ({max_val})')

    shifted = np.clip(arr, min_val, max_val) - min_val
    span = max_val - min_val
    binned = np.floor(levels * shifted / span).astype(dtype)
    # Inputs exactly at max_val would fall into bin `levels`; fold them
    # back into the top bin.
    return np.minimum(binned, levels - 1)
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
    """Map quantized bin indices back to bin-center values.

    Args:
        arr (ndarray): Input array of bin indices.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the dequantized array.

    Returns:
        ndarray: Dequantized array.

    Raises:
        ValueError: if ``levels`` is not an int > 1 or
            ``min_val >= max_val``.
    """
    if not (isinstance(levels, int) and levels > 1):
        raise ValueError(
            f'levels must be a positive integer, but got {levels}')
    if min_val >= max_val:
        raise ValueError(
            f'min_val ({min_val}) must be smaller than max_val ({max_val})')

    # +0.5 maps each bin index to its center before rescaling.
    centered = (arr + 0.5).astype(dtype)
    return centered * (max_val - min_val) / levels + min_val
class AlexNet(nn.Module):
    """AlexNet backbone.

    Args:
        num_classes (int): number of classes for classification. A
            non-positive value disables the classifier head, in which case
            ``forward`` returns the convolutional feature map.
    """

    def __init__(self, num_classes=-1):
        super().__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        if self.num_classes > 0:
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(256 * 6 * 6, 4096),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(inplace=True),
                nn.Linear(4096, num_classes),
            )

    def init_weights(self, pretrained=None):
        """Optionally load pretrained weights from a checkpoint path."""
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError('pretrained must be a str or None')
        logger = logging.getLogger()
        # Imported lazily to avoid a hard dependency at definition time.
        from ..runner import load_checkpoint
        load_checkpoint(self, pretrained, strict=False, logger=logger)

    def forward(self, x):
        x = self.features(x)
        if self.num_classes > 0:
            # Flatten the (N, 256, 6, 6) feature map for the classifier.
            x = x.view(x.size(0), 256 * 6 * 6)
            x = self.classifier(x)
        return x
@ACTIVATION_LAYERS.register_module(name='Clip')
@ACTIVATION_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp activation layer: restrict values to ``[min, max]``.

    See ``torch.clamp()`` for details.

    Args:
        min (Number | optional): Lower-bound of the range to be clamped to.
            Default to -1.
        max (Number | optional): Upper-bound of the range to be clamped to.
            Default to 1.
    """

    def __init__(self, min=-1.0, max=1.0):
        super().__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        """Return ``x`` clamped element-wise to ``[self.min, self.max]``.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: Clamped tensor.
        """
        return torch.clamp(x, min=self.min, max=self.max)
class GELU(nn.Module):
    r"""Applies the Gaussian Error Linear Units function:

    .. math::
        \text{GELU}(x) = x * \Phi(x)

    where :math:`\Phi(x)` is the Cumulative Distribution Function for the
    Gaussian Distribution.

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    Examples::

        >>> m = nn.GELU()
        >>> input = torch.randn(2)
        >>> output = m(input)
    """

    def forward(self, input):
        # Delegate directly to the functional implementation.
        return F.gelu(input)
def build_activation_layer(cfg):
    """Build an activation layer from a config dict.

    Args:
        cfg (dict): The activation layer config, containing ``type`` (str)
            plus any args needed to instantiate the layer.

    Returns:
        nn.Module: Created activation layer.
    """
    return build_from_cfg(cfg, ACTIVATION_LAYERS)
def last_zero_init(m):
    """Zero-init ``m``, or its final layer when ``m`` is ``nn.Sequential``."""
    target = m[-1] if isinstance(m, nn.Sequential) else m
    constant_init(target, val=0)
@PLUGIN_LAYERS.register_module()
class ContextBlock(nn.Module):
    """ContextBlock module in GCNet.

    See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and
    Beyond' (https://arxiv.org/abs/1904.11492) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        ratio (float): Ratio of channels of transform bottleneck
        pooling_type (str): Pooling method for context modeling.
            Options are 'att' and 'avg', stand for attention pooling and
            average pooling respectively. Default: 'att'.
        fusion_types (Sequence[str]): Fusion method for feature fusion,
            Options are 'channel_add', 'channel_mul', stand for channelwise
            addition and multiplication respectively.
            Default: ('channel_add',)
    """

    # Abbreviation used when this plugin layer is auto-named.
    _abbr_ = 'context_block'

    def __init__(self, in_channels, ratio, pooling_type='att', fusion_types=('channel_add',)):
        super(ContextBlock, self).__init__()
        assert (pooling_type in ['avg', 'att'])
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([(f in valid_fusion_types) for f in fusion_types])
        assert (len(fusion_types) > 0), 'at least one fusion should be used'
        self.in_channels = in_channels
        self.ratio = ratio
        # Bottleneck width of the channel transform convs.
        self.planes = int((in_channels * ratio))
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if (pooling_type == 'att'):
            # Attention pooling: 1x1 conv yields a per-position weight map.
            self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Bottleneck transform applied to the pooled context; its output is
        # added to the input feature map.
        if ('channel_add' in fusion_types):
            self.channel_add_conv = nn.Sequential(
                nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_add_conv = None
        # Same bottleneck shape, but its (sigmoid-gated) output multiplies
        # the input feature map instead.
        if ('channel_mul' in fusion_types):
            self.channel_mul_conv = nn.Sequential(
                nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_mul_conv = None
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize conv_mask with kaiming and zero-init the fusion convs'
        last layers so the block starts as an identity mapping."""
        if (self.pooling_type == 'att'):
            kaiming_init(self.conv_mask, mode='fan_in')
            # Flag read by outer init logic to skip re-initialization.
            self.conv_mask.inited = True
        if (self.channel_add_conv is not None):
            last_zero_init(self.channel_add_conv)
        if (self.channel_mul_conv is not None):
            last_zero_init(self.channel_mul_conv)

    def spatial_pool(self, x):
        """Pool the (N, C, H, W) input into an (N, C, 1, 1) context vector."""
        (batch, channel, height, width) = x.size()
        if (self.pooling_type == 'att'):
            input_x = x
            # [N, C, H*W]
            input_x = input_x.view(batch, channel, (height * width))
            # [N, 1, C, H*W]
            input_x = input_x.unsqueeze(1)
            # [N, 1, H, W]
            context_mask = self.conv_mask(x)
            # [N, 1, H*W], softmax over spatial positions
            context_mask = context_mask.view(batch, 1, (height * width))
            context_mask = self.softmax(context_mask)
            # [N, 1, H*W, 1]
            context_mask = context_mask.unsqueeze((- 1))
            # [N, 1, C, 1]: attention-weighted sum over spatial positions
            context = torch.matmul(input_x, context_mask)
            context = context.view(batch, channel, 1, 1)
        else:
            # [N, C, 1, 1] via global average pooling
            context = self.avg_pool(x)
        return context

    def forward(self, x):
        # [N, C, 1, 1] global context vector
        context = self.spatial_pool(x)
        out = x
        if (self.channel_mul_conv is not None):
            # Channel-wise gating in (0, 1) broadcast over H, W.
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            out = (out * channel_mul_term)
        if (self.channel_add_conv is not None):
            # Channel-wise additive term broadcast over H, W.
            channel_add_term = self.channel_add_conv(context)
            out = (out + channel_add_term)
        return out
def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): The conv layer config; ``None`` means a plain
            ``Conv2d``. Otherwise it must contain ``type`` (str) plus any
            args needed to instantiate the layer.
        args (argument list): Arguments passed to the layer's ``__init__``.
        kwargs (keyword arguments): Keyword arguments passed to the layer's
            ``__init__``.

    Returns:
        nn.Module: Created conv layer.
    """
    if cfg is None:
        cfg_ = dict(type='Conv2d')
    elif not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    elif 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    else:
        cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in CONV_LAYERS:
        raise KeyError(f'Unrecognized layer type {layer_type}')
    conv_layer = CONV_LAYERS.get(layer_type)
    # Remaining cfg entries become extra keyword arguments for the layer.
    return conv_layer(*args, **kwargs, **cfg_)
@CONV_LAYERS.register_module()
class Conv2dAdaptivePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style "same" padding.

    Pads the input (if needed) so that it is fully covered by the filter
    with the given stride: the output spatial size is
    ``ceil(input_size / stride)``. For stride 1 the output size matches
    the input; for stride 2 it is halved.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # Padding is computed dynamically in forward, so the underlying
        # Conv2d is built with zero padding.
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
                         dilation, groups, bias)

    def forward(self, x):
        img_h, img_w = x.size()[-2:]
        kernel_h, kernel_w = self.weight.size()[-2:]
        stride_h, stride_w = self.stride
        out_h = math.ceil(img_h / stride_h)
        out_w = math.ceil(img_w / stride_w)
        # Total padding needed so the last window still fits.
        pad_h = max(
            (out_h - 1) * self.stride[0] +
            (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0)
        pad_w = max(
            (out_w - 1) * self.stride[1] +
            (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Split as evenly as possible; the extra pixel goes to the
            # bottom/right (TensorFlow convention).
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2,
                          pad_h // 2, pad_h - pad_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
@PLUGIN_LAYERS.register_module()
class ConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers.

    This block simplifies the usage of convolution layers, which are
    commonly used with a norm layer (e.g., BatchNorm) and activation layer
    (e.g., ReLU). It is based upon three build methods:
    `build_conv_layer()`, `build_norm_layer()` and
    `build_activation_layer()`.

    Besides, we add some additional features in this module.
    1. Automatically set `bias` of the conv layer.
    2. Spectral norm is supported.
    3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
    supports zero and circular padding, and we add "reflect" padding mode.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
        stride (int | tuple[int]): Stride of the convolution.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input.
        dilation (int | tuple[int]): Spacing between kernel elements.
        groups (int): Number of blocked connections from input channels to
            output channels.
        bias (bool | str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias will be set as True if `norm_cfg` is None,
            otherwise False. Default: "auto".
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        inplace (bool): Whether to use inplace mode for activation.
            Default: True.
        with_spectral_norm (bool): Whether use spectral norm in conv module.
            Default: False.
        padding_mode (str): If the `padding_mode` has not been supported by
            current `Conv2d` in PyTorch, we will use our own padding layer
            instead. Currently, we support ['zeros', 'circular'] with
            official implementation and ['reflect'] with our own
            implementation. Default: 'zeros'.
        order (tuple[str]): The order of conv/norm/activation layers. It is
            a sequence of "conv", "norm" and "act". Common examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
            Default: ('conv', 'norm', 'act').
    """

    # Abbreviation used when this plugin layer is auto-named.
    _abbr_ = 'conv_block'

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), inplace=True, with_spectral_norm=False, padding_mode='zeros', order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert ((conv_cfg is None) or isinstance(conv_cfg, dict))
        assert ((norm_cfg is None) or isinstance(norm_cfg, dict))
        assert ((act_cfg is None) or isinstance(act_cfg, dict))
        official_padding_mode = ['zeros', 'circular']
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.with_spectral_norm = with_spectral_norm
        # Padding modes outside the official list are handled by a
        # dedicated padding layer applied before the conv in forward().
        self.with_explicit_padding = (padding_mode not in official_padding_mode)
        self.order = order
        assert (isinstance(self.order, tuple) and (len(self.order) == 3))
        assert (set(order) == set(['conv', 'norm', 'act']))
        self.with_norm = (norm_cfg is not None)
        self.with_activation = (act_cfg is not None)
        # 'auto': a norm layer makes the conv bias redundant.
        if (bias == 'auto'):
            bias = (not self.with_norm)
        self.with_bias = bias
        if self.with_explicit_padding:
            pad_cfg = dict(type=padding_mode)
            self.padding_layer = build_padding_layer(pad_cfg, padding)
        # When padding is done explicitly, the conv itself pads nothing.
        conv_padding = (0 if self.with_explicit_padding else padding)
        self.conv = build_conv_layer(conv_cfg, in_channels, out_channels, kernel_size, stride=stride, padding=conv_padding, dilation=dilation, groups=groups, bias=bias)
        # Mirror the conv's attributes onto the module for convenience.
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        if self.with_spectral_norm:
            self.conv = nn.utils.spectral_norm(self.conv)
        if self.with_norm:
            # Norm after conv sees out_channels; norm before conv sees
            # in_channels.
            if (order.index('norm') > order.index('conv')):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            (self.norm_name, norm) = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
            if self.with_bias:
                if isinstance(norm, (_BatchNorm, _InstanceNorm)):
                    warnings.warn('Unnecessary conv bias before batch/instance norm')
        else:
            self.norm_name = None
        if self.with_activation:
            act_cfg_ = act_cfg.copy()
            # Only pass `inplace` to activations known to accept it.
            if (act_cfg_['type'] not in ['Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish']):
                act_cfg_.setdefault('inplace', inplace)
            self.activate = build_activation_layer(act_cfg_)
        self.init_weights()

    @property
    def norm(self):
        # The norm layer is registered under a dynamic name (e.g. 'bn1',
        # 'gn') chosen by build_norm_layer.
        if self.norm_name:
            return getattr(self, self.norm_name)
        else:
            return None

    def init_weights(self):
        """Kaiming-init the conv (unless it initializes itself) and
        constant-init the norm layer."""
        # Custom conv layers exposing their own init_weights are left to
        # initialize themselves.
        if (not hasattr(self.conv, 'init_weights')):
            if (self.with_activation and (self.act_cfg['type'] == 'LeakyReLU')):
                nonlinearity = 'leaky_relu'
                a = self.act_cfg.get('negative_slope', 0.01)
            else:
                nonlinearity = 'relu'
                a = 0
            kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        """Apply conv/norm/act in ``self.order``; ``activate``/``norm``
        allow callers to skip those stages per call."""
        for layer in self.order:
            if (layer == 'conv'):
                if self.with_explicit_padding:
                    x = self.padding_layer(x)
                x = self.conv(x)
            elif ((layer == 'norm') and norm and self.with_norm):
                x = self.norm(x)
            elif ((layer == 'act') and activate and self.with_activation):
                x = self.activate(x)
        return x
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-05):
    """2D convolution with Weight Standardization.

    Each output-channel filter is standardized to zero mean and unit std
    (over its remaining dims) before the convolution is applied.

    Args:
        input (Tensor): input feature map.
        weight (Tensor): conv kernel of shape (out_channels, ...).
        bias, stride, padding, dilation, groups: as in ``F.conv2d``.
        eps (float): added to the std for numerical stability.

    Returns:
        Tensor: convolution output.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mean = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    std = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mean) / (std + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
                    groups)
@CONV_LAYERS.register_module('ConvWS')
class ConvWS2d(nn.Conv2d):
    """``nn.Conv2d`` variant applying Weight Standardization.

    The forward pass delegates to :func:`conv_ws_2d`, which standardizes
    the kernel per output channel before convolving.

    Args:
        eps (float): numerical-stability epsilon forwarded to
            ``conv_ws_2d``. Default: 1e-05. Remaining args as in
            ``nn.Conv2d``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
        super().__init__(in_channels, out_channels, kernel_size,
                         stride=stride, padding=padding, dilation=dilation,
                         groups=groups, bias=bias)
        self.eps = eps

    def forward(self, x):
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
@CONV_LAYERS.register_module(name='ConvAWS')
class ConvAWS2d(nn.Conv2d):
    """AWS (Adaptive Weight Standardization)

    This is a variant of Weight Standardization
    (https://arxiv.org/pdf/1903.10520.pdf)
    It is used in DetectoRS to avoid NaN
    (https://arxiv.org/pdf/2006.02334.pdf)

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the conv kernel
        stride (int or tuple, optional): Stride of the convolution.
            Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides
            of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If set True, adds a learnable bias to the
            output. Default: True
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Per-channel affine applied after standardization. Registered as
        # buffers so they are serialized in checkpoints but are not
        # optimizer-updated parameters.
        self.register_buffer('weight_gamma', torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta', torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        # Standardize each output-channel filter to zero mean / unit std,
        # then re-scale and re-shift with weight_gamma / weight_beta.
        weight_flat = weight.view(weight.size(0), (- 1))
        mean = weight_flat.mean(dim=1).view((- 1), 1, 1, 1)
        std = torch.sqrt((weight_flat.var(dim=1) + 1e-05)).view((- 1), 1, 1, 1)
        weight = ((weight - mean) / std)
        weight = ((self.weight_gamma * weight) + self.weight_beta)
        return weight

    def forward(self, x):
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Override default load function.

        AWS overrides _load_from_state_dict to recover weight_gamma and
        weight_beta if they are missing. If weight_gamma and weight_beta
        are found in the checkpoint, this function returns right after
        super()._load_from_state_dict. Otherwise, it computes the mean and
        std of the pretrained weights and stores them in weight_beta and
        weight_gamma respectively.
        """
        # Sentinel: fill gamma with -1 so we can detect afterwards whether
        # the checkpoint supplied real (all-positive default is 1) values.
        self.weight_gamma.data.fill_((- 1))
        local_missing_keys = []
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, local_missing_keys, unexpected_keys, error_msgs)
        if (self.weight_gamma.data.mean() > 0):
            # gamma/beta were present in the checkpoint; just propagate
            # whatever keys are still missing.
            for k in local_missing_keys:
                missing_keys.append(k)
            return
        # The checkpoint lacks gamma/beta (e.g. a plain pretrained Conv2d):
        # derive them from the loaded weight's per-channel statistics so
        # that _get_weight reproduces the original weight.
        weight = self.weight.data
        weight_flat = weight.view(weight.size(0), (- 1))
        mean = weight_flat.mean(dim=1).view((- 1), 1, 1, 1)
        std = torch.sqrt((weight_flat.var(dim=1) + 1e-05)).view((- 1), 1, 1, 1)
        self.weight_beta.data.copy_(mean)
        self.weight_gamma.data.copy_(std)
        # Don't report gamma/beta as missing — they were just recovered.
        missing_gamma_beta = [k for k in local_missing_keys if (k.endswith('weight_gamma') or k.endswith('weight_beta'))]
        for k in missing_gamma_beta:
            local_missing_keys.remove(k)
        for k in local_missing_keys:
            missing_keys.append(k)
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    Replaces a single ConvModule with a depthwise ConvModule
    (``groups == in_channels``) followed by a 1x1 pointwise ConvModule.
    Each part gets its own optional norm/activation; when `norm_cfg` and
    `act_cfg` are specified, the depthwise block also carries norm and
    activation layers.

    Args:
        in_channels (int): Number of channels in the input feature map.
        out_channels (int): Number of channels produced by the convolution.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
        stride (int | tuple[int]): Stride of the convolution. Default: 1.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Default: 1.
        norm_cfg (dict): Default norm config for both depthwise and
            pointwise ConvModule. Default: None.
        act_cfg (dict): Default activation config for both depthwise and
            pointwise ConvModule. Default: dict(type='ReLU').
        dw_norm_cfg (dict): Norm config of depthwise ConvModule;
            'default' inherits `norm_cfg`. Default: 'default'.
        dw_act_cfg (dict): Activation config of depthwise ConvModule;
            'default' inherits `act_cfg`. Default: 'default'.
        pw_norm_cfg (dict): Norm config of pointwise ConvModule;
            'default' inherits `norm_cfg`. Default: 'default'.
        pw_act_cfg (dict): Activation config of pointwise ConvModule;
            'default' inherits `act_cfg`. Default: 'default'.
        kwargs (optional): Other shared arguments for depthwise and
            pointwise ConvModule. See ConvModule for ref.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, norm_cfg=None,
                 act_cfg=dict(type='ReLU'), dw_norm_cfg='default',
                 dw_act_cfg='default', pw_norm_cfg='default',
                 pw_act_cfg='default', **kwargs):
        super().__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'

        # 'default' means: fall back to the shared norm/act config.
        if dw_norm_cfg == 'default':
            dw_norm_cfg = norm_cfg
        if dw_act_cfg == 'default':
            dw_act_cfg = act_cfg
        if pw_norm_cfg == 'default':
            pw_norm_cfg = norm_cfg
        if pw_act_cfg == 'default':
            pw_act_cfg = act_cfg

        # Depthwise: one filter per input channel (groups == in_channels).
        self.depthwise_conv = ConvModule(
            in_channels, in_channels, kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            groups=in_channels, norm_cfg=dw_norm_cfg, act_cfg=dw_act_cfg,
            **kwargs)
        # Pointwise: 1x1 conv mixing channels to out_channels.
        self.pointwise_conv = ConvModule(
            in_channels, out_channels, 1,
            norm_cfg=pw_norm_cfg, act_cfg=pw_act_cfg, **kwargs)

    def forward(self, x):
        return self.pointwise_conv(self.depthwise_conv(x))
def drop_path(x, drop_prob=0.0, training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path
    of residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize: 1 with prob keep_prob, else 0
    # Scale kept paths by 1/keep_prob so the expectation matches eval mode.
    return x.div(keep_prob) * mask
@DROPOUT_LAYERS.register_module()
class DropPath(nn.Module):
    """Module wrapper around :func:`drop_path` (Stochastic Depth).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501

    Args:
        drop_prob (float): Probability of the path to be zeroed.
            Default: 0.1
    """

    def __init__(self, drop_prob=0.1):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # self.training is toggled by nn.Module.train()/eval().
        return drop_path(x, self.drop_prob, self.training)
@DROPOUT_LAYERS.register_module()
class Dropout(nn.Dropout):
    """``torch.nn.Dropout`` with ``p`` renamed to ``drop_prob`` so the
    interface matches ``DropPath``.

    Args:
        drop_prob (float): Probability of the elements to be zeroed.
            Default: 0.5.
        inplace (bool): Do the operation inplace or not. Default: False.
    """

    def __init__(self, drop_prob=0.5, inplace=False):
        super(Dropout, self).__init__(p=drop_prob, inplace=inplace)
def build_dropout(cfg, default_args=None):
    """Build a dropout layer from a config dict.

    Args:
        cfg (dict): layer config containing ``type`` plus layer args.
        default_args (dict, optional): default arguments merged into cfg.

    Returns:
        nn.Module: the created dropout layer.
    """
    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
@ACTIVATION_LAYERS.register_module()
class HSigmoid(nn.Module):
    """Hard Sigmoid Module. Apply the hard sigmoid function:
    Hsigmoid(x) = min(max((x + bias) / divisor, min_value), max_value)
    Default: Hsigmoid(x) = min(max((x + 3) / 6, 0), 1)

    Note:
        In MMCV v1.4.4, the default args were modified to align with the
        PyTorch official implementation.

    Args:
        bias (float): Bias of the input feature map. Default: 3.0.
        divisor (float): Divisor of the input feature map. Default: 6.0.
        min_value (float): Lower bound value. Default: 0.0.
        max_value (float): Upper bound value. Default: 1.0.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, bias=3.0, divisor=6.0, min_value=0.0, max_value=1.0):
        super().__init__()
        warnings.warn('In MMCV v1.4.4, we modified the default value of args to align with PyTorch official. Previous Implementation: Hsigmoid(x) = min(max((x + 1) / 2, 0), 1). Current Implementation: Hsigmoid(x) = min(max((x + 3) / 6, 0), 1).')
        self.bias = bias
        self.divisor = divisor
        assert self.divisor != 0
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        scaled = (x + self.bias) / self.divisor
        # In-place clamp matches the original implementation.
        return scaled.clamp_(self.min_value, self.max_value)
class HSwish(nn.Module):
    """Hard Swish Module.

    This module applies the hard swish function:

    .. math::
        Hswish(x) = x * ReLU6(x + 3) / 6

    Args:
        inplace (bool): can optionally do the operation in-place.
            Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, inplace=False):
        super().__init__()
        # ReLU6 implements the clamped-to-[0, 6] part of hard swish.
        self.act = nn.ReLU6(inplace)

    def forward(self, x):
        return x * self.act(x + 3) / 6
class _NonLocalNd(nn.Module, metaclass=ABCMeta):
    """Basic Non-local module.

    This module is proposed in
    "Non-local Neural Networks"
    Paper reference: https://arxiv.org/abs/1711.07971
    Code reference: https://github.com/AlexHex7/Non-local_pytorch

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            ``1/sqrt(inter_channels)`` when the mode is `embedded_gaussian`.
            Default: True.
        conv_cfg (None | dict): The config dict for convolution layers.
            If not specified, it will use `nn.Conv2d` for convolution layers.
            Default: None.
        norm_cfg (None | dict): The config dict for normalization layers.
            Default: None. (This parameter is only applicable to conv_out.)
        mode (str): Options are `gaussian`, `concatenation`,
            `embedded_gaussian` and `dot_product`. Default: embedded_gaussian.
    """

    def __init__(self,
                 in_channels,
                 reduction=2,
                 use_scale=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 mode='embedded_gaussian',
                 **kwargs):
        super(_NonLocalNd, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        # Keep at least one intermediate channel even for tiny inputs.
        self.inter_channels = max(in_channels // reduction, 1)
        self.mode = mode
        # BUGFIX: `sub_sample` used to be defined only by the
        # NonLocal1d/2d/3d subclasses (after this __init__ returned), so
        # instantiating this base class directly in 'gaussian' mode raised
        # AttributeError inside forward(). Default it here; subclasses
        # simply override it after calling super().__init__().
        self.sub_sample = False

        if mode not in [
                'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation'
        ]:
            raise ValueError(
                f"Mode should be in 'gaussian', 'concatenation', "
                f"'embedded_gaussian' or 'dot_product', but got {mode} "
                f"instead.")

        # g/theta/phi project the input into the embedding space;
        # conv_out maps the aggregated features back to `in_channels`.
        self.g = ConvModule(
            self.in_channels,
            self.inter_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            act_cfg=None)
        self.conv_out = ConvModule(
            self.inter_channels,
            self.in_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        # Gaussian mode compares raw features, so theta/phi convs are
        # not needed there.
        if self.mode != 'gaussian':
            self.theta = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)
            self.phi = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)

        if self.mode == 'concatenation':
            self.concat_project = ConvModule(
                self.inter_channels * 2,
                1,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
                act_cfg=dict(type='ReLU'))

        self.init_weights(**kwargs)

    def init_weights(self, std=0.01, zeros_init=True):
        """Initialize weights.

        With ``zeros_init`` the last conv (or its norm) is zero-initialized
        so the whole block starts out as an identity (residual) mapping.
        """
        if self.mode != 'gaussian':
            for m in [self.g, self.theta, self.phi]:
                normal_init(m.conv, std=std)
        else:
            normal_init(self.g.conv, std=std)
        if zeros_init:
            if self.conv_out.norm_cfg is None:
                constant_init(self.conv_out.conv, 0)
            else:
                constant_init(self.conv_out.norm, 0)
        elif self.conv_out.norm_cfg is None:
            normal_init(self.conv_out.conv, std=std)
        else:
            normal_init(self.conv_out.norm, std=std)

    def gaussian(self, theta_x, phi_x):
        """Pairwise weight: softmax over dot products of raw features."""
        # NonLocal1d: (n, h, h); NonLocal2d: (n, h*w, h*w);
        # NonLocal3d: (n, t*h*w, t*h*w)
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def embedded_gaussian(self, theta_x, phi_x):
        """Pairwise weight: softmax over (optionally scaled) embeddings."""
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is `self.inter_channels`
            pairwise_weight /= theta_x.shape[-1]**0.5
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def dot_product(self, theta_x, phi_x):
        """Pairwise weight: dot products normalized by position count."""
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def concatenation(self, theta_x, phi_x):
        """Pairwise weight: a ReLU-conv over concatenated embeddings."""
        h = theta_x.size(2)
        w = phi_x.size(3)
        # Broadcast theta/phi against each other before concatenation.
        theta_x = theta_x.repeat(1, 1, 1, w)
        phi_x = phi_x.repeat(1, 1, h, 1)
        concat_feature = torch.cat([theta_x, phi_x], dim=1)
        pairwise_weight = self.concat_project(concat_feature)
        n, _, h, w = pairwise_weight.size()
        pairwise_weight = pairwise_weight.view(n, h, w)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def forward(self, x):
        # NonLocal1d x: (n, c, h); NonLocal2d x: (n, c, h, w);
        # NonLocal3d x: (n, c, t, h, w)
        n = x.size(0)

        # g_x: (n, positions, inter_channels)
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        if self.mode == 'gaussian':
            theta_x = x.view(n, self.in_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, -1)
            else:
                phi_x = x.view(n, self.in_channels, -1)
        elif self.mode == 'concatenation':
            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, -1)

        # Dispatch to the pairwise function matching `self.mode`.
        pairwise_func = getattr(self, self.mode)
        pairwise_weight = pairwise_func(theta_x, phi_x)

        # y: (n, positions, inter_channels) -> (n, inter_channels, *spatial)
        y = torch.matmul(pairwise_weight, g_x)
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
                                                    *x.size()[2:])

        # Residual connection around the projected aggregation.
        output = x + self.conv_out(y)
        return output
class NonLocal1d(_NonLocalNd):
    """1D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (applied on the spatial dimension only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv1d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv1d'),
                 **kwargs):
        super(NonLocal1d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if not sub_sample:
            return
        # Sub-sample the key/value paths with a stride-2 max pooling.
        pool = nn.MaxPool1d(kernel_size=2)
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # Gaussian mode has no phi conv, so pooling alone plays its role.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
@PLUGIN_LAYERS.register_module()
class NonLocal2d(_NonLocalNd):
    """2D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (applied on the spatial dimensions only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv2d').
    """

    _abbr_ = 'nonlocal_block'

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv2d'),
                 **kwargs):
        super(NonLocal2d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if not sub_sample:
            return
        # Sub-sample the key/value paths with a 2x2 max pooling.
        pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # Gaussian mode has no phi conv, so pooling alone plays its role.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
class NonLocal3d(_NonLocalNd):
    """3D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (applied on the spatial dimensions only, not time).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv3d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv3d'),
                 **kwargs):
        super(NonLocal3d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if not sub_sample:
            return
        # Pool only the spatial dims; the temporal dim keeps full resolution.
        pool = nn.MaxPool3d(kernel_size=(1, 2, 2))
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            # Gaussian mode has no phi conv, so pooling alone plays its role.
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
def infer_abbr(class_type):
    """Infer the abbreviation of a norm layer class.

    When a norm layer is built with `build_norm_layer()`, the norm type is
    preserved in variable names, e.g. self.bn1, self.gn. This helper maps a
    class type to that abbreviation.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
    InstanceNorm, return "bn", "gn", "ln" or "in" respectively.
    Rule 3: If the class name contains "batch", "group", "layer" or
    "instance", return "bn", "gn", "ln" or "in" respectively.
    Rule 4: Otherwise fall back to "norm_layer".

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    # _InstanceNorm subclasses _BatchNorm, so it must be checked first.
    if issubclass(class_type, _InstanceNorm):
        return 'in'
    if issubclass(class_type, _BatchNorm):
        return 'bn'
    if issubclass(class_type, nn.GroupNorm):
        return 'gn'
    if issubclass(class_type, nn.LayerNorm):
        return 'ln'
    lowered = class_type.__name__.lower()
    for keyword, abbr in (('batch', 'bn'), ('group', 'gn'), ('layer', 'ln'),
                          ('instance', 'in')):
        if keyword in lowered:
            return abbr
    return 'norm_layer'
def build_norm_layer(cfg, num_features, postfix=''):
    """Build normalization layer.

    Args:
        cfg (dict): The norm layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate a norm layer.
            - requires_grad (bool, optional): Whether stop gradient updates.
        num_features (int): Number of input channels.
        postfix (int | str): The postfix to be appended into norm abbreviation
            to create named layer.

    Returns:
        tuple[str, nn.Module]: The first element is the layer name consisting
        of abbreviation and postfix, e.g., bn1, gn. The second element is the
        created norm layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')

    norm_layer = NORM_LAYERS.get(layer_type)
    abbr = infer_abbr(norm_layer)
    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-5)
    if layer_type == 'GN':
        # GroupNorm takes `num_channels` (plus a mandatory `num_groups`)
        # instead of a positional feature count.
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)
    else:
        layer = norm_layer(num_features, **cfg_)
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            # Needed by older torch SyncBN implementations under DDP.
            layer._specify_ddp_gpu_num(1)

    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer
def is_norm(layer, exclude=None):
    """Check if a layer is a normalization layer.

    Args:
        layer (nn.Module): The layer to be checked.
        exclude (type | tuple[type]): Types to be excluded.

    Returns:
        bool: Whether the layer is a norm layer.
    """
    if exclude is not None:
        if not isinstance(exclude, tuple):
            exclude = (exclude, )
        if not is_tuple_of(exclude, type):
            raise TypeError(
                f'"exclude" must be either None or type or a tuple of types, '
                f'but got {type(exclude)}: {exclude}')

    if exclude and isinstance(layer, exclude):
        return False

    norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm)
    return isinstance(layer, norm_bases)
def build_padding_layer(cfg, *args, **kwargs):
    """Build padding layer.

    Args:
        cfg (dict): The padding layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate a padding layer.

    Returns:
        nn.Module: Created padding layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    cfg_ = cfg.copy()
    padding_type = cfg_.pop('type')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')

    padding_layer = PADDING_LAYERS.get(padding_type)
    return padding_layer(*args, **kwargs, **cfg_)
def infer_abbr(class_type):
    """Infer the abbreviation of a plugin layer class.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: Otherwise, fall back to the snake case of the class name, e.g.
    the abbreviation of ``FancyBlock`` will be ``fancy_block``.

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """

    def camel2snack(word):
        """Convert a camel case word into snack case.

        Modified from `inflection lib
        <https://inflection.readthedocs.io/en/latest/#inflection.underscore>`_.

        Example::

            >>> camel2snack("FancyBlock")
            'fancy_block'
        """
        # Split runs of capitals from a following capitalized word,
        # then split lower/digit-to-upper transitions.
        word = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
        word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
        word = word.replace('-', '_')
        return word.lower()

    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    return camel2snack(class_type.__name__)
def build_plugin_layer(cfg, postfix='', **kwargs):
    """Build plugin layer.

    Args:
        cfg (dict): cfg should contain:

            - type (str): identify plugin layer type.
            - layer args: args needed to instantiate a plugin layer.
        postfix (int, str): appended into norm abbreviation to
            create named layer. Default: ''.

    Returns:
        tuple[str, nn.Module]: The first one is the concatenation of
        abbreviation and postfix. The second is the created plugin layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in PLUGIN_LAYERS:
        raise KeyError(f'Unrecognized plugin type {layer_type}')

    plugin_layer = PLUGIN_LAYERS.get(layer_type)
    abbr = infer_abbr(plugin_layer)
    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)
    layer = plugin_layer(**kwargs, **cfg_)
    return name, layer
class Scale(nn.Module):
    """A learnable scale parameter.

    Multiplies the input (of any shape) by a single learnable scalar.

    Args:
        scale (float): Initial value of scale factor. Default: 1.0.
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        # Registered as a parameter so it is trained with the model.
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return x * self.scale
@ACTIVATION_LAYERS.register_module()
class Swish(nn.Module):
    """Swish activation.

    Applies ``Swish(x) = x * Sigmoid(x)``.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        gate = torch.sigmoid(x)
        return x * gate
def build_positional_encoding(cfg, default_args=None):
    """Build a positional encoding module from the ``POSITIONAL_ENCODING``
    registry."""
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args=default_args)
def build_attention(cfg, default_args=None):
    """Build an attention module from the ``ATTENTION`` registry."""
    return build_from_cfg(cfg, ATTENTION, default_args=default_args)
def build_feedforward_network(cfg, default_args=None):
    """Build a feed-forward network (FFN) from the ``FEEDFORWARD_NETWORK``
    registry."""
    return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args=default_args)
def build_transformer_layer(cfg, default_args=None):
    """Build a transformer layer from the ``TRANSFORMER_LAYER`` registry."""
    return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args=default_args)
def build_transformer_layer_sequence(cfg, default_args=None):
    """Build a transformer encoder/decoder from the
    ``TRANSFORMER_LAYER_SEQUENCE`` registry."""
    return build_from_cfg(
        cfg, TRANSFORMER_LAYER_SEQUENCE, default_args=default_args)
class AdaptivePadding(nn.Module):
    """Applies padding adaptively to the input.

    This module pads the input so it is fully covered by the specified
    filter. It supports two modes: "same" pads zeros around the input
    (like TensorFlow's "SAME"), while "corner" pads zeros to the bottom
    right only.

    Args:
        kernel_size (int | tuple): Size of the kernel. Default: 1.
        stride (int | tuple): Stride of the filter. Default: 1.
        dilation (int | tuple): Spacing between kernel elements. Default: 1.
        padding (str): Either "same" or "corner". Default: "corner".

    Example:
        >>> kernel_size = 16
        >>> stride = 16
        >>> dilation = 1
        >>> input = torch.rand(1, 1, 15, 17)
        >>> adap_pad = AdaptivePadding(
        >>>     kernel_size=kernel_size,
        >>>     stride=stride,
        >>>     dilation=dilation,
        >>>     padding="corner")
        >>> out = adap_pad(input)
        >>> assert (out.shape[2], out.shape[3]) == (16, 32)
        >>> input = torch.rand(1, 1, 16, 17)
        >>> out = adap_pad(input)
        >>> assert (out.shape[2], out.shape[3]) == (16, 32)
    """

    def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
        super(AdaptivePadding, self).__init__()
        assert padding in ('same', 'corner')
        self.padding = padding
        # Normalize every geometry argument to an (h, w) pair.
        self.kernel_size = to_2tuple(kernel_size)
        self.stride = to_2tuple(stride)
        self.dilation = to_2tuple(dilation)

    def get_pad_shape(self, input_shape):
        """Calculate the padding size of input.

        Args:
            input_shape (:obj:`torch.Size`): arrange as (H, W).

        Returns:
            Tuple[int]: The padding size along the original H and W
            directions.
        """
        input_h, input_w = input_shape
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.stride
        dilation_h, dilation_w = self.dilation
        # Number of filter applications needed to cover each dimension.
        output_h = math.ceil(input_h / stride_h)
        output_w = math.ceil(input_w / stride_w)
        # Extra length required so the last window fits entirely.
        pad_h = max(
            (output_h - 1) * stride_h + (kernel_h - 1) * dilation_h + 1 -
            input_h, 0)
        pad_w = max(
            (output_w - 1) * stride_w + (kernel_w - 1) * dilation_w + 1 -
            input_w, 0)
        return pad_h, pad_w

    def forward(self, x):
        """Pad `x` so the configured filter covers it exactly.

        Args:
            x (Tensor): Input tensor has shape (B, C, H, W).

        Returns:
            Tensor: The tensor with adaptive padding.
        """
        pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
        if pad_h <= 0 and pad_w <= 0:
            return x
        if self.padding == 'corner':
            # All the padding goes to the bottom/right edges.
            return F.pad(x, [0, pad_w, 0, pad_h])
        # 'same': distribute the padding evenly around the input.
        return F.pad(x, [
            pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
        ])
class PatchEmbed(BaseModule):
    """Image to Patch Embedding.

    A conv layer turns an image of shape (B, C, H, W) into a sequence of
    patch embeddings of shape (B, out_h * out_w, embed_dims).

    Args:
        in_channels (int): The num of input channels. Default: 3.
        embed_dims (int): The dimensions of embedding. Default: 768.
        conv_type (str): The type of convolution
            to generate patch embedding. Default: "Conv2d".
        kernel_size (int): The kernel_size of embedding conv. Default: 16.
        stride (int): The slide stride of embedding conv.
            Default: 16.
        padding (int | tuple | string): The padding length of
            embedding conv. When it is a string, it means the mode
            of adaptive padding, support "same" and "corner" now.
            Default: "corner".
        dilation (int): The dilation rate of embedding conv. Default: 1.
        bias (bool): Bias of embed conv. Default: True.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: None.
        input_size (int | tuple | None): The size of input, which will be
            used to calculate the out size. Only works when `dynamic_size`
            is False. Default: None.
        init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
            Default: None.
    """

    def __init__(self,
                 in_channels=3,
                 embed_dims=768,
                 conv_type='Conv2d',
                 kernel_size=16,
                 stride=16,
                 padding='corner',
                 dilation=1,
                 bias=True,
                 norm_cfg=None,
                 input_size=None,
                 init_cfg=None):
        super(PatchEmbed, self).__init__(init_cfg=init_cfg)
        self.embed_dims = embed_dims
        # Fall back to non-overlapping patches when no stride is given.
        if (stride is None):
            stride = kernel_size

        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        dilation = to_2tuple(dilation)

        if isinstance(padding, str):
            # A string selects adaptive padding; the conv itself then runs
            # with zero padding.
            self.adaptive_padding = AdaptivePadding(
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                padding=padding)
            padding = 0
        else:
            self.adaptive_padding = None
        padding = to_2tuple(padding)

        self.projection = build_conv_layer(
            dict(type=conv_type),
            in_channels=in_channels,
            out_channels=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)

        if (norm_cfg is not None):
            self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
        else:
            self.norm = None

        if input_size:
            input_size = to_2tuple(input_size)
            # `init_input_size` / `init_out_size` record the statically
            # computed sizes; they only describe inputs of exactly this size.
            self.init_input_size = input_size
            if self.adaptive_padding:
                # Account for the extra rows/cols added before the conv.
                (pad_h, pad_w) = self.adaptive_padding.get_pad_shape(
                    input_size)
                (input_h, input_w) = input_size
                input_h = (input_h + pad_h)
                input_w = (input_w + pad_w)
                input_size = (input_h, input_w)
            # Standard conv output-size formula (see torch.nn.Conv2d docs).
            h_out = (((((input_size[0] + (2 * padding[0])) -
                        (dilation[0] * (kernel_size[0] - 1))) - 1) //
                      stride[0]) + 1)
            w_out = (((((input_size[1] + (2 * padding[1])) -
                        (dilation[1] * (kernel_size[1] - 1))) - 1) //
                      stride[1]) + 1)
            self.init_out_size = (h_out, w_out)
        else:
            self.init_input_size = None
            self.init_out_size = None

    def forward(self, x):
        """
        Args:
            x (Tensor): Has shape (B, C, H, W). In most case, C is 3.

        Returns:
            tuple: Contains merged results and its spatial shape.

            - x (Tensor): Has shape (B, out_h * out_w, embed_dims)
            - out_size (tuple[int]): Spatial shape of x, arrange as
              (out_h, out_w).
        """
        if self.adaptive_padding:
            x = self.adaptive_padding(x)

        x = self.projection(x)
        out_size = (x.shape[2], x.shape[3])
        # Flatten spatial dims and move channels last: (B, H*W, C).
        x = x.flatten(2).transpose(1, 2)
        if (self.norm is not None):
            x = self.norm(x)
        return (x, out_size)
class PatchMerging(BaseModule):
    """Merge patch feature map.

    This layer groups feature map by kernel_size, and applies norm and linear
    layers to the grouped feature map ((used in Swin Transformer)).
    Our implementation uses ``nn.Unfold`` to merge patches, which is about
    25% faster than the original implementation. However, we need to modify
    pretrained models for compatibility.

    Args:
        in_channels (int): The num of input channels.
            to gets fully covered by filter and stride you specified.
        out_channels (int): The num of output channels.
        kernel_size (int | tuple, optional): the kernel size in the unfold
            layer. Defaults to 2.
        stride (int | tuple, optional): the stride of the sliding blocks in
            the unfold layer. Default: None. (Would be set as `kernel_size`)
        padding (int | tuple | string ): The padding length of
            embedding conv. When it is a string, it means the mode
            of adaptive padding, support "same" and "corner" now.
            Default: "corner".
        dilation (int | tuple, optional): dilation parameter in the unfold
            layer. Default: 1.
        bias (bool, optional): Whether to add bias in linear layer or not.
            Defaults: False.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: dict(type='LN').
        init_cfg (dict, optional): The extra config for initialization.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=2,
                 stride=None,
                 padding='corner',
                 dilation=1,
                 bias=False,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Default the stride to the kernel size (non-overlapping merging).
        if stride:
            stride = stride
        else:
            stride = kernel_size

        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        dilation = to_2tuple(dilation)

        if isinstance(padding, str):
            # A string selects adaptive padding; the unfold itself then runs
            # with zero padding.
            self.adaptive_padding = AdaptivePadding(
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                padding=padding)
            padding = 0
        else:
            self.adaptive_padding = None
        padding = to_2tuple(padding)

        self.sampler = nn.Unfold(
            kernel_size=kernel_size,
            dilation=dilation,
            padding=padding,
            stride=stride)

        # Each unfolded column stacks kernel_h * kernel_w input patches.
        sample_dim = ((kernel_size[0] * kernel_size[1]) * in_channels)

        if (norm_cfg is not None):
            self.norm = build_norm_layer(norm_cfg, sample_dim)[1]
        else:
            self.norm = None

        self.reduction = nn.Linear(sample_dim, out_channels, bias=bias)

    def forward(self, x, input_size):
        """
        Args:
            x (Tensor): Has shape (B, H*W, C_in).
            input_size (tuple[int]): The spatial shape of x, arrange as
                (H, W). Default: None.

        Returns:
            tuple: Contains merged results and its spatial shape.

            - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)
            - out_size (tuple[int]): Spatial shape of x, arrange as
              (Merged_H, Merged_W).
        """
        (B, L, C) = x.shape
        assert isinstance(input_size, Sequence), f'Expect input_size is `Sequence` but get {input_size}'

        (H, W) = input_size
        assert (L == (H * W)), 'input feature has wrong size'

        # (B, H*W, C) -> (B, C, H, W) so nn.Unfold can slide over it.
        x = x.view(B, H, W, C).permute([0, 3, 1, 2])

        if self.adaptive_padding:
            x = self.adaptive_padding(x)
            # Padding may have changed the spatial size.
            (H, W) = x.shape[(- 2):]

        # Unfold output: (B, kernel_h*kernel_w*C, out_h*out_w).
        x = self.sampler(x)

        # Standard sliding-window output-size formula (see nn.Unfold docs).
        out_h = (((((H + (2 * self.sampler.padding[0])) -
                    (self.sampler.dilation[0] *
                     (self.sampler.kernel_size[0] - 1))) - 1) //
                  self.sampler.stride[0]) + 1)
        out_w = (((((W + (2 * self.sampler.padding[1])) -
                    (self.sampler.dilation[1] *
                     (self.sampler.kernel_size[1] - 1))) - 1) //
                  self.sampler.stride[1]) + 1)

        output_size = (out_h, out_w)
        # (B, kernel_h*kernel_w*C, out_h*out_w) -> (B, out_h*out_w, sample_dim)
        x = x.transpose(1, 2)
        x = (self.norm(x) if self.norm else x)
        x = self.reduction(x)
        return (x, output_size)
@ATTENTION.register_module()
class MultiheadAttention(BaseModule):
    """A wrapper for ``torch.nn.MultiheadAttention``.

    This module implements MultiheadAttention with identity connection,
    and positional encoding is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads.
        attn_drop (float): A Dropout layer on attn_output_weights.
            Default: 0.0.
        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
            Default: 0.0.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): When it is True, Key, Query and Value are shape of
            (batch, n, embed_dim), otherwise (n, batch, embed_dim).
            Default to False.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.0,
                 proj_drop=0.0,
                 dropout_layer=dict(type='Dropout', drop_prob=0.0),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        super(MultiheadAttention, self).__init__(init_cfg)
        if 'dropout' in kwargs:
            warnings.warn(
                'The arguments `dropout` in MultiheadAttention '
                'has been deprecated, now you can separately '
                'set `attn_drop`(float), proj_drop(float), '
                'and `dropout_layer`(dict) ', DeprecationWarning)
            attn_drop = kwargs['dropout']
            # Pop the legacy key so it is not forwarded to
            # nn.MultiheadAttention below.
            dropout_layer['drop_prob'] = kwargs.pop('dropout')

        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.batch_first = batch_first

        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)

        self.proj_drop = nn.Dropout(proj_drop)
        # Dropout on the residual branch; identity when not configured.
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()

    @deprecated_api_warning({'residual': 'identity'},
                            cls_name='MultiheadAttention')
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        **kwargs allow passing a more general data flow when combining
        with other operations in `transformerlayer`.

        Args:
            query (Tensor): The input query with shape [num_queries, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims].
                If None, the ``query`` will be used. Defaults to None.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Defaults to None.
                If None, the `key` will be used.
            identity (Tensor): This tensor, with the same shape as x,
                will be used for the identity link.
                If None, `x` will be used. Defaults to None.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. If not None, it will
                be added to `x` before forward function. Defaults to None.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Defaults to None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`. Defaults to None.
            attn_mask (Tensor): ByteTensor mask with shape [num_queries,
                num_keys]. Same in `nn.MultiheadAttention.forward`.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
                Defaults to None.

        Returns:
            Tensor: forwarded results with shape
            [num_queries, bs, embed_dims]
            if self.batch_first is False, else
            [bs, num_queries embed_dims].
        """
        # Self-attention defaults: key/value/identity fall back to `query`.
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None:
            if query_pos is not None:
                # Reuse query_pos for the key only when shapes match.
                if query_pos.shape == key.shape:
                    key_pos = query_pos
                else:
                    # BUGFIX: the original message read "key ismissing"
                    # because two string fragments were concatenated
                    # without a space.
                    warnings.warn(f'position encoding of key is '
                                  f'missing in {self.__class__.__name__}.')
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos

        # nn.MultiheadAttention expects (n, batch, embed_dim) inputs.
        if self.batch_first:
            query = query.transpose(0, 1)
            key = key.transpose(0, 1)
            value = value.transpose(0, 1)

        out = self.attn(
            query=query,
            key=key,
            value=value,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]

        if self.batch_first:
            out = out.transpose(0, 1)

        return identity + self.dropout_layer(self.proj_drop(out))
@FEEDFORWARD_NETWORK.register_module()
class FFN(BaseModule):
    """Implements feed-forward networks (FFNs) with identity connection.

    Args:
        embed_dims (int): The feature dimension. Same as
            `MultiheadAttention`. Defaults: 256.
        feedforward_channels (int): The hidden dimension of FFNs.
            Defaults: 1024.
        num_fcs (int, optional): The number of fully-connected layers in
            FFNs. Default: 2.
        act_cfg (dict, optional): The activation config for FFNs.
            Default: dict(type='ReLU')
        ffn_drop (float, optional): Probability of an element to be
            zeroed in FFN. Default 0.0.
        add_identity (bool, optional): Whether to add the
            identity connection. Default: `True`.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    @deprecated_api_warning(
        {
            'dropout': 'ffn_drop',
            'add_residual': 'add_identity'
        },
        cls_name='FFN')
    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_drop=0.0,
                 dropout_layer=None,
                 add_identity=True,
                 init_cfg=None,
                 **kwargs):
        super(FFN, self).__init__(init_cfg)
        assert (num_fcs >= 2), f'num_fcs should be no less than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.activate = build_activation_layer(act_cfg)

        layers = []
        in_channels = embed_dims
        # (num_fcs - 1) hidden Linear -> activation -> dropout stages,
        # then a final projection back to `embed_dims` plus dropout.
        for _ in range((num_fcs - 1)):
            layers.append(
                Sequential(
                    Linear(in_channels, feedforward_channels), self.activate,
                    nn.Dropout(ffn_drop)))
            in_channels = feedforward_channels
        layers.append(Linear(feedforward_channels, embed_dims))
        layers.append(nn.Dropout(ffn_drop))
        self.layers = Sequential(*layers)
        # Dropout on the residual branch; identity when not configured.
        self.dropout_layer = (build_dropout(dropout_layer)
                              if dropout_layer else torch.nn.Identity())
        self.add_identity = add_identity

    @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN')
    def forward(self, x, identity=None):
        """Forward function for `FFN`.

        The function would add x to the output tensor if residue is None.
        """
        out = self.layers(x)
        if (not self.add_identity):
            # Pure feed-forward, no shortcut.
            return self.dropout_layer(out)
        if (identity is None):
            identity = x
        return (identity + self.dropout_layer(out))
@TRANSFORMER_LAYER.register_module()
class BaseTransformerLayer(BaseModule):
    """Base `TransformerLayer` for vision transformer.

    It can be built from `mmcv.ConfigDict` and supports flexible
    customization, for example, using any number of `FFN or LN` and
    different kinds of `attention` by specifying a list of `ConfigDict`
    named `attn_cfgs`. It supports `prenorm` when `norm` is the first
    element of `operation_order`. More details about the `prenorm`:
    `On Layer Normalization in the Transformer Architecture
    <https://arxiv.org/abs/2002.04745>`_ .

    Args:
        attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None):
            Configs for `self_attention` or `cross_attention` modules.
            The order of the configs in the list should be consistent with
            corresponding attentions in operation_order. If it is a dict,
            all of the attention modules in operation_order will be built
            with this config. Default: None.
        ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None):
            Configs for FFN. The order of the configs in the list should be
            consistent with the corresponding ffn in operation_order.
            If it is a dict, all of the FFN modules in operation_order
            will be built with this config.
        operation_order (tuple[str]): The execution order of operations
            in transformer, e.g. ('self_attn', 'norm', 'ffn', 'norm').
            Supports `prenorm` when the first element is `norm`.
            Default: None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='LN').
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): Key, Query and Value are shape
            of (batch, n, embed_dim) or (n, batch, embed_dim).
            Default to False.
    """

    def __init__(self,
                 attn_cfgs=None,
                 ffn_cfgs=dict(
                     type='FFN',
                     embed_dims=256,
                     feedforward_channels=1024,
                     num_fcs=2,
                     ffn_drop=0.0,
                     act_cfg=dict(type='ReLU', inplace=True)),
                 operation_order=None,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        # Forward old FFN-related kwargs into ``ffn_cfgs`` for backward
        # compatibility; each use emits a DeprecationWarning.
        deprecated_args = dict(
            feedforward_channels='feedforward_channels',
            ffn_dropout='ffn_drop',
            ffn_num_fcs='num_fcs')
        for (ori_name, new_name) in deprecated_args.items():
            if (ori_name in kwargs):
                warnings.warn(f'The arguments `{ori_name}` in BaseTransformerLayer has been deprecated, now you should set `{new_name}` and other FFN related arguments to a dict named `ffn_cfgs`. ', DeprecationWarning)
                ffn_cfgs[new_name] = kwargs[ori_name]
        super(BaseTransformerLayer, self).__init__(init_cfg)
        self.batch_first = batch_first
        # Every entry of operation_order must be one of the four known
        # operation names (no extras allowed).
        assert ((set(operation_order) & set(['self_attn', 'norm', 'ffn', 'cross_attn'])) == set(operation_order)), f"The operation_order of {self.__class__.__name__} should contains all four operation type {['self_attn', 'norm', 'ffn', 'cross_attn']}"
        num_attn = (operation_order.count('self_attn') + operation_order.count('cross_attn'))
        # A single attn config dict is replicated for every attention slot.
        if isinstance(attn_cfgs, dict):
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert (num_attn == len(attn_cfgs)), f'The length of attn_cfg {num_attn} is not consistent with the number of attentionin operation_order {operation_order}.'
        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        # Prenorm layout is detected from the first operation.
        self.pre_norm = (operation_order[0] == 'norm')
        self.attentions = ModuleList()
        index = 0
        for operation_name in operation_order:
            if (operation_name in ['self_attn', 'cross_attn']):
                # ``batch_first`` of every attention must match the layer's.
                if ('batch_first' in attn_cfgs[index]):
                    assert (self.batch_first == attn_cfgs[index]['batch_first'])
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                attention = build_attention(attn_cfgs[index])
                # Tag the attention with the slot it fills so callers can
                # tell self- and cross-attention apart.
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1
        # All attentions share one embedding dim; take it from the first.
        self.embed_dims = self.attentions[0].embed_dims
        self.ffns = ModuleList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = ConfigDict(ffn_cfgs)
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert (len(ffn_cfgs) == num_ffns)
        for ffn_index in range(num_ffns):
            # Fill in or validate the embedding dim of every FFN config.
            if ('embed_dims' not in ffn_cfgs[ffn_index]):
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert (ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims)
            self.ffns.append(build_feedforward_network(ffn_cfgs[ffn_index], dict(type='FFN')))
        self.norms = ModuleList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

    def forward(self, query, key=None, value=None, query_pos=None, key_pos=None, attn_masks=None, query_key_padding_mask=None, key_padding_mask=None, **kwargs):
        """Forward function for `TransformerDecoderLayer`.

        **kwargs contains some specific arguments of attentions.

        Args:
            query (Tensor): The input query with shape
                [num_queries, bs, embed_dims] if self.batch_first is
                False, else [bs, num_queries, embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims].
            value (Tensor): The value tensor with same shape as `key`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor] | None): 2D Tensor used in
                calculation of corresponding attention. The length of
                it should equal to the number of `attention` in
                `operation_order`. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in `self_attn` layer.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
        """
        norm_index = 0
        attn_index = 0
        ffn_index = 0
        # ``identity`` tracks the residual input; with prenorm it is the
        # value *before* the preceding norm.
        identity = query
        if (attn_masks is None):
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, torch.Tensor):
            # A single mask is broadcast (copied) to every attention.
            attn_masks = [copy.deepcopy(attn_masks) for _ in range(self.num_attn)]
            warnings.warn(f'Use same attn_mask in all attentions in {self.__class__.__name__} ')
        else:
            assert (len(attn_masks) == self.num_attn), f'The length of attn_masks {len(attn_masks)} must be equal to the number of attention in operation_order {self.num_attn}'
        for layer in self.operation_order:
            if (layer == 'self_attn'):
                # Self-attention: key/value are the query itself, and the
                # query positional encoding is reused for the keys.
                temp_key = temp_value = query
                query = self.attentions[attn_index](query, temp_key, temp_value, (identity if self.pre_norm else None), query_pos=query_pos, key_pos=query_pos, attn_mask=attn_masks[attn_index], key_padding_mask=query_key_padding_mask, **kwargs)
                attn_index += 1
                identity = query
            elif (layer == 'norm'):
                query = self.norms[norm_index](query)
                norm_index += 1
            elif (layer == 'cross_attn'):
                query = self.attentions[attn_index](query, key, value, (identity if self.pre_norm else None), query_pos=query_pos, key_pos=key_pos, attn_mask=attn_masks[attn_index], key_padding_mask=key_padding_mask, **kwargs)
                attn_index += 1
                identity = query
            elif (layer == 'ffn'):
                query = self.ffns[ffn_index](query, (identity if self.pre_norm else None))
                ffn_index += 1
        return query
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class TransformerLayerSequence(BaseModule):
    """Base class for TransformerEncoder and TransformerDecoder in vision
    transformer.

    Stacks ``num_layers`` transformer layers built from ``transformerlayers``
    and runs them sequentially.

    Args:
        transformerlayers (list[obj:`mmcv.ConfigDict`] |
            obj:`mmcv.ConfigDict`): Config of the transformer layer(s). A
            single dict is deep-copied ``num_layers`` times into a list.
            Default: None.
        num_layers (int): The number of `TransformerLayer`. Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None):
        super(TransformerLayerSequence, self).__init__(init_cfg)
        if isinstance(transformerlayers, dict):
            layer_cfgs = [
                copy.deepcopy(transformerlayers) for _ in range(num_layers)
            ]
        else:
            assert isinstance(transformerlayers, list) and \
                len(transformerlayers) == num_layers
            layer_cfgs = transformerlayers
        self.num_layers = num_layers
        self.layers = ModuleList()
        for cfg in layer_cfgs:
            self.layers.append(build_transformer_layer(cfg))
        # All layers share one embedding dim and norm placement; mirror the
        # first layer's attributes on the sequence itself.
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm

    def forward(self,
                query,
                key,
                value,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerCoder`.

        Args:
            query (Tensor): Input query with shape
                `(num_queries, bs, embed_dims)`.
            key (Tensor): The key tensor with shape
                `(num_keys, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_keys, bs, embed_dims)`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor], optional): Each element is a 2D Tensor
                used in the calculation of the corresponding attention in
                operation_order. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in self-attention.
                Default: None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: results with shape [num_queries, bs, embed_dims].
        """
        for layer in self.layers:
            query = layer(
                query,
                key,
                value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask,
                **kwargs)
        return query
@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle') class PixelShufflePack(nn.Module): 'Pixel Shuffle upsample layer.\n\n This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to\n achieve a simple upsampling with pixel shuffle.\n\n Args:\n in_channels (int): Number of input channels.\n out_channels (int): Number of output channels.\n scale_factor (int): Upsample ratio.\n upsample_kernel (int): Kernel size of the conv layer to expand the\n channels.\n ' def __init__(self, in_channels, out_channels, scale_factor, upsample_kernel): super(PixelShufflePack, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.scale_factor = scale_factor self.upsample_kernel = upsample_kernel self.upsample_conv = nn.Conv2d(self.in_channels, ((self.out_channels * scale_factor) * scale_factor), self.upsample_kernel, padding=((self.upsample_kernel - 1) // 2)) self.init_weights() def init_weights(self): xavier_init(self.upsample_conv, distribution='uniform') def forward(self, x): x = self.upsample_conv(x) x = F.pixel_shuffle(x, self.scale_factor) return x
def build_upsample_layer(cfg, *args, **kwargs): 'Build upsample layer.\n\n Args:\n cfg (dict): The upsample layer config, which should contain:\n\n - type (str): Layer type.\n - scale_factor (int): Upsample ratio, which is not applicable to\n deconv.\n - layer args: Args needed to instantiate a upsample layer.\n args (argument list): Arguments passed to the ``__init__``\n method of the corresponding conv layer.\n kwargs (keyword arguments): Keyword arguments passed to the\n ``__init__`` method of the corresponding conv layer.\n\n Returns:\n nn.Module: Created upsample layer.\n ' if (not isinstance(cfg, dict)): raise TypeError(f'cfg must be a dict, but got {type(cfg)}') if ('type' not in cfg): raise KeyError(f'the cfg dict must contain the key "type", but got {cfg}') cfg_ = cfg.copy() layer_type = cfg_.pop('type') if (layer_type not in UPSAMPLE_LAYERS): raise KeyError(f'Unrecognized upsample type {layer_type}') else: upsample = UPSAMPLE_LAYERS.get(layer_type) if (upsample is nn.Upsample): cfg_['mode'] = layer_type layer = upsample(*args, **kwargs, **cfg_) return layer
def obsolete_torch_version(torch_version, version_threshold): return ((torch_version == 'parrots') or (torch_version <= version_threshold))
class NewEmptyTensorOp(torch.autograd.Function): @staticmethod def forward(ctx, x, new_shape): ctx.shape = x.shape return x.new_empty(new_shape) @staticmethod def backward(ctx, grad): shape = ctx.shape return (NewEmptyTensorOp.apply(grad, shape), None)
@CONV_LAYERS.register_module('Conv', force=True) class Conv2d(nn.Conv2d): def forward(self, x): if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))): out_shape = [x.shape[0], self.out_channels] for (i, k, p, s, d) in zip(x.shape[(- 2):], self.kernel_size, self.padding, self.stride, self.dilation): o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) // s) + 1) out_shape.append(o) empty = NewEmptyTensorOp.apply(x, out_shape) if self.training: dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0) return (empty + dummy) else: return empty return super().forward(x)
@CONV_LAYERS.register_module('Conv3d', force=True) class Conv3d(nn.Conv3d): def forward(self, x): if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))): out_shape = [x.shape[0], self.out_channels] for (i, k, p, s, d) in zip(x.shape[(- 3):], self.kernel_size, self.padding, self.stride, self.dilation): o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) // s) + 1) out_shape.append(o) empty = NewEmptyTensorOp.apply(x, out_shape) if self.training: dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0) return (empty + dummy) else: return empty return super().forward(x)
@CONV_LAYERS.register_module() @CONV_LAYERS.register_module('deconv') @UPSAMPLE_LAYERS.register_module('deconv', force=True) class ConvTranspose2d(nn.ConvTranspose2d): def forward(self, x): if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))): out_shape = [x.shape[0], self.out_channels] for (i, k, p, s, d, op) in zip(x.shape[(- 2):], self.kernel_size, self.padding, self.stride, self.dilation, self.output_padding): out_shape.append((((((i - 1) * s) - (2 * p)) + ((d * (k - 1)) + 1)) + op)) empty = NewEmptyTensorOp.apply(x, out_shape) if self.training: dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0) return (empty + dummy) else: return empty return super().forward(x)
@CONV_LAYERS.register_module() @CONV_LAYERS.register_module('deconv3d') @UPSAMPLE_LAYERS.register_module('deconv3d', force=True) class ConvTranspose3d(nn.ConvTranspose3d): def forward(self, x): if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))): out_shape = [x.shape[0], self.out_channels] for (i, k, p, s, d, op) in zip(x.shape[(- 3):], self.kernel_size, self.padding, self.stride, self.dilation, self.output_padding): out_shape.append((((((i - 1) * s) - (2 * p)) + ((d * (k - 1)) + 1)) + op)) empty = NewEmptyTensorOp.apply(x, out_shape) if self.training: dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0) return (empty + dummy) else: return empty return super().forward(x)
class MaxPool2d(nn.MaxPool2d): def forward(self, x): if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 9))): out_shape = list(x.shape[:2]) for (i, k, p, s, d) in zip(x.shape[(- 2):], _pair(self.kernel_size), _pair(self.padding), _pair(self.stride), _pair(self.dilation)): o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) / s) + 1) o = (math.ceil(o) if self.ceil_mode else math.floor(o)) out_shape.append(o) empty = NewEmptyTensorOp.apply(x, out_shape) return empty return super().forward(x)
class MaxPool3d(nn.MaxPool3d): def forward(self, x): if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 9))): out_shape = list(x.shape[:2]) for (i, k, p, s, d) in zip(x.shape[(- 3):], _triple(self.kernel_size), _triple(self.padding), _triple(self.stride), _triple(self.dilation)): o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) / s) + 1) o = (math.ceil(o) if self.ceil_mode else math.floor(o)) out_shape.append(o) empty = NewEmptyTensorOp.apply(x, out_shape) return empty return super().forward(x)
class Linear(torch.nn.Linear): def forward(self, x): if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 5))): out_shape = [x.shape[0], self.out_features] empty = NewEmptyTensorOp.apply(x, out_shape) if self.training: dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0) return (empty + dummy) else: return empty return super().forward(x)
def build_model_from_cfg(cfg, registry, default_args=None): 'Build a PyTorch model from config dict(s). Different from\n ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.\n\n Args:\n cfg (dict, list[dict]): The config of modules, is is either a config\n dict or a list of config dicts. If cfg is a list, a\n the built modules will be wrapped with ``nn.Sequential``.\n registry (:obj:`Registry`): A registry the module belongs to.\n default_args (dict, optional): Default arguments to build the module.\n Defaults to None.\n\n Returns:\n nn.Module: A built nn module.\n ' if isinstance(cfg, list): modules = [build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg] return Sequential(*modules) else: return build_from_cfg(cfg, registry, default_args)
def conv3x3(in_planes, out_planes, stride=1, dilation=1): '3x3 convolution with padding.' return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False): super(BasicBlock, self).__init__() assert (style in ['pytorch', 'caffe']) self.conv1 = conv3x3(inplanes, planes, stride, dilation) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.dilation = dilation assert (not with_cp) def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if (self.downsample is not None): residual = self.downsample(x) out += residual out = self.relu(out) return out
class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False): 'Bottleneck block.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if\n it is "caffe", the stride-two layer is the first 1x1 conv layer.\n ' super(Bottleneck, self).__init__() assert (style in ['pytorch', 'caffe']) if (style == 'pytorch'): conv1_stride = 1 conv2_stride = stride else: conv1_stride = stride conv2_stride = 1 self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=conv2_stride, padding=dilation, dilation=dilation, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, (planes * self.expansion), kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d((planes * self.expansion)) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp def forward(self, x): def _inner_forward(x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if (self.downsample is not None): residual = self.downsample(x) out += residual return out if (self.with_cp and x.requires_grad): out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1, style='pytorch', with_cp=False): downsample = None if ((stride != 1) or (inplanes != (planes * block.expansion))): downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion))) layers = [] layers.append(block(inplanes, planes, stride, dilation, downsample, style=style, with_cp=with_cp)) inplanes = (planes * block.expansion) for _ in range(1, blocks): layers.append(block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)) return nn.Sequential(*layers)
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
    """

    # depth -> (block class, number of blocks in each of the 4 stages)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, depth, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', frozen_stages=(-1), bn_eval=True, bn_frozen=False, with_cp=False):
        super(ResNet, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        assert ((num_stages >= 1) and (num_stages <= 4))
        (block, stage_blocks) = self.arch_settings[depth]
        # Truncate the per-stage block counts to the requested stage number.
        stage_blocks = stage_blocks[:num_stages]
        assert (len(strides) == len(dilations) == num_stages)
        assert (max(out_indices) < num_stages)
        self.out_indices = out_indices
        self.style = style
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        self.with_cp = with_cp
        self.inplanes = 64
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # ``res_layers`` stores attribute names ('layer1'...) of the stages.
        self.res_layers = []
        for (i, num_blocks) in enumerate(stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Channel count doubles each stage: 64, 128, 256, 512.
            planes = (64 * (2 ** i))
            res_layer = make_res_layer(block, self.inplanes, planes, num_blocks, stride=stride, dilation=dilation, style=self.style, with_cp=with_cp)
            self.inplanes = (planes * block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        # Channel count of the final stage's output feature map.
        self.feat_dim = ((block.expansion * 64) * (2 ** (len(stage_blocks) - 1)))

    def init_weights(self, pretrained=None):
        # Load a checkpoint when a path is given; otherwise apply
        # Kaiming init to convs and constant init to BN layers.
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        # Stem, then the residual stages; collect outputs of the stages
        # listed in ``out_indices``.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        # Single requested stage -> bare tensor; otherwise a tuple.
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        super(ResNet, self).train(mode)
        # Optionally keep all BN layers in eval mode (frozen running stats),
        # and additionally freeze their affine parameters if requested.
        if self.bn_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        # Freeze the stem and the first ``frozen_stages`` stages when
        # entering train mode.
        if (mode and (self.frozen_stages >= 0)):
            for param in self.conv1.parameters():
                param.requires_grad = False
            for param in self.bn1.parameters():
                param.requires_grad = False
            self.bn1.eval()
            self.bn1.weight.requires_grad = False
            self.bn1.bias.requires_grad = False
            for i in range(1, (self.frozen_stages + 1)):
                mod = getattr(self, f'layer{i}')
                mod.eval()
                for param in mod.parameters():
                    param.requires_grad = False
def get_model_complexity_info(model, input_shape, print_per_layer_stat=True,
                              as_strings=True, input_constructor=None,
                              flush=False, ost=sys.stdout):
    """Get complexity information of a model.

    Computes FLOPs (multiply-add counts) and parameter count of ``model``
    fed with a random input of ``input_shape``, optionally printing a
    per-layer breakdown.

    Args:
        model (nn.Module): The model for complexity calculation.
        input_shape (tuple): Input shape (without the batch dim) used for
            calculation.
        print_per_layer_stat (bool): Whether to print complexity information
            for each layer in a model. Default: True.
        as_strings (bool): Output FLOPs and params counts in a string form.
            Default: True.
        input_constructor (None | callable): If specified, it takes a
            callable method that generates input. Otherwise, a random tensor
            with input shape is used to calculate FLOPs. Default: None.
        flush (bool): same as that in :func:`print`. Default: False.
        ost (stream): same as ``file`` param in :func:`print`.
            Default: sys.stdout.

    Returns:
        tuple[float | str]: FLOPs and parameter counts, as strings when
        ``as_strings`` is True, otherwise as floats.
    """
    assert type(input_shape) is tuple
    assert len(input_shape) >= 1
    assert isinstance(model, nn.Module)

    def _random_batch(net):
        # Match dtype/device of the model's parameters when it has any;
        # a parameter-less model falls back to a default CPU tensor.
        try:
            param = next(net.parameters())
            return torch.ones(()).new_empty(
                (1, *input_shape), dtype=param.dtype, device=param.device)
        except StopIteration:
            return torch.ones(()).new_empty((1, *input_shape))

    counted = add_flops_counting_methods(model)
    counted.eval()
    counted.start_flops_count()
    if input_constructor:
        counted(**input_constructor(input_shape))
    else:
        counted(_random_batch(counted))

    flops_count, params_count = counted.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(
            counted, flops_count, params_count, ost=ost, flush=flush)
    counted.stop_flops_count()

    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)
    return flops_count, params_count
def flops_to_string(flops, units='GFLOPs', precision=2): "Convert FLOPs number into a string.\n\n Note that Here we take a multiply-add counts as one FLOP.\n\n Args:\n flops (float): FLOPs number to be converted.\n units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',\n 'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically\n choose the most suitable unit for FLOPs. Default: 'GFLOPs'.\n precision (int): Digit number after the decimal point. Default: 2.\n\n Returns:\n str: The converted FLOPs number with units.\n\n Examples:\n >>> flops_to_string(1e9)\n '1.0 GFLOPs'\n >>> flops_to_string(2e5, 'MFLOPs')\n '0.2 MFLOPs'\n >>> flops_to_string(3e-9, None)\n '3e-09 FLOPs'\n " if (units is None): if ((flops // (10 ** 9)) > 0): return (str(round((flops / (10.0 ** 9)), precision)) + ' GFLOPs') elif ((flops // (10 ** 6)) > 0): return (str(round((flops / (10.0 ** 6)), precision)) + ' MFLOPs') elif ((flops // (10 ** 3)) > 0): return (str(round((flops / (10.0 ** 3)), precision)) + ' KFLOPs') else: return (str(flops) + ' FLOPs') elif (units == 'GFLOPs'): return ((str(round((flops / (10.0 ** 9)), precision)) + ' ') + units) elif (units == 'MFLOPs'): return ((str(round((flops / (10.0 ** 6)), precision)) + ' ') + units) elif (units == 'KFLOPs'): return ((str(round((flops / (10.0 ** 3)), precision)) + ' ') + units) else: return (str(flops) + ' FLOPs')
def params_to_string(num_params, units=None, precision=2): "Convert parameter number into a string.\n\n Args:\n num_params (float): Parameter number to be converted.\n units (str | None): Converted FLOPs units. Options are None, 'M',\n 'K' and ''. If set to None, it will automatically choose the most\n suitable unit for Parameter number. Default: None.\n precision (int): Digit number after the decimal point. Default: 2.\n\n Returns:\n str: The converted parameter number with units.\n\n Examples:\n >>> params_to_string(1e9)\n '1000.0 M'\n >>> params_to_string(2e5)\n '200.0 k'\n >>> params_to_string(3e-9)\n '3e-09'\n " if (units is None): if ((num_params // (10 ** 6)) > 0): return (str(round((num_params / (10 ** 6)), precision)) + ' M') elif (num_params // (10 ** 3)): return (str(round((num_params / (10 ** 3)), precision)) + ' k') else: return str(num_params) elif (units == 'M'): return ((str(round((num_params / (10.0 ** 6)), precision)) + ' ') + units) elif (units == 'K'): return ((str(round((num_params / (10.0 ** 3)), precision)) + ' ') + units) else: return str(num_params)
def print_model_with_flops(model, total_flops, total_params, units='GFLOPs', precision=3, ost=sys.stdout, flush=False):
    """Print a model with FLOPs for each layer.

    Temporarily replaces every module's ``extra_repr`` with a version that
    also reports parameter count and FLOPs (absolute and as a share of the
    totals), prints the model, then restores the original reprs.

    Args:
        model (nn.Module): The model to be printed. Must have been run
            through the FLOPs counting hooks so that ``__flops__``,
            ``__params__`` and ``__batch_counter__`` are populated.
        total_flops (float): Total FLOPs of the model.
        total_params (float): Total parameter counts of the model.
        units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 3.
        ost (stream): same as `file` param in :func:`print`.
            Default: sys.stdout.
        flush (bool): same as that in :func:`print`. Default: False.

    Example:
        >>> model = ExampleModel()
        >>> x = (3, 16, 16)
        to print the complexity information state for each layer, you can use
        >>> get_model_complexity_info(model, x)
        or directly use
        >>> print_model_with_flops(model, 4579784.0, 37361)
    """

    def accumulate_params(self):
        # Leaf modules with measured params report their own count;
        # containers sum over children recursively.
        # NOTE(review): the local name ``sum`` shadows the builtin.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum

    def accumulate_flops(self):
        # Per-module FLOPs averaged over the number of processed batches.
        if is_supported_instance(self):
            return (self.__flops__ / model.__batch_counter__)
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def flops_repr(self):
        # Build the extra-repr line: params, params share, FLOPs,
        # FLOPs share, followed by the module's original extra repr.
        accumulated_num_params = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([params_to_string(accumulated_num_params, units='M', precision=precision), '{:.3%} Params'.format((accumulated_num_params / total_params)), flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} FLOPs'.format((accumulated_flops_cost / total_flops)), self.original_extra_repr()])

    def add_extra_repr(m):
        # Bind the accumulators as methods via the descriptor protocol and
        # swap ``extra_repr`` for the FLOPs-aware version, keeping the
        # original around so it can be restored afterwards.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if (m.extra_repr != flops_extra_repr):
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert (m.extra_repr != m.original_extra_repr)

    def del_extra_repr(m):
        # Undo ``add_extra_repr``: restore the original repr and drop the
        # temporary bound methods.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
def get_model_parameters_number(model): 'Calculate parameter number of a model.\n\n Args:\n model (nn.module): The model for parameter number calculation.\n\n Returns:\n float: Parameter number of the model.\n ' num_params = sum((p.numel() for p in model.parameters() if p.requires_grad)) return num_params
def add_flops_counting_methods(net_main_module): net_main_module.start_flops_count = start_flops_count.__get__(net_main_module) net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module) net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module) net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module) net_main_module.reset_flops_count() return net_main_module
def compute_average_flops_cost(self): 'Compute average FLOPs cost.\n\n A method to compute average FLOPs cost, which will be available after\n `add_flops_counting_methods()` is called on a desired net object.\n\n Returns:\n float: Current mean flops consumption per image.\n ' batches_count = self.__batch_counter__ flops_sum = 0 for module in self.modules(): if is_supported_instance(module): flops_sum += module.__flops__ params_sum = get_model_parameters_number(self) return ((flops_sum / batches_count), params_sum)
def start_flops_count(self): 'Activate the computation of mean flops consumption per image.\n\n A method to activate the computation of mean flops consumption per image.\n which will be available after ``add_flops_counting_methods()`` is called on\n a desired net object. It should be called before running the network.\n ' add_batch_counter_hook_function(self) def add_flops_counter_hook_function(module): if is_supported_instance(module): if hasattr(module, '__flops_handle__'): return else: handle = module.register_forward_hook(get_modules_mapping()[type(module)]) module.__flops_handle__ = handle self.apply(partial(add_flops_counter_hook_function))
def stop_flops_count(self): 'Stop computing the mean flops consumption per image.\n\n A method to stop computing the mean flops consumption per image, which will\n be available after ``add_flops_counting_methods()`` is called on a desired\n net object. It can be called to pause the computation whenever.\n ' remove_batch_counter_hook_function(self) self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self): 'Reset statistics computed so far.\n\n A method to Reset computed statistics, which will be available after\n `add_flops_counting_methods()` is called on a desired net object.\n ' add_batch_counter_variables_or_reset(self) self.apply(add_flops_counter_variable_or_reset)
def empty_flops_counter_hook(module, input, output): module.__flops__ += 0